Column                        Type            Length / classes
ID                            stringlengths   36 .. 36
Language                      stringclasses   1 value
Repository Name               stringclasses   13 values
File Name                     stringlengths   2 .. 48
File Path in Repository       stringlengths   11 .. 111
File Path for Unit Test       stringlengths   13 .. 116
Code                          stringlengths   0 .. 278k
Unit Test - (Ground Truth)    stringlengths   78 .. 663k
Code Url                      stringlengths   91 .. 198
Test Code Url                 stringlengths   93 .. 203
Commit Hash                   stringclasses   13 values

ID: 0d292e46-1a6f-4535-a312-25c725947c33
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: as_string_op
File Path in Repository: tensorflow/core/kernels/as_string_op.cc
File Path for Unit Test: tensorflow/core/kernels/as_string_op_test.cc
#include <string> #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/framework/variant_tensor_data.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/stringpiece.h" namespace tensorflow { class AsStringOp : public OpKernel { public: using OpKernel::OpKernel; explicit AsStringOp(OpKernelConstruction* ctx) : OpKernel(ctx) { int32_t precision; bool scientific; bool shortest; int32_t width; string fill_string; DataType dtype; OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype)); OP_REQUIRES_OK(ctx, ctx->GetAttr("precision", &precision)); OP_REQUIRES_OK(ctx, ctx->GetAttr("scientific", &scientific)); OP_REQUIRES_OK(ctx, ctx->GetAttr("shortest", &shortest)); OP_REQUIRES_OK(ctx, ctx->GetAttr("width", &width)); OP_REQUIRES_OK(ctx, ctx->GetAttr("fill", &fill_string)); switch (dtype) { case DT_STRING: case DT_HALF: case DT_BFLOAT16: case DT_FLOAT: case DT_DOUBLE: case DT_COMPLEX64: case DT_COMPLEX128: break; default: OP_REQUIRES(ctx, !(scientific || shortest), errors::InvalidArgument("scientific and shortest format " "not supported for datatype ", DataTypeString(dtype))); OP_REQUIRES(ctx, precision < 0, errors::InvalidArgument("precision not supported " "for datatype ", DataTypeString(dtype))); } OP_REQUIRES( ctx, fill_string.size() <= 1, errors::InvalidArgument("Fill string must be one or fewer characters")); OP_REQUIRES(ctx, !(scientific && shortest), errors::InvalidArgument( "Cannot select both scientific and shortest notation")); format_ = "%"; if (!fill_string.empty()) { switch (fill_string[0]) { case ' ': case '+': case '-': case '0': case '#': strings::Appendf(&format_, "%s", fill_string.c_str()); break; default: bool fill_not_supported = true; OP_REQUIRES(ctx, !fill_not_supported, errors::InvalidArgument("Fill argument not supported: \"", fill_string, "\"")); } } if (width > -1) { strings::Appendf(&format_, "%d", width); } if (precision > -1) { strings::Appendf(&format_, ".%d", precision); } switch (dtype) { case DT_STRING: if (width <= 0) { format_ = ""; } else { strings::Appendf(&format_, "s"); } break; case DT_UINT8: case DT_UINT16: case DT_UINT32: strings::Appendf(&format_, "u"); break; case DT_UINT64: strings::Appendf(&format_, "llu"); break; case DT_INT8: case DT_INT16: case DT_INT32: strings::Appendf(&format_, "d"); break; case DT_INT64: strings::Appendf(&format_, "lld"); break; case DT_HALF: case DT_BFLOAT16: case DT_FLOAT: case DT_DOUBLE: case DT_COMPLEX64: case DT_COMPLEX128: if (shortest) { strings::Appendf(&format_, "g"); } else if (scientific) { strings::Appendf(&format_, "e"); } else { strings::Appendf(&format_, "f"); } break; case DT_BOOL: break; case DT_VARIANT: break; default: bool type_not_supported = true; OP_REQUIRES(ctx, !type_not_supported, errors::InvalidArgument("Type not supported: ", DataTypeString(dtype))); } if (dtype == DT_COMPLEX64 || dtype == DT_COMPLEX128) { format_ = strings::Printf("(%s,%s)", format_.c_str(), format_.c_str()); } } void Compute(OpKernelContext* context) override { const Tensor* input_tensor; OP_REQUIRES_OK(context, context->input("input", &input_tensor)); const DataType& dtype = input_tensor->dtype(); if (dtype == DT_STRING && 
format_.empty()) { context->set_output(0, context->input(0)); return; } Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output("output", input_tensor->shape(), &output_tensor)); auto output_flat = output_tensor->flat<tstring>(); #define ENCODE_TYPE(type, T, enc_str) \ case (type): { \ const auto& input_flat = input_tensor->flat<T>(); \ for (int i = 0; i < input_flat.size(); ++i) { \ output_flat(i) = strings::Printf((enc_str.c_str()), input_flat(i)); \ } \ } break switch (dtype) { ENCODE_TYPE(DT_UINT8, uint8, format_); ENCODE_TYPE(DT_UINT16, uint16, format_); ENCODE_TYPE(DT_UINT32, uint32, format_); ENCODE_TYPE(DT_UINT64, uint64, format_); ENCODE_TYPE(DT_INT8, int8, format_); ENCODE_TYPE(DT_INT16, int16, format_); ENCODE_TYPE(DT_INT32, int32, format_); ENCODE_TYPE(DT_INT64, int64_t, format_); ENCODE_TYPE(DT_FLOAT, float, format_); ENCODE_TYPE(DT_DOUBLE, double, format_); case (DT_BOOL): { const auto& input_flat = input_tensor->flat<bool>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = (input_flat(i)) ? "true" : "false"; } } break; case (DT_STRING): { const auto& input_flat = input_tensor->flat<tstring>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = strings::Printf(format_.c_str(), StringPiece(input_flat(i)).data()); } } break; case (DT_VARIANT): { const auto& input_flat = input_tensor->flat<Variant>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = input_flat(i).DebugString(); } } break; case (DT_HALF): { const auto& input_flat = input_tensor->flat<Eigen::half>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = strings::Printf(format_.c_str(), static_cast<float>(input_flat(i))); } } break; case (DT_BFLOAT16): { const auto& input_flat = input_tensor->flat<bfloat16>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = strings::Printf(format_.c_str(), static_cast<float>(input_flat(i))); } } break; case (DT_COMPLEX64): { const auto& input_flat = input_tensor->flat<complex64>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = strings::Printf( format_.c_str(), input_flat(i).real(), input_flat(i).imag()); } } break; case (DT_COMPLEX128): { const auto& input_flat = input_tensor->flat<complex128>(); for (int i = 0; i < input_flat.size(); ++i) { output_flat(i) = strings::Printf( format_.c_str(), input_flat(i).real(), input_flat(i).imag()); } } break; default: bool can_encode_type = false; OP_REQUIRES(context, can_encode_type, errors::InvalidArgument("Cannot encode input of type ", DataTypeString(dtype))); } #undef ENCODE_TYPE } private: string format_; }; REGISTER_KERNEL_BUILDER(Name("AsString").Device(DEVICE_CPU), AsStringOp); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/framework/variant_tensor_data.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { namespace { class AsStringGraphTest : public OpsTestBase { protected: Status Init(DataType input_type, const string& fill = "", int width = -1, int precision = -1, bool scientific = false, bool shortest = false) { TF_CHECK_OK(NodeDefBuilder("op", "AsString") .Input(FakeInput(input_type)) .Attr("fill", fill) .Attr("precision", precision) .Attr("scientific", scientific) .Attr("shortest", shortest) .Attr("width", width) .Finalize(node_def())); return InitOp(); } }; TEST_F(AsStringGraphTest, Int8) { TF_ASSERT_OK(Init(DT_INT8)); AddInputFromArray<int8>(TensorShape({3}), {-42, 0, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3})); test::FillValues<tstring>(&expected, {"-42", "0", "42"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, Int64) { TF_ASSERT_OK(Init(DT_INT64)); AddInputFromArray<int64_t>(TensorShape({3}), {-42, 0, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3})); test::FillValues<tstring>(&expected, {"-42", "0", "42"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FloatDefault) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>( &expected, {"-42.000000", "0.000000", "3.141590", "42.000000"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FloatScientific) { TF_ASSERT_OK(Init(DT_FLOAT, "", -1, -1, true)); AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>(&expected, {"-4.200000e+01", "0.000000e+00", "3.141590e+00", "4.200000e+01"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FloatShortest) { TF_ASSERT_OK(Init(DT_FLOAT, "", -1, -1, false, true)); AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>(&expected, {"-42", "0", "3.14159", "42"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FloatPrecisionOnly) { TF_ASSERT_OK(Init(DT_FLOAT, "", -1, 2)); AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>(&expected, {"-42.00", "0.00", "3.14", "42.00"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FloatWidthOnly) { TF_ASSERT_OK(Init(DT_FLOAT, "", 5)); AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>( &expected, 
{"-42.000000", "0.000000", "3.141590", "42.000000"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, Float_5_2_Format) { TF_ASSERT_OK(Init(DT_FLOAT, "", 5, 2)); AddInputFromArray<float>(TensorShape({4}), {-42, 0, 3.14159, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>(&expected, {"-42.00", " 0.00", " 3.14", "42.00"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, Complex) { TF_ASSERT_OK(Init(DT_COMPLEX64, "", 5, 2)); AddInputFromArray<complex64>(TensorShape({3}), {{-4, 2}, {0}, {3.14159, -1}}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3})); test::FillValues<tstring>( &expected, {"(-4.00, 2.00)", "( 0.00, 0.00)", "( 3.14,-1.00)"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, Bool) { TF_ASSERT_OK(Init(DT_BOOL)); AddInputFromArray<bool>(TensorShape({2}), {true, false}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({2})); test::FillValues<tstring>(&expected, {"true", "false"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, Variant) { TF_ASSERT_OK(Init(DT_VARIANT)); AddInput(DT_VARIANT, TensorShape({4})); auto inputs = mutable_input(0)->flat<Variant>(); inputs(0) = 2; inputs(1) = 3; inputs(2) = true; inputs(3) = Tensor("hi"); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4})); test::FillValues<tstring>( &expected, {"Variant<type: int value: 2>", "Variant<type: int value: 3>", "Variant<type: bool value: 1>", ("Variant<type: tensorflow::Tensor value: Tensor<type: string" " shape: [] values: hi>>")}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, OnlyOneOfScientificAndShortest) { Status s = Init(DT_FLOAT, "", -1, -1, true, true); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE(absl::StrContains( s.message(), "Cannot select both scientific and shortest notation")); } TEST_F(AsStringGraphTest, NoShortestForNonFloat) { Status s = Init(DT_INT32, "", -1, -1, false, true); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE(absl::StrContains( s.message(), "scientific and shortest format not supported for datatype")); } TEST_F(AsStringGraphTest, NoScientificForNonFloat) { Status s = Init(DT_INT32, "", -1, -1, true); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE(absl::StrContains( s.message(), "scientific and shortest format not supported for datatype")); } TEST_F(AsStringGraphTest, NoPrecisionForNonFloat) { Status s = Init(DT_INT32, "", -1, 5); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE( absl::StrContains(s.message(), "precision not supported for datatype")); } TEST_F(AsStringGraphTest, LongFill) { Status s = Init(DT_INT32, "asdf"); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE(absl::StrContains(s.message(), "Fill string must be one or fewer characters")); } TEST_F(AsStringGraphTest, FillWithZero) { TF_ASSERT_OK(Init(DT_INT64, "0", 4)); AddInputFromArray<int64_t>(TensorShape({3}), {-42, 0, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3})); test::FillValues<tstring>(&expected, {"-042", "0000", "0042"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FillWithSpace) { TF_ASSERT_OK(Init(DT_INT64, " ", 4)); AddInputFromArray<int64_t>(TensorShape({3}), {-42, 0, 42}); 
TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3})); test::FillValues<tstring>(&expected, {" -42", " 0", " 42"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FillWithChar1) { TF_ASSERT_OK(Init(DT_INT64, "-", 4)); AddInputFromArray<int64_t>(TensorShape({3}), {-42, 0, 42}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3})); test::FillValues<tstring>(&expected, {"-42 ", "0 ", "42 "}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(AsStringGraphTest, FillWithChar3) { Status s = Init(DT_INT32, "s"); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE(absl::StrContains(s.message(), "Fill argument not supported")); } TEST_F(AsStringGraphTest, FillWithChar4) { Status s = Init(DT_INT32, "n"); ASSERT_EQ(error::INVALID_ARGUMENT, s.code()); ASSERT_TRUE(absl::StrContains(s.message(), "Fill argument not supported")); } } }
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/as_string_op.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/as_string_op_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: 7960904d-c953-4df5-b235-7e81058ab006
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: quantized_conv_ops
File Path in Repository: tensorflow/core/kernels/quantized_conv_ops.cc
File Path for Unit Test: tensorflow/core/kernels/quantized_conv_ops_test.cc
#include <algorithm> #include <vector> #define EIGEN_USE_THREADS #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK #include "absl/status/status.h" #include "public/gemmlowp.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/conv_ops.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/kernels/reference_gemm.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/padding.h" namespace tensorflow { template <class T1, class T2, class T3> class ReferenceConvFunctor { public: void operator()(OpKernelContext* context, const T1* input_data, int input_batches, int input_height, int input_width, int input_depth, int input_offset, const T2* filter_data, int filter_height, int filter_width, int filter_count, int filter_offset, int stride, Padding padding, T3* output_data, int output_height, int output_width, int output_shift, int output_offset, int output_mult) { const int32_t highest = static_cast<int32>(Eigen::NumTraits<T3>::highest()); const int32_t lowest = static_cast<int32>(Eigen::NumTraits<T3>::lowest()); const int32_t rounding = (output_shift < 1) ? 0 : (1 << (output_shift - 1)); int filter_left_offset; int filter_top_offset; if (padding == VALID) { filter_left_offset = ((output_width - 1) * stride + filter_width - input_width + 1) / 2; filter_top_offset = ((output_height - 1) * stride + filter_height - input_height + 1) / 2; } else { filter_left_offset = ((output_width - 1) * stride + filter_width - input_width) / 2; filter_top_offset = ((output_height - 1) * stride + filter_height - input_height) / 2; } for (int batch = 0; batch < input_batches; ++batch) { for (int out_y = 0; out_y < output_height; ++out_y) { for (int out_x = 0; out_x < output_width; ++out_x) { for (int out_channel = 0; out_channel < filter_count; ++out_channel) { const int in_x_origin = (out_x * stride) - filter_left_offset; const int in_y_origin = (out_y * stride) - filter_top_offset; int32_t total = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { for (int filter_x = 0; filter_x < filter_width; ++filter_x) { for (int in_channel = 0; in_channel < input_depth; ++in_channel) { const int in_x = in_x_origin + filter_x; const int in_y = in_y_origin + filter_y; int32_t input_value; if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && (in_y < input_height)) { const T1 input_source_value = input_data[(batch * input_height * input_width * input_depth) + (in_y * input_width * input_depth) + (in_x * input_depth) + in_channel]; input_value = static_cast<int32>(input_source_value) - input_offset; } else { input_value = 0; } const T2 filter_source_value = filter_data[(filter_y * filter_width * input_depth * filter_count) + (filter_x * input_depth * filter_count) + (in_channel * filter_count) + out_channel]; const int32_t filter_value = static_cast<int32>(filter_source_value) - filter_offset; total += (input_value * filter_value); } } } const int32_t output = ((((total + output_offset) * output_mult) + rounding) >> output_shift); const int32_t top_clamped_output = std::min(output, highest); const int32_t clamped_output = std::max(top_clamped_output, lowest); output_data[(batch * output_height * output_width * filter_count) + (out_y * output_width * filter_count) + (out_x * filter_count) + out_channel] = clamped_output; } } } } } }; const 
size_t kMaxChunkSize = (1 * 1024 * 1024); template <class T1, class T2, class T3> class Im2ColConvFunctor { public: void operator()(OpKernelContext* context, const T1* input_data, int input_batches, int input_height, int input_width, int input_depth, int input_offset, const T2* filter_data, int filter_height, int filter_width, int filter_count, int filter_offset, int stride, Padding padding, T3* output_data, int output_height, int output_width, int output_shift, int output_offset, int output_mult) { if (input_offset < 0) { static int warning_count = 0; if (warning_count < 10) { ++warning_count; LOG(WARNING) << "For kernel '" << context->op_kernel().name() << "' from input '" << context->op_kernel().requested_input(0) << "': Zero is not representable in the quantized range used by the" << " input. This means QuantizedConv2d has to fall back to a slow" << " implementation, since the border of zero values can't be" << " represented easily. You should try to construct graphs that" << " avoid this situation."; } ReferenceConvFunctor<T1, T2, T3> conv_functor; conv_functor(context, input_data, input_batches, input_height, input_width, input_depth, input_offset, filter_data, filter_height, filter_width, filter_count, filter_offset, stride, padding, output_data, output_height, output_width, output_shift, output_offset, output_mult); return; } OP_REQUIRES( context, output_width > 0, errors::InvalidArgument("output_width must be strictly positive")); OP_REQUIRES( context, output_height > 0, errors::InvalidArgument("output_height must be strictly positive")); int filter_left_offset; int filter_top_offset; if (padding == VALID) { filter_left_offset = ((output_width - 1) * stride + filter_width - input_width + 1) / 2; filter_top_offset = ((output_height - 1) * stride + filter_height - input_height + 1) / 2; } else { filter_left_offset = ((output_width - 1) * stride + filter_width - input_width) / 2; filter_top_offset = ((output_height - 1) * stride + filter_height - input_height) / 2; } const int filter_value_count = filter_width * filter_height * input_depth; OP_REQUIRES(context, filter_value_count > 0, errors::InvalidArgument( "filter patch must contain at least one element")); const int64_t patches_per_chunk = kMaxChunkSize / (filter_value_count * sizeof(T1)); const int64_t chunk_value_count = (kMaxChunkSize + (sizeof(T1) - 1)) / sizeof(T1); Im2ColBufferResource<T1, chunk_value_count>* im2col_buffer_resource; std::function<Status(Im2ColBufferResource<T1, chunk_value_count>**)> creator = [](Im2ColBufferResource<T1, chunk_value_count>** resource) { #ifdef _MSC_VER const int64 chunk_value_count = (kMaxChunkSize + (sizeof(T1) - 1)) / sizeof(T1); #endif *resource = new Im2ColBufferResource<T1, chunk_value_count>(); return absl::OkStatus(); }; OP_REQUIRES_OK(context, context->resource_manager()->LookupOrCreate( "Conv2d", "im2col_buffer", &im2col_buffer_resource, creator)); mutex_lock lock_buffer(im2col_buffer_resource->mu); core::ScopedUnref unref_buffer(im2col_buffer_resource); T1* im2col_buffer = im2col_buffer_resource->data; const int64_t patch_count = (input_batches * output_height * output_width); const int64_t chunk_count = (patch_count + (patches_per_chunk - 1)) / patches_per_chunk; for (int64_t chunk_index = 0; chunk_index < chunk_count; ++chunk_index) { const int64_t patch_index_start = chunk_index * patches_per_chunk; const int64_t patch_index_end = std::min(patch_index_start + patches_per_chunk, patch_count); for (int64_t patch_index = patch_index_start; patch_index < patch_index_end; 
++patch_index) { const int64_t batch = patch_index / (output_height * output_width); const int64_t out_y = (patch_index / output_width) % output_height; const int64_t out_x = patch_index % output_width; const T1* input_batch_start = input_data + (batch * input_height * input_width * input_depth); const int in_y_origin = (out_y * stride) - filter_top_offset; const int in_x_origin = (out_x * stride) - filter_left_offset; const int patch_index_within_chunk = patch_index % patches_per_chunk; T1* im2col_patch_start = im2col_buffer + (patch_index_within_chunk * filter_value_count); for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + filter_y; T1* im2col_row_start = im2col_patch_start + (filter_y * filter_width * input_depth); if ((in_y < 0) || (in_y >= input_height)) { memset(im2col_row_start, input_offset, (filter_width * input_depth)); } else { const int in_x_end = in_x_origin + filter_width; const int left_zero_count = std::max(0, 0 - in_x_origin); const int right_zero_count = std::max(0, in_x_end - input_width); const int center_copy_count = filter_width - (left_zero_count + right_zero_count); if (left_zero_count > 0) { T1* im2col_left_start = im2col_row_start; memset(im2col_left_start, input_offset, (left_zero_count * input_depth)); } if (center_copy_count > 0) { const T1* input_row_start = input_batch_start + (in_y * input_width * input_depth) + (std::max(0, in_x_origin) * input_depth); T1* im2col_center_start = im2col_row_start + (left_zero_count * input_depth); memcpy(im2col_center_start, input_row_start, (center_copy_count * input_depth)); } if (right_zero_count > 0) { T1* im2col_right_start = im2col_row_start + ((left_zero_count + center_copy_count) * input_depth); memset(im2col_right_start, input_offset, (right_zero_count * input_depth)); } } } } const int how_many_patches = patch_index_end - patch_index_start; const bool transpose_a = false; const bool transpose_b = false; const bool transpose_c = false; const int m = how_many_patches; const int n = filter_count; const int k = filter_value_count; const int lda = filter_value_count; const int ldb = filter_count; const int ldc = filter_count; T3* chunk_output_data = output_data + (patch_index_start * filter_count); if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<T3, qint32>() && (output_offset == 0) && (output_mult == 1) && (output_shift == 0) && (transpose_c == false) && (k <= 2048)) { meta::QuantizedGemm(context, transpose_a, transpose_b, im2col_buffer, filter_data, chunk_output_data, m, n, k, -input_offset, -filter_offset, lda, ldb, ldc); } else if (std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<T3, qint32>() && (output_offset == 0) && (output_mult == 1) && (output_shift == 0)) { const uint8* im2col_data_as_uint8 = &(im2col_buffer->value); const uint8* filter_data_as_uint8 = &(filter_data->value); int32* output_data_as_int32 = &(chunk_output_data->value); static const gemmlowp::MapOrder ResultOrder = !transpose_c ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; static const gemmlowp::MapOrder LhsOrder = !transpose_a ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; static const gemmlowp::MapOrder RhsOrder = !transpose_b ? 
gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; gemmlowp::MatrixMap<const std::uint8_t, LhsOrder> lhs( im2col_data_as_uint8, m, k, lda); gemmlowp::MatrixMap<const std::uint8_t, RhsOrder> rhs( filter_data_as_uint8, k, n, ldb); gemmlowp::MatrixMap<std::int32_t, ResultOrder> result( output_data_as_int32, m, n, ldc); const std::tuple<> empty_pipeline = {}; auto& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); TensorflowGemmContext context(worker_threads.num_threads, worker_threads.workers); gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t, gemmlowp::DefaultL8R8BitDepthParams>( &context, lhs, rhs, &result, -input_offset, -filter_offset, empty_pipeline); TF_ANNOTATE_MEMORY_IS_INITIALIZED(output_data_as_int32, m * n * sizeof(int32)); } else { ReferenceGemm<T1, T2, T3>( transpose_a, transpose_b, transpose_c, m, n, k, im2col_buffer, input_offset, lda, filter_data, filter_offset, ldb, chunk_output_data, output_shift, output_offset, output_mult, ldc); } } } }; template <class T1, class T2, class T3, template <class TF1, class TF2, class TF3> class ConvFunctor> class QuantizedConv2DOp : public OpKernel { public: explicit QuantizedConv2DOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_)); OP_REQUIRES(context, strides_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, strides_[1] == strides_[2], errors::InvalidArgument( "Current implementation only supports equal length " "strides in the row and column dimensions.")); OP_REQUIRES( context, (strides_[0] == 1 && strides_[3] == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); std::vector<int32> dilations; OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations)); OP_REQUIRES(context, dilations.size() == 4, errors::InvalidArgument("Dilations field must " "specify 4 dimensions")); OP_REQUIRES(context, dilations[1] == 1 && dilations[2] == 1, errors::InvalidArgument( "Current implementation only supports dilated rate as 1 " "in the row and column dimensions.")); OP_REQUIRES(context, (dilations[0] == 1 && dilations[3] == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilations in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); int input_dims = input.shape().dims(); for (int i = 0; i < input_dims; ++i) { OP_REQUIRES(context, input.shape().dim_size(i) != 0, absl::InvalidArgumentError( "Invalid input: Shapes dimension cannot be 0.")); } const Tensor& filter = context->input(1); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be rank 4 but is rank ", input.shape().dims())); OP_REQUIRES(context, filter.dims() == 4, errors::InvalidArgument("filter must be rank 4 but is rank ", filter.shape().dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(2).shape()), errors::InvalidArgument("min_input must be rank 0 but is rank ", context->input(2).shape().dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(3).shape()), errors::InvalidArgument("max_input must be rank 0 but is rank ", context->input(3).shape().dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(context->input(4).shape()), errors::InvalidArgument("min_filter must be rank 0 but is rank ", 
context->input(4).shape().dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(context->input(5).shape()), errors::InvalidArgument("max_filter must be rank 0 but is rank ", context->input(5).shape().dims())); const float min_input = context->input(2).flat<float>()(0); const float max_input = context->input(3).flat<float>()(0); const float min_filter = context->input(4).flat<float>()(0); const float max_filter = context->input(5).flat<float>()(0); const int32_t offset_input = FloatToQuantizedUnclamped<T1>(0.0f, min_input, max_input); const int32_t offset_filter = FloatToQuantizedUnclamped<T2>(0.0f, min_filter, max_filter); const int32_t offset_output = 0; const int32_t mult_output = 1; const int32_t shift_output = 0; const int64_t in_depth = input.dim_size(3); OP_REQUIRES(context, in_depth == filter.dim_size(2), errors::InvalidArgument( "input and filter must have the same depth: ", in_depth, " vs ", filter.dim_size(2))); const int64_t out_depth = filter.dim_size(3); const int64_t input_rows = input.dim_size(1); const int64_t filter_rows = filter.dim_size(0); const int64_t input_cols = input.dim_size(2); const int64_t filter_cols = filter.dim_size(1); const int64_t batch = input.dim_size(0); const int stride = strides_[1]; int64_t out_rows = 0, out_cols = 0, pad_rows = 0, pad_cols = 0; OP_REQUIRES_OK(context, GetWindowedOutputSize( input_rows, filter_rows, 1, stride, padding_, &out_rows, &pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSize( input_cols, filter_cols, 1, stride, padding_, &out_cols, &pad_cols)); CHECK_GT(batch, 0); CHECK_GT(out_rows, 0); CHECK_GT(out_cols, 0); CHECK_GT(out_depth, 0); TensorShape out_shape({batch, out_rows, out_cols, out_depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); ConvFunctor<T1, T2, T3> conv_functor; conv_functor(context, input.flat<T1>().data(), batch, input_rows, input_cols, in_depth, offset_input, filter.flat<T2>().data(), filter_rows, filter_cols, out_depth, offset_filter, stride, padding_, output->flat<T3>().data(), out_rows, out_cols, shift_output, offset_output, mult_output); float min_output_value; float max_output_value; QuantizationRangeForMultiplication<T1, T2, T3>( min_input, max_input, min_filter, max_filter, &min_output_value, &max_output_value); Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); output_min->flat<float>()(0) = min_output_value; Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_max->flat<float>()(0) = max_output_value; } private: std::vector<int32> strides_; Padding padding_; }; REGISTER_KERNEL_BUILDER( Name("QuantizedConv2D") .Device(DEVICE_CPU) .TypeConstraint<quint8>("Tinput") .TypeConstraint<quint8>("Tfilter") .TypeConstraint<qint32>("out_type"), QuantizedConv2DOp<quint8, quint8, qint32, Im2ColConvFunctor>); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class QuantizedConv2DTest : public OpsTestBase { protected: }; TEST_F(QuantizedConv2DTest, Small) { const int stride = 1; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 3; const int image_batch_count = 1; const float image_min = 0.0f; const float image_max = 12.0f; Tensor image_float(DT_FLOAT, {image_batch_count, image_height, image_width, depth}); test::FillValues<float>(&image_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Tensor image_quantized = FloatTensorToQuantized<quint8>(image_float, image_min, image_max); const int filter_size = 3; const int filter_count = 1; const float filter_min = 1.0f; const float filter_max = 9.0f; Tensor filter_float(DT_FLOAT, {filter_size, filter_size, depth, filter_count}); test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9}); Tensor filter_quantized = FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max); AddInputFromArray<quint8>(image_quantized.shape(), image_quantized.flat<quint8>()); AddInputFromArray<quint8>(filter_quantized.shape(), filter_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {image_min}); AddInputFromArray<float>(TensorShape({}), {image_max}); AddInputFromArray<float>(TensorShape({}), {filter_min}); AddInputFromArray<float>(TensorShape({}), {filter_max}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width; const int expected_height = image_height * filter_count; Tensor expected_float( DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121}); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 1.0); } TEST_F(QuantizedConv2DTest, Small32Bit) { const int stride = 1; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") 
.Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 3; const int image_batch_count = 1; AddInputFromArray<quint8>( TensorShape({image_batch_count, image_height, image_width, depth}), {10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120}); const int filter_size = 3; const int filter_count = 1; AddInputFromArray<quint8>( TensorShape({filter_size, filter_size, depth, filter_count}), {10, 40, 70, 20, 50, 80, 30, 60, 90}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width; const int expected_height = image_height * filter_count; Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<qint32>( &expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800, 18700, 23400, 26100, 12100}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(QuantizedConv2DTest, OddPadding) { const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 4; const int image_batch_count = 1; AddInputFromArray<quint8>( TensorShape({image_batch_count, image_height, image_width, depth}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); const int filter_size = 3; const int filter_count = 1; AddInputFromArray<quint8>( TensorShape({filter_size, filter_size, depth, filter_count}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width / stride; const int expected_height = (image_height * filter_count) / stride; Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<qint32>(&expected, {348, 252, 274, 175}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(QuantizedConv2DTest, OddPaddingBatch) { const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 4; const int image_batch_count = 3; AddInputFromArray<quint8>( TensorShape({image_batch_count, image_height, image_width, depth}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); const int filter_size = 3; const int filter_count = 1; AddInputFromArray<quint8>( TensorShape({filter_size, filter_size, depth, 
filter_count}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width / stride; const int expected_height = (image_height * filter_count) / stride; Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<qint32>(&expected, {348, 252, 274, 175, 348, 252, 274, 175, 348, 252, 274, 175}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(QuantizedConv2DTest, SmallWithNoZero) { const int stride = 1; TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "QuantizedConv2D") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 3; const int image_batch_count = 1; const float image_min = 1.0f; const float image_max = 12.0f; Tensor image_float(DT_FLOAT, {image_batch_count, image_height, image_width, depth}); test::FillValues<float>(&image_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Tensor image_quantized = FloatTensorToQuantized<quint8>(image_float, image_min, image_max); const int filter_size = 3; const int filter_count = 1; const float filter_min = 1.0f; const float filter_max = 9.0f; Tensor filter_float(DT_FLOAT, {filter_size, filter_size, depth, filter_count}); test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9}); Tensor filter_quantized = FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max); AddInputFromArray<quint8>(image_quantized.shape(), image_quantized.flat<quint8>()); AddInputFromArray<quint8>(filter_quantized.shape(), filter_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {image_min}); AddInputFromArray<float>(TensorShape({}), {image_max}); AddInputFromArray<float>(TensorShape({}), {filter_min}); AddInputFromArray<float>(TensorShape({}), {filter_max}); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width; const int expected_height = image_height * filter_count; Tensor expected_float( DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121}); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 1.0); } }
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_conv_ops.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_conv_ops_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: 188d98db-14ee-4116-be46-fa26ccb5f35a
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: summary_image_op
File Path in Repository: tensorflow/core/kernels/summary_image_op.cc
File Path for Unit Test: tensorflow/core/kernels/summary_image_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/png/png_io.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { class SummaryImageOp : public OpKernel { public: typedef Eigen::Tensor<uint8, 2, Eigen::RowMajor> Uint8Image; explicit SummaryImageOp(OpKernelConstruction* context) : OpKernel(context) { int64_t max_images_tmp; OP_REQUIRES_OK(context, context->GetAttr("max_images", &max_images_tmp)); OP_REQUIRES(context, max_images_tmp < (1LL << 31), errors::InvalidArgument("max_images must be < 2^31")); max_images_ = static_cast<int32>(max_images_tmp); const TensorProto* proto; OP_REQUIRES_OK(context, context->GetAttr("bad_color", &proto)); OP_REQUIRES_OK(context, context->device()->MakeTensorFromProto( *proto, AllocatorAttributes(), &bad_color_)); OP_REQUIRES(context, bad_color_.dtype() == DT_UINT8, errors::InvalidArgument("bad_color must be uint8, got ", DataTypeString(bad_color_.dtype()))); OP_REQUIRES( context, TensorShapeUtils::IsVector(bad_color_.shape()), errors::InvalidArgument("bad_color must be a vector, got shape ", bad_color_.shape().DebugString())); } void Compute(OpKernelContext* c) override { const Tensor& tags = c->input(0); const Tensor& tensor = c->input(1); OP_REQUIRES(c, TensorShapeUtils::IsScalar(tags.shape()), errors::InvalidArgument("Tags must be a scalar")); OP_REQUIRES(c, tensor.dims() == 4 && (tensor.dim_size(3) == 1 || tensor.dim_size(3) == 3 || tensor.dim_size(3) == 4), errors::InvalidArgument( "Tensor must be 4-D with last dim 1, 3, or 4, not ", tensor.shape().DebugString())); const string& base_tag = tags.scalar<tstring>()(); OP_REQUIRES(c, tensor.dim_size(0) < (1LL << 31) && tensor.dim_size(1) < (1LL << 31) && tensor.dim_size(2) < (1LL << 31) && (tensor.dim_size(1) * tensor.dim_size(2)) < (1LL << 29), errors::InvalidArgument("Tensor too large for summary ", tensor.shape().DebugString())); const int batch_size = static_cast<int>(tensor.dim_size(0)); const int h = static_cast<int>(tensor.dim_size(1)); const int w = static_cast<int>(tensor.dim_size(2)); const int hw = h * w; const int depth = static_cast<int>(tensor.dim_size(3)); OP_REQUIRES(c, hw > 0 && depth > 0, errors::InvalidArgument( "input tensor must have non-zero dims. 
Found: [", batch_size, ", ", h, ", ", w, ", ", depth, "].")); Summary s; if (tensor.dtype() == DT_UINT8) { auto ith_image = [&tensor, batch_size, hw, depth](int i) { auto values = tensor.shaped<uint8, 3>({batch_size, hw, depth}); return typename TTypes<uint8>::ConstMatrix( &values(i, 0, 0), Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth)); }; OP_REQUIRES_OK( c, AddImages(base_tag, batch_size, w, h, depth, ith_image, &s)); } else if (tensor.dtype() == DT_HALF) { NormalizeAndAddImages<Eigen::half>(c, tensor, h, w, hw, depth, batch_size, base_tag, &s); } else if (tensor.dtype() == DT_FLOAT) { NormalizeAndAddImages<float>(c, tensor, h, w, hw, depth, batch_size, base_tag, &s); } else { NormalizeAndAddImages<double>(c, tensor, h, w, hw, depth, batch_size, base_tag, &s); } Tensor* summary_tensor = nullptr; OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor)); CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()())); } template <class T> void NormalizeAndAddImages(OpKernelContext* c, const Tensor& tensor, int h, int w, int hw, int depth, int batch_size, const string& base_tag, Summary* s) { OP_REQUIRES(c, bad_color_.dim_size(0) >= depth, errors::InvalidArgument( "expected depth <= bad_color.size, got depth = ", depth, ", bad_color.size = ", bad_color_.dim_size(0))); auto bad_color_full = bad_color_.vec<uint8>(); typename TTypes<uint8>::ConstVec bad_color(bad_color_full.data(), depth); Uint8Image image(hw, depth); auto ith_image = [&tensor, &image, bad_color, batch_size, hw, depth](int i) { auto tensor_eigen = tensor.template shaped<T, 3>({batch_size, hw, depth}); typename TTypes<T>::ConstMatrix values( &tensor_eigen(i, 0, 0), Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth)); NormalizeFloatImage<T>(hw, depth, values, bad_color, &image); return image; }; OP_REQUIRES_OK(c, AddImages(base_tag, batch_size, w, h, depth, ith_image, s)); } Status AddImages(const string& tag, int batch_size, int w, int h, int depth, const std::function<Uint8Image(int)>& ith_image, Summary* s) { const int N = std::min<int>(max_images_, batch_size); for (int i = 0; i < N; ++i) { Summary::Value* v = s->add_value(); if (max_images_ > 1) { v->set_tag(strings::StrCat(tag, "/image/", i)); } else { v->set_tag(strings::StrCat(tag, "/image")); } auto image = ith_image(i); Summary::Image* si = v->mutable_image(); si->set_height(h); si->set_width(w); si->set_colorspace(depth); const int channel_bits = 8; const int compression = -1; if (!png::WriteImageToBuffer( image.data(), w, h, w * depth, depth, channel_bits, compression, si->mutable_encoded_image_string(), nullptr)) { return errors::Internal("PNG encoding failed"); } } return absl::OkStatus(); } template <class T> static void NormalizeFloatImage(int hw, int depth, typename TTypes<T>::ConstMatrix values, typename TTypes<uint8>::ConstVec bad_color, Uint8Image* image) { if (!image->size()) return; float image_min = std::numeric_limits<float>::infinity(); float image_max = -image_min; for (int i = 0; i < hw; i++) { bool finite = true; for (int j = 0; j < depth; j++) { if (!Eigen::numext::isfinite(values(i, j))) { finite = false; break; } } if (finite) { for (int j = 0; j < depth; j++) { float value(values(i, j)); image_min = std::min(image_min, value); image_max = std::max(image_max, value); } } } const float kZeroThreshold = 1e-6; T scale, offset; if (image_min < 0) { float max_val = std::max(std::abs(image_min), std::abs(image_max)); scale = T(max_val < kZeroThreshold ? 
0.0f : 127.0f / max_val); offset = T(128.0f); } else { scale = T(image_max < kZeroThreshold ? 0.0f : 255.0f / image_max); offset = T(0.0f); } for (int i = 0; i < hw; i++) { bool finite = true; for (int j = 0; j < depth; j++) { if (!Eigen::numext::isfinite(values(i, j))) { finite = false; break; } } if (finite) { image->chip<0>(i) = (values.template chip<0>(i) * scale + offset) .template cast<uint8>(); } else { image->chip<0>(i) = bad_color; } } } private: int32 max_images_; Tensor bad_color_; }; REGISTER_KERNEL_BUILDER(Name("ImageSummary").Device(DEVICE_CPU), SummaryImageOp); }
#include <functional> #include <memory> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/histogram/histogram.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { static void EXPECT_SummaryMatches(const Summary& actual, const string& expected_str) { Summary expected; CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected)); EXPECT_EQ(expected.DebugString(), actual.DebugString()); } class SummaryImageOpTest : public OpsTestBase { protected: void MakeOp(int max_images) { TF_ASSERT_OK(NodeDefBuilder("myop", "ImageSummary") .Input(FakeInput()) .Input(FakeInput()) .Attr("max_images", max_images) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } void CheckAndRemoveEncodedImages(Summary* summary) { for (int i = 0; i < summary->value_size(); ++i) { Summary::Value* value = summary->mutable_value(i); ASSERT_TRUE(value->has_image()) << "No image for value: " << value->tag(); ASSERT_FALSE(value->image().encoded_image_string().empty()) << "No encoded_image_string for value: " << value->tag(); if (VLOG_IS_ON(2)) { TF_CHECK_OK(WriteStringToFile( Env::Default(), strings::StrCat("/tmp/", value->tag(), ".png"), value->image().encoded_image_string())); } value->mutable_image()->clear_encoded_image_string(); } } }; TEST_F(SummaryImageOpTest, ThreeGrayImagesOutOfFive4dInput) { MakeOp(3 ); AddInputFromArray<tstring>(TensorShape({}), {"tag"}); AddInputFromArray<float>(TensorShape({5, 2, 1, 1}), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}); TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); CheckAndRemoveEncodedImages(&summary); EXPECT_SummaryMatches(summary, R"( value { tag: 'tag/image/0' image { width: 1 height: 2 colorspace: 1} } value { tag: 'tag/image/1' image { width: 1 height: 2 colorspace: 1} } value { tag: 'tag/image/2' image { width: 1 height: 2 colorspace: 1} } )"); } TEST_F(SummaryImageOpTest, OneGrayImage4dInput) { MakeOp(1 ); AddInputFromArray<tstring>(TensorShape({}), {"tag"}); AddInputFromArray<float>(TensorShape({5 , 2, 1, 1 }), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}); TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); CheckAndRemoveEncodedImages(&summary); EXPECT_SummaryMatches(summary, R"( value { tag: 'tag/image' image { width: 1 height: 2 colorspace: 1} })"); } TEST_F(SummaryImageOpTest, OneColorImage4dInput) { MakeOp(1 ); AddInputFromArray<tstring>(TensorShape({}), {"tag"}); AddInputFromArray<float>( TensorShape({1 , 5 , 2 , 3 }), { 1.0f, 0.1f, 0.2f, 1.0f, 0.3f, 0.4f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, }); 
TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); CheckAndRemoveEncodedImages(&summary); EXPECT_SummaryMatches(summary, R"( value { tag: 'tag/image' image { width: 2 height: 5 colorspace: 3} })"); } } }
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_image_op.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_image_op_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: 05ba9773-8dd8-4974-a475-170f6c840a77
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: random_op
File Path in Repository: tensorflow/core/kernels/random_op.cc
File Path for Unit Test: tensorflow/core/kernels/random_op_test.cc
#define EIGEN_USE_THREADS #include <algorithm> #include <cmath> #include <memory> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/kernels/random_op_cpu.h" #include "tensorflow/core/lib/hash/crc32c.h" #include "tensorflow/core/lib/random/random_distributions.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/guarded_philox_random.h" #include "tensorflow/core/util/work_sharder.h" #if EIGEN_COMP_GNUC && __cplusplus > 199711L #define DISABLE_FLOAT_EQUALITY_WARNING \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"") #define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop") #else #define DISABLE_FLOAT_EQUALITY_WARNING #define ENABLE_FLOAT_EQUALITY_WARNING #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace { static Status AllocateOutputWithShape(OpKernelContext* ctx, const Tensor& shape, int index, Tensor** output) { TensorShape tensor_shape; TF_RETURN_IF_ERROR(tensor::MakeShape(shape, &tensor_shape)); return ctx->allocate_output(index, tensor_shape, output); } template <typename Device, class Distribution> class PhiloxRandomOp : public OpKernel { public: typedef typename Distribution::ResultElementType T; explicit PhiloxRandomOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, generator_.Init(ctx)); } void Compute(OpKernelContext* ctx) override { const Tensor& shape = ctx->input(0); Tensor* output; OP_REQUIRES_OK(ctx, AllocateOutputWithShape(ctx, shape, 0, &output)); auto output_flat = output->flat<T>(); functor::FillPhiloxRandom<Device, Distribution>()( ctx, ctx->eigen_device<Device>(), nullptr, nullptr, generator_.ReserveRandomOutputs(output_flat.size(), 256), output_flat.data(), output_flat.size(), Distribution()); } private: GuardedPhiloxRandom generator_; }; template <typename Device, class IntType> class RandomUniformIntOp : public OpKernel { public: explicit RandomUniformIntOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, generator_.Init(ctx)); } void Compute(OpKernelContext* ctx) override { const Tensor& shape = ctx->input(0); const Tensor& minval = ctx->input(1); const Tensor& maxval = ctx->input(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(minval.shape()), errors::InvalidArgument("minval must be 0-D, got shape ", minval.shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(maxval.shape()), errors::InvalidArgument("maxval must be 0-D, got shape ", maxval.shape().DebugString())); Tensor* output; OP_REQUIRES_OK(ctx, AllocateOutputWithShape(ctx, shape, 0, &output)); if (output->NumElements() == 0) return; IntType lo = minval.scalar<IntType>()(); IntType hi = maxval.scalar<IntType>()(); OP_REQUIRES( ctx, lo < hi, errors::InvalidArgument("Need minval < maxval, got ", lo, " >= ", hi)); typedef random::UniformDistribution<random::PhiloxRandom, IntType> Distribution; Distribution dist(lo, hi); auto output_flat = output->flat<IntType>(); functor::FillPhiloxRandom<Device, Distribution>()( ctx, ctx->eigen_device<Device>(), nullptr, nullptr, generator_.ReserveRandomOutputs(output_flat.size(), 256), output_flat.data(), output_flat.size(), dist); } private: GuardedPhiloxRandom generator_; }; template <typename T> class 
RandomGammaOp : public OpKernel { public: explicit RandomGammaOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, generator_.Init(context)); } void Compute(OpKernelContext* ctx) override { const Tensor& shape_t = ctx->input(0); const Tensor& alpha_t = ctx->input(1); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(shape_t.shape()) && (shape_t.dtype() == DataType::DT_INT32 || shape_t.dtype() == DataType::DT_INT64), errors::InvalidArgument( "shape must be a vector of {int32,int64}, got shape: ", shape_t.DebugString())); TensorShape samples_shape; if (shape_t.dtype() == DataType::DT_INT32) { auto vec = shape_t.flat<int32>(); OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(), &samples_shape)); } else if (shape_t.dtype() == DataType::DT_INT64) { auto vec = shape_t.flat<int64_t>(); OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(), &samples_shape)); } const int64_t samples_per_alpha = samples_shape.num_elements(); OP_REQUIRES_OK(ctx, samples_shape.AppendShapeWithStatus(alpha_t.shape())); Tensor* samples_t = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, samples_shape, &samples_t)); if (samples_shape.num_elements() == 0) return; using random::PhiloxRandom; typedef random::NormalDistribution<PhiloxRandom, double> Normal; typedef random::UniformDistribution<PhiloxRandom, double> Uniform; #define UNIFORM(X) \ if (uniform_remaining == 0) { \ uniform_remaining = Uniform::kResultElementCount; \ uniform_result = uniform(&gen); \ } \ uniform_remaining--; \ double X = uniform_result[uniform_remaining] static constexpr int kReservedSamplesPerOutput = 256; const auto alpha_flat = alpha_t.flat<T>().data(); const int64_t num_alphas = alpha_t.NumElements(); OP_REQUIRES(ctx, num_alphas > 0, errors::InvalidArgument( "Input alpha should have non-zero element count, got: ", num_alphas)); auto samples_flat = samples_t->flat<T>().data(); PhiloxRandom rng = generator_.ReserveRandomOutputs( samples_per_alpha * num_alphas, kReservedSamplesPerOutput); auto DoWork = [samples_per_alpha, num_alphas, &rng, samples_flat, alpha_flat](int64_t start_output, int64_t limit_output) { using Eigen::numext::exp; using Eigen::numext::log; using Eigen::numext::log1p; using Eigen::numext::pow; Normal normal; Uniform uniform; typename Normal::ResultType norm_result; typename Uniform::ResultType uniform_result; for (int64_t output_idx = start_output; output_idx < limit_output; ) { int64_t alpha_idx = output_idx / samples_per_alpha; T* const samples_alpha_offset = samples_flat + alpha_idx; const double alpha = static_cast<double>(alpha_flat[alpha_idx]); DISABLE_FLOAT_EQUALITY_WARNING if (alpha == static_cast<double>(1.0)) { ENABLE_FLOAT_EQUALITY_WARNING for (int64_t sample_idx = output_idx % samples_per_alpha; sample_idx < samples_per_alpha && output_idx < limit_output; sample_idx++, output_idx++) { PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16_t uniform_remaining = 0; UNIFORM(u); const double res = -log1p(-u); samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res); } } else { const bool alpha_less_than_one = alpha < 1; const double d = alpha + (alpha_less_than_one ? 
2.0 / 3 : -1.0 / 3); const double c = 1.0 / 3 / sqrt(d); for (int64_t sample_idx = output_idx % samples_per_alpha; sample_idx < samples_per_alpha && output_idx < limit_output; sample_idx++, output_idx++) { PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16_t norm_remaining = 0; int16_t uniform_remaining = 0; while (true) { if (norm_remaining == 0) { norm_remaining = Normal::kResultElementCount; norm_result = normal(&gen); } norm_remaining--; const double x = norm_result[norm_remaining]; double v = 1 + c * x; if (v <= 0) { continue; } v = v * v * v; UNIFORM(u); if ((u < 1 - 0.0331 * (x * x) * (x * x)) || (log(u) < 0.5 * x * x + d * (1 - v + log(v)))) { double res = d * v; if (alpha_less_than_one) { UNIFORM(b); res *= pow(b, 1 / alpha); } samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res); break; } } } } } }; #undef UNIFORM static const int kElementCost = 85 + 2 * Normal::kElementCost + Uniform::kElementCost + 3 * PhiloxRandom::kElementCost; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_alphas * samples_per_alpha, kElementCost, DoWork); } private: GuardedPhiloxRandom generator_; RandomGammaOp(const RandomGammaOp&) = delete; void operator=(const RandomGammaOp&) = delete; }; } #define REGISTER(TYPE) \ template struct functor::FillPhiloxRandom< \ CPUDevice, random::UniformDistribution<random::PhiloxRandom, TYPE>>; \ template struct functor::FillPhiloxRandom< \ CPUDevice, random::NormalDistribution<random::PhiloxRandom, TYPE>>; \ template struct functor::FillPhiloxRandom< \ CPUDevice, \ random::TruncatedNormalDistribution< \ random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>; \ REGISTER_KERNEL_BUILDER( \ Name("RandomUniform") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .TypeConstraint<TYPE>("dtype"), \ PhiloxRandomOp<CPUDevice, random::UniformDistribution< \ random::PhiloxRandom, TYPE>>); \ REGISTER_KERNEL_BUILDER( \ Name("RandomStandardNormal") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .TypeConstraint<TYPE>("dtype"), \ PhiloxRandomOp<CPUDevice, \ random::NormalDistribution<random::PhiloxRandom, TYPE>>); \ REGISTER_KERNEL_BUILDER( \ Name("TruncatedNormal") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .TypeConstraint<TYPE>("dtype"), \ PhiloxRandomOp< \ CPUDevice, \ random::TruncatedNormalDistribution< \ random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>); \ REGISTER_KERNEL_BUILDER( \ Name("RandomGamma").Device(DEVICE_CPU).TypeConstraint<TYPE>("T"), \ RandomGammaOp<TYPE>) #define REGISTER_FULL_INT(IntType) \ template struct functor::FillPhiloxRandom< \ CPUDevice, \ random::UniformFullIntDistribution<random::PhiloxRandom, IntType>> #define REGISTER_INT(IntType) \ REGISTER_FULL_INT(IntType); \ template struct functor::FillPhiloxRandom< \ CPUDevice, random::UniformDistribution<random::PhiloxRandom, IntType>>; \ REGISTER_KERNEL_BUILDER(Name("RandomUniformInt") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .HostMemory("minval") \ .HostMemory("maxval") \ .TypeConstraint<IntType>("Tout"), \ RandomUniformIntOp<CPUDevice, IntType>); TF_CALL_half(REGISTER); TF_CALL_bfloat16(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); TF_CALL_int32(REGISTER_INT); TF_CALL_int64(REGISTER_INT); TF_CALL_uint32(REGISTER_FULL_INT); TF_CALL_uint64(REGISTER_FULL_INT); #undef REGISTER #undef REGISTER_INT #undef REGISTER_FULL_INT #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER( \ Name("RandomUniform") \ 
.Device(DEVICE_GPU) \ .HostMemory("shape") \ .TypeConstraint<int32>("T") \ .TypeConstraint<TYPE>("dtype"), \ PhiloxRandomOp<GPUDevice, random::UniformDistribution< \ random::PhiloxRandom, TYPE>>); \ REGISTER_KERNEL_BUILDER( \ Name("RandomStandardNormal") \ .Device(DEVICE_GPU) \ .HostMemory("shape") \ .TypeConstraint<int32>("T") \ .TypeConstraint<TYPE>("dtype"), \ PhiloxRandomOp<GPUDevice, \ random::NormalDistribution<random::PhiloxRandom, TYPE>>); \ REGISTER_KERNEL_BUILDER( \ Name("TruncatedNormal") \ .Device(DEVICE_GPU) \ .HostMemory("shape") \ .TypeConstraint<int32>("T") \ .TypeConstraint<TYPE>("dtype"), \ PhiloxRandomOp< \ GPUDevice, \ random::TruncatedNormalDistribution< \ random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>); #define REGISTER_FULL_INT(IntType) \ template struct functor::FillPhiloxRandom< \ GPUDevice, \ random::UniformFullIntDistribution<random::PhiloxRandom, IntType>> #define REGISTER_INT(IntType) \ REGISTER_FULL_INT(IntType); \ template struct functor::FillPhiloxRandom< \ GPUDevice, random::UniformDistribution<random::PhiloxRandom, IntType>>; \ REGISTER_KERNEL_BUILDER(Name("RandomUniformInt") \ .Device(DEVICE_GPU) \ .HostMemory("shape") \ .HostMemory("minval") \ .HostMemory("maxval") \ .TypeConstraint<int32>("T") \ .TypeConstraint<IntType>("Tout"), \ RandomUniformIntOp<GPUDevice, IntType>); TF_CALL_half(REGISTER); TF_CALL_bfloat16(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); TF_CALL_int32(REGISTER_INT); TF_CALL_int64(REGISTER_INT); TF_CALL_uint32(REGISTER_FULL_INT); TF_CALL_uint64(REGISTER_FULL_INT); #undef REGISTER #undef REGISTER_INT #undef REGISTER_FULL_INT #endif }
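The RandomGammaOp in the source above implements the Marsaglia–Tsang acceptance–rejection sampler: alpha == 1 falls back to an exponential draw via -log1p(-u); otherwise it draws a normal x, forms v = (1 + c*x)^3 with d = alpha - 1/3 (alpha boosted by 1 when alpha < 1) and c = 1/(3*sqrt(d)), and accepts d*v when the squeeze test or the exact log test passes, multiplying by u^(1/alpha) in the boosted case. The sketch below is a minimal standalone rendering of that scheme using <random> rather than the kernel's per-output Philox streams; the helper name `SampleGamma` is mine and not a TensorFlow API.

```cpp
// Minimal sketch of the Marsaglia-Tsang gamma sampler used by RandomGammaOp,
// written against <random> instead of Philox. "SampleGamma" is a hypothetical
// helper name for illustration only.
#include <cmath>
#include <cstdio>
#include <random>

double SampleGamma(double alpha, std::mt19937& gen) {
  std::normal_distribution<double> normal(0.0, 1.0);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  const bool boost = alpha < 1.0;  // boost alpha by 1, correct with u^(1/alpha) at the end
  const double d = (boost ? alpha + 1.0 : alpha) - 1.0 / 3.0;
  const double c = 1.0 / (3.0 * std::sqrt(d));
  while (true) {
    const double x = normal(gen);
    double v = 1.0 + c * x;
    if (v <= 0.0) continue;  // reject non-positive v outright
    v = v * v * v;
    const double u = uniform(gen);
    // Cheap squeeze test first, then the exact log acceptance test.
    if (u < 1.0 - 0.0331 * (x * x) * (x * x) ||
        std::log(u) < 0.5 * x * x + d * (1.0 - v + std::log(v))) {
      double res = d * v;
      if (boost) res *= std::pow(uniform(gen), 1.0 / alpha);
      return res;
    }
  }
}

int main() {
  std::mt19937 gen(0x12345);
  std::printf("gamma(0.5) sample: %f\n", SampleGamma(0.5, gen));
  return 0;
}
```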
#include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/math/math_util.h" #include "tensorflow/core/lib/random/philox_random.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { Tensor VecShape(int64_t v) { if (v >= std::numeric_limits<int32>::max()) { Tensor shape(DT_INT64, TensorShape({1})); shape.vec<int64_t>()(0) = v; return shape; } else { Tensor shape(DT_INT32, TensorShape({1})); shape.vec<int32>()(0) = v; return shape; } } Graph* RandomUniform(int64_t n) { Graph* g = new Graph(OpRegistry::Global()); test::graph::RandomUniform(g, test::graph::Constant(g, VecShape(n)), DT_FLOAT); return g; } Graph* RandomNormal(int64_t n) { Graph* g = new Graph(OpRegistry::Global()); test::graph::RandomGaussian(g, test::graph::Constant(g, VecShape(n)), DT_FLOAT); return g; } Graph* TruncatedNormal(int64_t n) { Graph* g = new Graph(OpRegistry::Global()); test::graph::TruncatedNormal(g, test::graph::Constant(g, VecShape(n)), DT_FLOAT); return g; } #define BM_RNG(DEVICE, RNG) \ void BM_##DEVICE##_##RNG(::testing::benchmark::State& state) { \ const int arg = state.range(0); \ \ test::Benchmark(#DEVICE, RNG(arg), false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * arg); \ } \ BENCHMARK(BM_##DEVICE##_##RNG)->Range(1 << 20, 8 << 20); BM_RNG(cpu, RandomUniform); BM_RNG(cpu, RandomNormal); BM_RNG(cpu, TruncatedNormal); BM_RNG(gpu, RandomUniform); BM_RNG(gpu, RandomNormal); BM_RNG(gpu, TruncatedNormal); Tensor VecAlphas(int64_t n) { Tensor alphas(DT_DOUBLE, TensorShape({n})); for (int i = 0; i < n; i++) { alphas.vec<double>()(i) = 0.25 + MathUtil::IPow(1.1, i % 2 == 0 ? i : n - i); } return alphas; } void BM_cpu_RandomGamma(::testing::benchmark::State& state) { const int nsamp = state.range(0); const int nalpha = state.range(1); Graph* g = new Graph(OpRegistry::Global()); test::graph::RandomGamma(g, test::graph::Constant(g, VecShape(nsamp)), test::graph::Constant(g, VecAlphas(nalpha))); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * nsamp * nalpha); } BENCHMARK(BM_cpu_RandomGamma)->RangePair(1 << 14, 4 << 15, 2, 50); void BM_PhiloxRandom(::testing::benchmark::State& state) { int count = 2 << 20; random::PhiloxRandom gen(0x12345); for (auto s : state) { for (int j = 0; j < count; j += 4) { auto samples = gen(); tensorflow::testing::DoNotOptimize(samples); } } state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * count); } BENCHMARK(BM_PhiloxRandom); void BM_StdMTRandom(::testing::benchmark::State& state) { int count = 2 << 20; std::mt19937 gen(0x12345); for (auto s : state) { for (int j = 0; j < count; ++j) { uint_fast32_t sample = gen(); tensorflow::testing::DoNotOptimize(sample); } } state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * count); } BENCHMARK(BM_StdMTRandom); } }
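The test file above benchmarks PhiloxRandom against std::mt19937 through the TensorFlow benchmark harness. As a rough standalone analogue of BM_StdMTRandom, the sketch below times raw mt19937 draws with std::chrono; it is illustrative of the measurement pattern only, and the absolute throughput depends on compiler flags and hardware.

```cpp
// Rough standalone analogue of BM_StdMTRandom, timed with std::chrono instead
// of the TensorFlow benchmark harness. Numbers are illustrative only.
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <random>

int main() {
  constexpr int kCount = 2 << 20;  // same sample count as the benchmark above
  std::mt19937 gen(0x12345);
  std::uint64_t sink = 0;          // accumulate so the loop is not optimized away
  const auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < kCount; ++i) sink += gen();
  const auto stop = std::chrono::steady_clock::now();
  const double seconds = std::chrono::duration<double>(stop - start).count();
  std::printf("mt19937: %.1f M samples/s (sink=%llu)\n", kCount / seconds / 1e6,
              static_cast<unsigned long long>(sink));
  return 0;
}
```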
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
52ab6f11-3534-46d9-8fc7-1fd3a10cf0e4
cpp
tensorflow/tensorflow
fused_batch_norm_op
tensorflow/core/kernels/fused_batch_norm_op.cc
tensorflow/core/kernels/fused_batch_norm_op_test.cc
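The CPU kernels in the code field below express fused batch normalization with Eigen expressions: per channel, y = (x - mean) * rsqrt(variance + epsilon) * scale + offset, reducing over the N*H*W "rest" dimension in training mode. As a plain reference for that per-channel transform, here is a minimal sketch over an NHWC float buffer; the function name `BatchNormNHWC` is illustrative and not part of TensorFlow.

```cpp
// Reference sketch of the per-channel normalization the kernel below computes
// with Eigen: y = (x - mean) * rsqrt(var + eps) * scale + offset over NHWC data.
// "BatchNormNHWC" is a hypothetical name for illustration only.
#include <cmath>
#include <vector>

void BatchNormNHWC(const std::vector<float>& x, int depth,
                   const std::vector<float>& mean, const std::vector<float>& var,
                   const std::vector<float>& scale, const std::vector<float>& offset,
                   float epsilon, std::vector<float>& y) {
  const int rest_size = static_cast<int>(x.size()) / depth;  // N * H * W
  for (int r = 0; r < rest_size; ++r) {
    for (int c = 0; c < depth; ++c) {
      const float inv_std = 1.0f / std::sqrt(var[c] + epsilon);
      y[r * depth + c] = (x[r * depth + c] - mean[c]) * inv_std * scale[c] + offset[c];
    }
  }
}
```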
#include <array> #include <atomic> #define EIGEN_USE_THREADS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define EIGEN_USE_GPU #if GOOGLE_CUDA #include "third_party/gpus/cudnn/cudnn.h" #endif #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/gpu_utils.h" #include "tensorflow/core/platform/stream_executor.h" #include "tensorflow/core/util/stream_executor_util.h" #endif #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/kernels/cast_op.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/kernels/fused_batch_norm_op.h" #include "tensorflow/core/kernels/redux_functor.h" #include "tensorflow/core/kernels/transpose_functor.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/util/env_var.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; namespace functor { #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM using se::DeviceMemory; using se::ScratchAllocator; using se::Stream; using tsl::StatusOr; #endif string ToString(FusedBatchNormActivationMode activation_mode) { switch (activation_mode) { case FusedBatchNormActivationMode::kIdentity: return "Identity"; case FusedBatchNormActivationMode::kRelu: return "Relu"; } } Status ParseActivationMode(OpKernelConstruction* context, FusedBatchNormActivationMode* activation_mode) { string activation_mode_str; TF_RETURN_IF_ERROR(context->GetAttr("activation_mode", &activation_mode_str)); if (activation_mode_str == "Identity") { *activation_mode = FusedBatchNormActivationMode::kIdentity; return absl::OkStatus(); } if (activation_mode_str == "Relu") { *activation_mode = FusedBatchNormActivationMode::kRelu; return absl::OkStatus(); } return errors::InvalidArgument("Unsupported activation mode: ", activation_mode_str); } template <typename Device, typename T, typename U, bool is_training> struct FusedBatchNorm; template <typename Device, typename T, typename U> struct FusedBatchNormGrad; template <typename T, typename U> struct FusedBatchNorm<CPUDevice, T, U, true> { void operator()(OpKernelContext* context, const Tensor& x_input, const Tensor& scale_input, const Tensor& offset_input, const Tensor& running_mean_input, const Tensor& running_variance_input, const Tensor* side_input, U epsilon, U exponential_avg_factor, FusedBatchNormActivationMode activation_mode, Tensor* y_output, Tensor* running_mean_output, Tensor* running_var_output, Tensor* saved_batch_mean_output, Tensor* saved_batch_var_output, TensorFormat tensor_format, bool use_reserved_space) { OP_REQUIRES(context, side_input == nullptr, errors::Internal( "The CPU implementation of FusedBatchNorm does not support " "side input.")); OP_REQUIRES(context, activation_mode == FusedBatchNormActivationMode::kIdentity, errors::Internal("The CPU implementation of FusedBatchNorm " "does not support activations.")); if (use_reserved_space) { Tensor* dummy_reserve_space = nullptr; OP_REQUIRES_OK(context, context->allocate_output(5, {}, &dummy_reserve_space)); dummy_reserve_space->flat<U>()(0) = U(); } if (x_input.shape().num_elements() == 0) { functor::SetNanFunctor<CPUDevice, U> f; f(context->eigen_device<CPUDevice>(), running_mean_output->flat<U>()); f(context->eigen_device<CPUDevice>(), running_var_output->flat<U>()); return; } 
Tensor transformed_x; Tensor transformed_y; if (tensor_format == FORMAT_NCHW) { const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N'); const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H'); const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W'); const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C'); TensorShape transformed_x_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_x_shape)); OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_x_shape, &transformed_x)); TensorShape transformed_y_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_y_shape)); OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_y_shape, &transformed_y)); std::array<int32, 4> perm = {0, 2, 3, 1}; OP_REQUIRES_OK( context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(), x_input, perm, &transformed_x)); } else { transformed_x = x_input; transformed_y = *y_output; } typename TTypes<T, 4>::Tensor x(transformed_x.tensor<T, 4>()); typename TTypes<U>::ConstVec scale(scale_input.vec<U>()); typename TTypes<U>::ConstVec offset(offset_input.vec<U>()); typename TTypes<U>::ConstVec old_mean(running_mean_input.vec<U>()); typename TTypes<U>::ConstVec old_variance(running_variance_input.vec<U>()); typename TTypes<T, 4>::Tensor y(transformed_y.tensor<T, 4>()); typename TTypes<U>::Vec new_mean(running_mean_output->vec<U>()); typename TTypes<U>::Vec new_variance(running_var_output->vec<U>()); typename TTypes<U>::Vec saved_batch_mean(saved_batch_mean_output->vec<U>()); typename TTypes<U>::Vec saved_batch_var(saved_batch_var_output->vec<U>()); const CPUDevice& d = context->eigen_device<CPUDevice>(); const int depth = x.dimension(3); const int size = x.size(); const int rest_size = size / depth; Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth); Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth; one_by_depth.set(1, depth); Eigen::IndexList<Eigen::type2index<0>> reduce_dims; Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec; bcast_spec.set(0, rest_size); auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>(); const int rest_size_minus_one = (rest_size > 1) ? 
(rest_size - 1) : 1; U rest_size_inv = static_cast<U>(1.0f / static_cast<U>(rest_size)); U rest_size_adjust = static_cast<U>(rest_size) / static_cast<U>(rest_size_minus_one); Eigen::Tensor<U, 1, Eigen::RowMajor> batch_mean(depth); Eigen::Tensor<U, 1, Eigen::RowMajor> batch_variance(depth); batch_mean.device(d) = (x_rest_by_depth.sum(reduce_dims) * rest_size_inv); auto x_centered = x_rest_by_depth - batch_mean.reshape(one_by_depth).broadcast(bcast_spec); batch_variance.device(d) = x_centered.square().sum(reduce_dims) * rest_size_inv; auto scaling_factor = ((batch_variance + epsilon).rsqrt() * scale) .eval() .reshape(one_by_depth) .broadcast(bcast_spec); auto x_scaled = x_centered * scaling_factor; auto x_shifted = (x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec)) .template cast<T>(); y.reshape(rest_by_depth).device(d) = x_shifted; if (exponential_avg_factor == U(1.0)) { saved_batch_var.device(d) = batch_variance; saved_batch_mean.device(d) = batch_mean; new_variance.device(d) = batch_variance * rest_size_adjust; new_mean.device(d) = batch_mean; } else { U one_minus_factor = U(1) - exponential_avg_factor; saved_batch_var.device(d) = batch_variance; saved_batch_mean.device(d) = batch_mean; new_variance.device(d) = one_minus_factor * old_variance + (exponential_avg_factor * rest_size_adjust) * batch_variance; new_mean.device(d) = one_minus_factor * old_mean + exponential_avg_factor * batch_mean; } if (tensor_format == FORMAT_NCHW) { const std::array<int32, 4> perm = {0, 3, 1, 2}; const Status s = ::tensorflow::DoTranspose( context->eigen_device<CPUDevice>(), transformed_y, perm, y_output); if (!s.ok()) { context->SetStatus(errors::InvalidArgument("Transpose failed: ", s)); } } } }; template <typename T, typename U> struct FusedBatchNorm<CPUDevice, T, U, false> { void operator()(OpKernelContext* context, const Tensor& x_input, const Tensor& scale_input, const Tensor& offset_input, const Tensor& estimated_mean_input, const Tensor& estimated_variance_input, const Tensor* side_input, U epsilon, U exponential_avg_factor, FusedBatchNormActivationMode activation_mode, Tensor* y_output, Tensor* batch_mean_output, Tensor* batch_var_output, Tensor* saved_mean_output, Tensor* saved_var_output, TensorFormat tensor_format, bool use_reserved_space) { OP_REQUIRES(context, side_input == nullptr, errors::Internal( "The CPU implementation of FusedBatchNorm does not support " "side input.")); OP_REQUIRES(context, activation_mode == FusedBatchNormActivationMode::kIdentity, errors::Internal("The CPU implementation of FusedBatchNorm " "does not support activations.")); if (use_reserved_space) { Tensor* dummy_reserve_space = nullptr; OP_REQUIRES_OK(context, context->allocate_output(5, {}, &dummy_reserve_space)); dummy_reserve_space->flat<U>()(0) = U(); } if (x_input.shape().num_elements() == 0) { functor::SetNanFunctor<CPUDevice, U> f; f(context->eigen_device<CPUDevice>(), batch_mean_output->flat<U>()); f(context->eigen_device<CPUDevice>(), batch_var_output->flat<U>()); return; } Tensor transformed_x; Tensor transformed_y; if (tensor_format == FORMAT_NCHW) { const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N'); const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H'); const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W'); const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C'); TensorShape transformed_x_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_x_shape)); OP_REQUIRES_OK( 
context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_x_shape, &transformed_x)); TensorShape transformed_y_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_y_shape)); OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_y_shape, &transformed_y)); std::array<int32, 4> perm = {0, 2, 3, 1}; OP_REQUIRES_OK( context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(), x_input, perm, &transformed_x)); } else { transformed_x = x_input; transformed_y = *y_output; } typename TTypes<T, 4>::Tensor x(transformed_x.tensor<T, 4>()); typename TTypes<U>::ConstVec scale(scale_input.vec<U>()); typename TTypes<U>::ConstVec offset(offset_input.vec<U>()); typename TTypes<U>::ConstVec estimated_mean(estimated_mean_input.vec<U>()); typename TTypes<U>::ConstVec estimated_variance( estimated_variance_input.vec<U>()); typename TTypes<T, 4>::Tensor y(transformed_y.tensor<T, 4>()); typename TTypes<U>::Vec batch_mean(batch_mean_output->vec<U>()); typename TTypes<U>::Vec batch_variance(batch_var_output->vec<U>()); const CPUDevice& d = context->eigen_device<CPUDevice>(); const int depth = x.dimension(3); OP_REQUIRES( context, depth != 0, errors::Internal("The 4th element in the input shape cannot be 0.")); const int size = x.size(); const int rest_size = size / depth; Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth); Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth; one_by_depth.set(1, depth); Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec; bcast_spec.set(0, rest_size); auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>(); auto x_centered = x_rest_by_depth - estimated_mean.reshape(one_by_depth).broadcast(bcast_spec); auto scaling_factor = ((estimated_variance + epsilon).rsqrt() * scale) .eval() .reshape(one_by_depth) .broadcast(bcast_spec); auto x_scaled = x_centered * scaling_factor; auto x_shifted = (x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec)) .template cast<T>(); y.reshape(rest_by_depth).device(d) = x_shifted; batch_mean.device(d) = estimated_mean; batch_variance.device(d) = estimated_variance; if (tensor_format == FORMAT_NCHW) { const std::array<int32, 4> perm = {0, 3, 1, 2}; const Status s = ::tensorflow::DoTranspose( context->eigen_device<CPUDevice>(), transformed_y, perm, y_output); if (!s.ok()) { context->SetStatus(errors::InvalidArgument("Transpose failed: ", s)); } } } }; template <typename T, typename U> struct FusedBatchNormGrad<CPUDevice, T, U> { void operator()(OpKernelContext* context, const Tensor& y_backprop_input, const Tensor& x_input, const Tensor& scale_input, const Tensor* offset_input, const Tensor& mean_input, const Tensor& variance_input, const Tensor* y_input, U epsilon, FusedBatchNormActivationMode activation_mode, Tensor* x_backprop_output, Tensor* scale_backprop_output, Tensor* offset_backprop_output, Tensor* side_input_backprop_output, bool use_reserved_space, TensorFormat tensor_format) { OP_REQUIRES(context, y_input == nullptr && activation_mode == FusedBatchNormActivationMode::kIdentity, errors::Internal( "The CPU implementation of FusedBatchNormGrad does not " "support activations.")); OP_REQUIRES(context, side_input_backprop_output == nullptr, errors::Internal("The CPU implementation of FusedBatchNormGrad " "does not support side input.")); Tensor transformed_y_backprop_input; Tensor transformed_x_input; Tensor transformed_x_backprop_output; if (tensor_format == FORMAT_NCHW) 
{ const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N'); const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H'); const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W'); const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C'); TensorShape transformed_y_backprop_input_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_y_backprop_input_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_y_backprop_input_shape, &transformed_y_backprop_input)); TensorShape transformed_x_input_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_x_input_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_x_input_shape, &transformed_x_input)); TensorShape transformed_x_backprop_output_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths, &transformed_x_backprop_output_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, transformed_x_backprop_output_shape, &transformed_x_backprop_output)); std::array<int32, 4> perm = {0, 2, 3, 1}; OP_REQUIRES_OK( context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(), y_backprop_input, perm, &transformed_y_backprop_input)); OP_REQUIRES_OK(context, ::tensorflow::DoTranspose( context->eigen_device<CPUDevice>(), x_input, perm, &transformed_x_input)); } else { transformed_y_backprop_input = y_backprop_input; transformed_x_input = x_input; transformed_x_backprop_output = *x_backprop_output; } typename TTypes<T, 4>::Tensor y_backprop( transformed_y_backprop_input.tensor<T, 4>()); typename TTypes<T, 4>::Tensor x(transformed_x_input.tensor<T, 4>()); typename TTypes<U>::ConstVec scale(scale_input.vec<U>()); typename TTypes<U>::ConstVec mean(mean_input.vec<U>()); typename TTypes<U>::ConstVec variance(variance_input.vec<U>()); typename TTypes<T, 4>::Tensor x_backprop( transformed_x_backprop_output.tensor<T, 4>()); typename TTypes<U>::Vec offset_backprop(offset_backprop_output->vec<U>()); const CPUDevice& d = context->eigen_device<CPUDevice>(); const int depth = x.dimension(3); const int size = x.size(); const int rest_size = size / depth; Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth); Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth; one_by_depth.set(1, depth); Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec; bcast_spec.set(0, rest_size); auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>(); U rest_size_inv = static_cast<U>(1.0f / static_cast<U>(rest_size)); using ScalarSum = Eigen::internal::scalar_sum_op<U>; const functor::ReduceOuterDimensions<T, U, U, ScalarSum> redux_sum_t; const functor::ReduceOuterDimensions<U, U, U, ScalarSum> redux_sum_u; auto scratch_dtype = DataTypeToEnum<U>::value; Tensor scratch_one_by_depth; OP_REQUIRES_OK(context, context->allocate_temp(scratch_dtype, {depth}, &scratch_one_by_depth)); Tensor scratch_rest_by_depth; if (std::is_same<T, U>::value) { OP_REQUIRES(context, scratch_rest_by_depth.CopyFrom(transformed_x_backprop_output, {rest_size, depth}), errors::Internal("Failed to copy a tensor")); } else { OP_REQUIRES_OK(context, context->allocate_temp(scratch_dtype, {rest_size, depth}, &scratch_rest_by_depth)); } typename TTypes<U, 2>::Tensor scratch_tensor( scratch_rest_by_depth.tensor<U, 2>()); typename TTypes<U>::Vec 
scratch_vector(scratch_one_by_depth.vec<U>()); auto x_mean_rest_by_depth = mean.reshape(one_by_depth).broadcast(bcast_spec); auto x_centered = (x_rest_by_depth - x_mean_rest_by_depth); auto coef0_one_by_depth = (variance.reshape(one_by_depth) + epsilon).rsqrt(); auto coef0_rest_by_depth = coef0_one_by_depth.broadcast(bcast_spec); auto x_scaled = x_centered * coef0_rest_by_depth; auto y_backprop_rest_by_depth = y_backprop.reshape(rest_by_depth).template cast<U>(); scratch_tensor.device(d) = y_backprop_rest_by_depth * x_scaled; redux_sum_u(d, rest_by_depth, scratch_rest_by_depth, scale_backprop_output); redux_sum_t(d, rest_by_depth, transformed_y_backprop_input, offset_backprop_output); auto y_backprop_sum = offset_backprop; auto y_backprop_sum_one_by_depth = y_backprop_sum.reshape(one_by_depth); auto y_backprop_mean_one_by_depth = y_backprop_sum_one_by_depth * rest_size_inv; auto y_backprop_mean_rest_by_depth = y_backprop_mean_one_by_depth.broadcast(bcast_spec); auto y_backprop_centered = y_backprop_rest_by_depth - y_backprop_mean_rest_by_depth; scratch_tensor.device(d) = y_backprop_rest_by_depth * x_centered; redux_sum_u(d, rest_by_depth, scratch_rest_by_depth, &scratch_one_by_depth); auto y_backprop_centered_mean = scratch_vector.reshape(one_by_depth) / static_cast<U>(rest_size); auto coef1 = (scale.reshape(one_by_depth) * coef0_one_by_depth) .broadcast(bcast_spec); auto coef2 = (coef0_one_by_depth.square() * y_backprop_centered_mean) .broadcast(bcast_spec); x_backprop.reshape(rest_by_depth).device(d) = (coef1 * (y_backprop_centered - x_centered * coef2)).template cast<T>(); if (tensor_format == FORMAT_NCHW) { std::array<int32, 4> perm = {0, 3, 1, 2}; OP_REQUIRES_OK( context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(), transformed_x_backprop_output, perm, x_backprop_output)); } } }; template <typename T, typename U> struct FusedBatchNormFreezeGrad<CPUDevice, T, U> { void operator()(OpKernelContext* context, const Tensor& y_backprop_input, const Tensor& x_input, const Tensor& scale_input, const Tensor& pop_mean_input, const Tensor& pop_variance_input, U epsilon, Tensor* x_backprop_output, Tensor* scale_backprop_output, Tensor* offset_backprop_output) { typename TTypes<T, 4>::ConstTensor y_backprop( y_backprop_input.tensor<T, 4>()); typename TTypes<T, 4>::ConstTensor input(x_input.tensor<T, 4>()); typename TTypes<U>::ConstVec scale(scale_input.vec<U>()); typename TTypes<U>::ConstVec pop_mean(pop_mean_input.vec<U>()); typename TTypes<U>::ConstVec pop_var(pop_variance_input.vec<U>()); typename TTypes<T, 4>::Tensor x_backprop(x_backprop_output->tensor<T, 4>()); typename TTypes<U>::Vec scale_backprop(scale_backprop_output->vec<U>()); const int depth = pop_mean.dimension(0); const int rest_size = input.size() / depth; const CPUDevice& d = context->eigen_device<CPUDevice>(); Tensor scratch1_vec, scratch2_vec; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value, {depth}, &scratch1_vec)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value, {depth}, &scratch2_vec)); Tensor scratch3_tensor; if (std::is_same<T, U>::value) { OP_REQUIRES( context, scratch3_tensor.CopyFrom(*x_backprop_output, {rest_size, depth}), errors::Internal("Failed to copy a tensor")); } else { OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value, {rest_size, depth}, &scratch3_tensor)); } typename TTypes<U>::Vec scratch1(scratch1_vec.vec<U>()); typename TTypes<U>::Vec scratch2(scratch2_vec.vec<U>()); typename TTypes<U, 2>::Tensor 
scratch3(scratch3_tensor.tensor<U, 2>()); Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth); Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth; one_by_depth.set(1, depth); Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> rest_by_one; rest_by_one.set(0, rest_size); using ScalarSum = Eigen::internal::scalar_sum_op<U>; const functor::ReduceOuterDimensions<T, U, U, ScalarSum> redux_sum_t; const functor::ReduceOuterDimensions<U, U, U, ScalarSum> redux_sum_u; auto y_backprop_rest_by_depth = y_backprop.reshape(rest_by_depth).template cast<U>(); auto input_rest_by_depth = input.reshape(rest_by_depth).template cast<U>(); redux_sum_t(d, rest_by_depth, y_backprop_input, offset_backprop_output); scratch1 = (pop_var + pop_var.constant(epsilon)).rsqrt(); scratch3.device(d) = y_backprop_rest_by_depth * (input_rest_by_depth - pop_mean.reshape(one_by_depth).broadcast(rest_by_one)); redux_sum_u(d, rest_by_depth, scratch3_tensor, &scratch2_vec); x_backprop.reshape(rest_by_depth).device(d) = (y_backprop_rest_by_depth * ((scratch1.reshape(one_by_depth) * scale.reshape(one_by_depth)) .broadcast(rest_by_one))) .template cast<T>(); scale_backprop = scratch2 * scratch1; } }; #if !GOOGLE_CUDA namespace { bool BatchnormSpatialPersistentEnabled() { return false; } } #endif #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace { se::dnn::ActivationMode AsDnnActivationMode( const FusedBatchNormActivationMode activation_mode) { switch (activation_mode) { case FusedBatchNormActivationMode::kIdentity: return se::dnn::ActivationMode::kNone; case FusedBatchNormActivationMode::kRelu: return se::dnn::ActivationMode::kRelu; } } #if GOOGLE_CUDA bool BatchnormSpatialPersistentEnabled() { #if CUDNN_VERSION >= 7402 static bool is_enabled = [] { bool is_enabled = false; TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar( "TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT", false, &is_enabled)); return is_enabled; }(); return is_enabled; #else return false; #endif } #endif } template <typename U, typename T> DeviceMemory<U> CastDeviceMemory(Tensor* tensor) { return DeviceMemory<U>::MakeFromByteSize( tensor->template flat<T>().data(), tensor->template flat<T>().size() * sizeof(T)); } template <typename T> class CudnnBatchNormAllocatorInTemp : public ScratchAllocator { public: ~CudnnBatchNormAllocatorInTemp() override = default; explicit CudnnBatchNormAllocatorInTemp(OpKernelContext* context) : context_(context) {} int64_t GetMemoryLimitInBytes() override { return std::numeric_limits<int64_t>::max(); } StatusOr<DeviceMemory<uint8>> AllocateBytes(int64_t byte_size) override { Tensor temporary_memory; const DataType tf_data_type = DataTypeToEnum<T>::v(); int64_t allocate_count = Eigen::divup(byte_size, static_cast<int64_t>(sizeof(T))); Status allocation_status(context_->allocate_temp( tf_data_type, TensorShape({allocate_count}), &temporary_memory)); if (!allocation_status.ok()) { return allocation_status; } allocated_tensors_.push_back(temporary_memory); total_byte_size_ += byte_size; return DeviceMemory<uint8>::MakeFromByteSize( temporary_memory.template flat<T>().data(), temporary_memory.template flat<T>().size() * sizeof(T)); } int64_t TotalByteSize() const { return total_byte_size_; } Tensor get_allocated_tensor(int index) const { return allocated_tensors_[index]; } private: int64_t total_byte_size_ = 0; OpKernelContext* context_; std::vector<Tensor> allocated_tensors_; }; template <typename T> class CudnnBatchNormAllocatorInOutput : public ScratchAllocator { public: ~CudnnBatchNormAllocatorInOutput() override { if 
(!output_allocated) { Tensor* dummy_reserve_space = nullptr; OP_REQUIRES_OK(context_, context_->allocate_output(output_index_, {}, &dummy_reserve_space)); } } CudnnBatchNormAllocatorInOutput(OpKernelContext* context, int output_index) : context_(context), output_index_(output_index) {} int64_t GetMemoryLimitInBytes() override { return std::numeric_limits<int64_t>::max(); } StatusOr<DeviceMemory<uint8>> AllocateBytes(int64_t byte_size) override { output_allocated = true; DCHECK(total_byte_size_ == 0) << "Reserve space allocator can only be called once"; int64_t allocate_count = Eigen::divup(byte_size, static_cast<int64_t>(sizeof(T))); Tensor* temporary_memory = nullptr; Status allocation_status(context_->allocate_output( output_index_, TensorShape({allocate_count}), &temporary_memory)); if (!allocation_status.ok()) { return allocation_status; } total_byte_size_ += byte_size; auto memory_uint8 = DeviceMemory<uint8>::MakeFromByteSize( temporary_memory->template flat<T>().data(), temporary_memory->template flat<T>().size() * sizeof(T)); return StatusOr<DeviceMemory<uint8>>(memory_uint8); } int64_t TotalByteSize() { return total_byte_size_; } private: int64_t total_byte_size_ = 0; OpKernelContext* context_; int output_index_; bool output_allocated = false; }; template <typename T, typename U, bool is_training> struct FusedBatchNormImplGPU { void operator()(OpKernelContext* context, const Tensor& x, const Tensor& scale, const Tensor& offset, const Tensor& estimated_mean, const Tensor& estimated_variance, const Tensor* side_input, U epsilon, U exponential_avg_factor, FusedBatchNormActivationMode activation_mode, Tensor* y, Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean, Tensor* saved_inv_var, TensorFormat tensor_format, bool use_reserved_space) { auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available")); const int64_t batch_size = GetTensorDim(x, tensor_format, 'N'); const int64_t channels = GetTensorDim(x, tensor_format, 'C'); const int64_t height = GetTensorDim(x, tensor_format, 'H'); const int64_t width = GetTensorDim(x, tensor_format, 'W'); #if GOOGLE_CUDA const bool fast_nhwc_batch_norm = !is_training || (BatchnormSpatialPersistentEnabled() && (DataTypeToEnum<T>::value == DT_HALF || DataTypeToEnum<T>::value == DT_BFLOAT16) && use_reserved_space); #else const bool fast_nhwc_batch_norm = false; #endif TensorFormat compute_format = fast_nhwc_batch_norm && tensor_format == FORMAT_NHWC ? 
FORMAT_NHWC : FORMAT_NCHW; VLOG(2) << "FusedBatchNorm:" << " batch_size: " << batch_size << " channels: " << channels << " height: " << height << " width:" << width << " x shape: " << x.shape().DebugString() << " scale shape: " << scale.shape().DebugString() << " offset shape: " << offset.shape().DebugString() << " activation mode: " << ToString(activation_mode) << " tensor format: " << ToString(tensor_format) << " compute format: " << ToString(compute_format); auto maybe_make_dummy_output = [context, use_reserved_space]() -> Status { if (use_reserved_space) { Tensor* dummy_reserve_space = nullptr; return context->allocate_output(5, {}, &dummy_reserve_space); } return OkStatus(); }; if (x.shape().num_elements() == 0) { OP_REQUIRES_OK(context, maybe_make_dummy_output()); functor::SetNanFunctor<GPUDevice, U> f; f(context->eigen_device<GPUDevice>(), batch_mean->flat<U>()); f(context->eigen_device<GPUDevice>(), batch_var->flat<U>()); return; } const bool has_side_input = side_input != nullptr; const bool has_activation = activation_mode != FusedBatchNormActivationMode::kIdentity; if (!is_training && (has_side_input || has_activation)) { OP_REQUIRES_OK(context, maybe_make_dummy_output()); FusedBatchNormInferenceFunctor<GPUDevice, T, U> inference_functor; if (has_side_input) { inference_functor(context, tensor_format, x.tensor<T, 4>(), scale.vec<U>(), offset.vec<U>(), estimated_mean.vec<U>(), estimated_variance.vec<U>(), side_input->tensor<T, 4>(), epsilon, activation_mode, y->tensor<T, 4>()); } else { typename TTypes<T, 4>::ConstTensor empty_tensor(nullptr, 0, 0, 0, 0); inference_functor(context, tensor_format, x.tensor<T, 4>(), scale.vec<U>(), offset.vec<U>(), estimated_mean.vec<U>(), estimated_variance.vec<U>(), empty_tensor, epsilon, activation_mode, y->tensor<T, 4>()); } return; } Tensor x_maybe_transformed = x; Tensor x_transformed; Tensor y_transformed; se::DeviceMemory<T> y_ptr; if (tensor_format == compute_format) { y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*y); } else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) { TensorShape x_transformed_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( compute_format, batch_size, height, width, channels, &x_transformed_shape)); OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, x_transformed_shape, &x_transformed)); functor::NHWCToNCHW<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(), x_transformed.tensor<T, 4>()); x_maybe_transformed = x_transformed; TensorShape y_transformed_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( compute_format, batch_size, height, width, channels, &y_transformed_shape)); OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, y_transformed_shape, &y_transformed)); y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(y_transformed); } else { context->SetStatus(errors::Internal( "Unsupported tensor format: ", ToString(tensor_format), " and compute format: ", ToString(compute_format))); return; } const se::dnn::DataLayout data_layout = compute_format == FORMAT_NHWC ? 
se::dnn::DataLayout::kBatchYXDepth : se::dnn::DataLayout::kBatchDepthYX; se::dnn::BatchDescriptor x_desc; x_desc.set_count(batch_size) .set_feature_map_count(channels) .set_height(height) .set_width(width) .set_layout(data_layout); se::dnn::BatchDescriptor scale_offset_desc; scale_offset_desc.set_count(1) .set_feature_map_count(channels) .set_height(1) .set_width(1) .set_layout(se::dnn::DataLayout::kBatchDepthYX); auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed); auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale); auto offset_ptr = StreamExecutorUtil::AsDeviceMemory<U>(offset); auto estimated_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(estimated_mean); auto estimated_variance_ptr = StreamExecutorUtil::AsDeviceMemory<U>(estimated_variance); auto side_input_ptr = side_input != nullptr ? StreamExecutorUtil::AsDeviceMemory<T>(*side_input) : se::DeviceMemory<T>(); auto batch_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_mean); auto batch_var_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_var); auto saved_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*saved_mean); auto saved_inv_var_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*saved_inv_var); std::unique_ptr<functor::CudnnBatchNormAllocatorInOutput<U>> reserve_space_allocator; std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>> workspace_allocator; if (use_reserved_space) { reserve_space_allocator.reset( new functor::CudnnBatchNormAllocatorInOutput<U>(context, 5)); workspace_allocator.reset( new functor::CudnnBatchNormAllocatorInTemp<uint8>(context)); } if (!batch_mean->SharesBufferWith(estimated_mean) && exponential_avg_factor != 1.0f) { OP_REQUIRES_OK( context, stream->MemcpyD2D(&batch_mean_ptr, estimated_mean_ptr, estimated_mean.NumElements() * sizeof(U))); } if (!batch_var->SharesBufferWith(estimated_variance) && exponential_avg_factor != 1.0f) { OP_REQUIRES_OK( context, stream->MemcpyD2D(&batch_var_ptr, estimated_variance_ptr, estimated_variance.NumElements() * sizeof(U))); } auto dnn = stream->parent()->AsDnn(); if (dnn == nullptr) { context->SetStatus(absl::InternalError("No DNN support for stream")); return; } bool cudnn_launch_status = dnn->DoBatchNormalizationForward( stream, x_ptr, scale_ptr, offset_ptr, estimated_mean_ptr, estimated_variance_ptr, side_input_ptr, x_desc, scale_offset_desc, static_cast<double>(epsilon), static_cast<double>(exponential_avg_factor), AsDnnActivationMode(activation_mode), &y_ptr, &batch_mean_ptr, &batch_var_ptr, &saved_mean_ptr, &saved_inv_var_ptr, is_training, reserve_space_allocator.get(), workspace_allocator.get()); if (!cudnn_launch_status) { context->SetStatus( errors::Internal("cuDNN launch failure : input shape (", x.shape().DebugString(), ")")); return; } if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) { functor::NCHWToNHWC<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(y_transformed).tensor<T, 4>(), y->tensor<T, 4>()); } } }; template <typename T, typename U, bool is_training> struct FusedBatchNorm<GPUDevice, T, U, is_training> { void operator()(OpKernelContext* context, const Tensor& x, const Tensor& scale, const Tensor& offset, const Tensor& estimated_mean, const Tensor& estimated_variance, const Tensor* side_input, U epsilon, U exponential_avg_factor, FusedBatchNormActivationMode activation_mode, Tensor* y, Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean, Tensor* saved_inv_var, TensorFormat tensor_format, bool use_reserved_space) { FusedBatchNormImplGPU<T, U, 
is_training>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon, exponential_avg_factor, activation_mode, y, batch_mean, batch_var, saved_mean, saved_inv_var, tensor_format, use_reserved_space); } }; template <bool is_training> struct FusedBatchNorm<GPUDevice, Eigen::bfloat16, float, is_training> { void operator()(OpKernelContext* context, const Tensor& x, const Tensor& scale, const Tensor& offset, const Tensor& estimated_mean, const Tensor& estimated_variance, const Tensor* side_input, float epsilon, float exponential_avg_factor, FusedBatchNormActivationMode activation_mode, Tensor* y, Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean, Tensor* saved_inv_var, TensorFormat tensor_format, bool use_reserved_space) { auto* stream = context->op_device_context()->stream(); const bool cast_to_float = !IsBF16SupportedInOps(stream); if (cast_to_float) { Tensor casted_x = x; Tensor casted_side_input; Tensor casted_y = *y; const GPUDevice& device = context->eigen_device<GPUDevice>(); functor::CastFunctor<GPUDevice, float, Eigen::bfloat16> cast; OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, x.shape(), &casted_x)); cast(device, casted_x.template flat<float>(), x.template flat<Eigen::bfloat16>()); if (side_input != nullptr) { OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, side_input->shape(), &casted_side_input)); cast(device, casted_side_input.template flat<float>(), side_input->template flat<Eigen::bfloat16>()); } OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, y->shape(), &casted_y)); FusedBatchNormImplGPU<float, float, is_training>()( context, casted_x, scale, offset, estimated_mean, estimated_variance, (side_input != nullptr) ? &casted_side_input : nullptr, epsilon, exponential_avg_factor, activation_mode, &casted_y, batch_mean, batch_var, saved_mean, saved_inv_var, tensor_format, use_reserved_space); functor::CastFunctor<GPUDevice, Eigen::bfloat16, float> cast_back; const Tensor& casted_y_const = casted_y; cast_back(device, y->template flat<Eigen::bfloat16>(), casted_y_const.template flat<float>()); return; } FusedBatchNormImplGPU<Eigen::bfloat16, float, is_training>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon, exponential_avg_factor, activation_mode, y, batch_mean, batch_var, saved_mean, saved_inv_var, tensor_format, use_reserved_space); } }; template <typename T, typename U> struct FusedBatchNormGradImplGPU { void operator()(OpKernelContext* context, const Tensor& y_backprop, const Tensor& x, const Tensor& scale, const Tensor* offset, const Tensor& mean, const Tensor& inv_variance, const Tensor* y, U epsilon, FusedBatchNormActivationMode activation_mode, Tensor* x_backprop, Tensor* scale_backprop, Tensor* offset_backprop, Tensor* side_input_backprop, bool use_reserved_space, TensorFormat tensor_format) { auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available")); const int64_t batch_size = GetTensorDim(x, tensor_format, 'N'); const int64_t channels = GetTensorDim(x, tensor_format, 'C'); const int64_t height = GetTensorDim(x, tensor_format, 'H'); const int64_t width = GetTensorDim(x, tensor_format, 'W'); #if GOOGLE_CUDA const bool fast_nhwc_batch_norm = BatchnormSpatialPersistentEnabled() && (DataTypeToEnum<T>::value == DT_HALF || DataTypeToEnum<T>::value == DT_BFLOAT16) && use_reserved_space; #else const bool fast_nhwc_batch_norm = false; #endif TensorFormat compute_format = fast_nhwc_batch_norm && tensor_format 
== FORMAT_NHWC ? FORMAT_NHWC : FORMAT_NCHW; VLOG(2) << "FusedBatchNormGrad:" << " batch_size: " << batch_size << " channels: " << channels << " height: " << height << " width: " << width << " y_backprop shape: " << y_backprop.shape().DebugString() << " x shape: " << x.shape().DebugString() << " scale shape: " << scale.shape().DebugString() << " activation mode: " << ToString(activation_mode) << " tensor format: " << ToString(tensor_format) << " compute format: " << ToString(compute_format); Tensor y_backprop_maybe_transformed = y_backprop; Tensor x_maybe_transformed = x; Tensor y_backprop_transformed; Tensor x_transformed; Tensor x_backprop_transformed; se::DeviceMemory<T> x_backprop_ptr; if (tensor_format == compute_format) { x_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*x_backprop); } else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) { TensorShape y_backprop_transformed_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, batch_size, height, width, channels, &y_backprop_transformed_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, y_backprop_transformed_shape, &y_backprop_transformed)); functor::NHWCToNCHW<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(y_backprop_maybe_transformed) .tensor<T, 4>(), y_backprop_transformed.tensor<T, 4>()); y_backprop_maybe_transformed = y_backprop_transformed; TensorShape x_transformed_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(FORMAT_NCHW, batch_size, height, width, channels, &x_transformed_shape)); OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, x_transformed_shape, &x_transformed)); functor::NHWCToNCHW<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(), x_transformed.tensor<T, 4>()); x_maybe_transformed = x_transformed; TensorShape x_backprop_transformed_shape; OP_REQUIRES_OK(context, ShapeFromFormatWithStatus( FORMAT_NCHW, batch_size, height, width, channels, &x_backprop_transformed_shape)); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, x_backprop_transformed_shape, &x_backprop_transformed)); x_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_backprop_transformed); } else { context->SetStatus(errors::Internal( "Unsupported tensor format: ", ToString(tensor_format), " and compute format: ", ToString(compute_format))); return; } const se::dnn::DataLayout data_layout = compute_format == FORMAT_NHWC ? se::dnn::DataLayout::kBatchYXDepth : se::dnn::DataLayout::kBatchDepthYX; se::dnn::BatchDescriptor x_desc; x_desc.set_count(batch_size) .set_feature_map_count(channels) .set_height(height) .set_width(width) .set_layout(data_layout); se::dnn::BatchDescriptor scale_offset_desc; scale_offset_desc.set_count(1) .set_feature_map_count(channels) .set_height(1) .set_width(1) .set_layout(se::dnn::DataLayout::kBatchDepthYX); auto y_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(y_backprop_maybe_transformed); auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed); auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale); auto offset_ptr = offset != nullptr ? StreamExecutorUtil::AsDeviceMemory<U>(*offset) : se::DeviceMemory<U>(); auto mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(mean); auto inv_variance_ptr = StreamExecutorUtil::AsDeviceMemory<U>(inv_variance); auto y_ptr = y != nullptr ? 
StreamExecutorUtil::AsDeviceMemory<T>(*y) : se::DeviceMemory<T>(); auto scale_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*scale_backprop); auto offset_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*offset_backprop); auto side_input_backprop_ptr = side_input_backprop != nullptr ? StreamExecutorUtil::AsDeviceMemory<T>(*side_input_backprop) : se::DeviceMemory<T>(); std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>> workspace_allocator; DeviceMemory<uint8>* reserve_space_data_ptr = nullptr; DeviceMemory<uint8> reserve_space_data; #if CUDNN_VERSION >= 7402 if (use_reserved_space) { const Tensor& reserve_space = context->input(5); workspace_allocator.reset( new functor::CudnnBatchNormAllocatorInTemp<uint8>(context)); if (reserve_space.dims() != 0) { reserve_space_data = functor::CastDeviceMemory<uint8, U>( const_cast<Tensor*>(&reserve_space)); reserve_space_data_ptr = &reserve_space_data; } } #endif auto dnn = stream->parent()->AsDnn(); if (dnn == nullptr) { context->SetStatus(absl::InternalError("No DNN support for stream")); return; } bool cudnn_launch_status = dnn->DoBatchNormalizationBackward( stream, y_backprop_ptr, x_ptr, scale_ptr, offset_ptr, mean_ptr, inv_variance_ptr, y_ptr, x_desc, scale_offset_desc, static_cast<double>(epsilon), AsDnnActivationMode(activation_mode), &x_backprop_ptr, &scale_backprop_ptr, &offset_backprop_ptr, &side_input_backprop_ptr, reserve_space_data_ptr, workspace_allocator.get()); if (!cudnn_launch_status) { context->SetStatus( errors::Internal("cuDNN launch failure : input shape (", x.shape().DebugString(), ")")); } if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) { functor::NCHWToNHWC<GPUDevice, T, 4>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(x_backprop_transformed).tensor<T, 4>(), x_backprop->tensor<T, 4>()); } } }; template <typename T, typename U> struct FusedBatchNormGrad<GPUDevice, T, U> { void operator()(OpKernelContext* context, const Tensor& y_backprop, const Tensor& x, const Tensor& scale, const Tensor* offset, const Tensor& mean, const Tensor& inv_variance, const Tensor* y, U epsilon, FusedBatchNormActivationMode activation_mode, Tensor* x_backprop, Tensor* scale_backprop, Tensor* offset_backprop, Tensor* side_input_backprop, bool use_reserved_space, TensorFormat tensor_format) { FusedBatchNormGradImplGPU<T, U>()( context, y_backprop, x, scale, offset, mean, inv_variance, y, epsilon, activation_mode, x_backprop, scale_backprop, offset_backprop, side_input_backprop, use_reserved_space, tensor_format); } }; template <> struct FusedBatchNormGrad<GPUDevice, Eigen::bfloat16, float> { void operator()(OpKernelContext* context, const Tensor& y_backprop, const Tensor& x, const Tensor& scale, const Tensor* offset, const Tensor& mean, const Tensor& inv_variance, const Tensor* y, float epsilon, FusedBatchNormActivationMode activation_mode, Tensor* x_backprop, Tensor* scale_backprop, Tensor* offset_backprop, Tensor* side_input_backprop, bool use_reserved_space, TensorFormat tensor_format) { auto* stream = context->op_device_context()->stream(); const bool cast_to_float = !IsBF16SupportedInOps(stream); if (cast_to_float) { Tensor casted_y_backprop = y_backprop; Tensor casted_x = x; Tensor casted_y; Tensor casted_x_backprop = *x_backprop; Tensor casted_side_input_backprop; const GPUDevice& device = context->eigen_device<GPUDevice>(); functor::CastFunctor<GPUDevice, float, Eigen::bfloat16> cast; OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, y_backprop.shape(), &casted_y_backprop)); 
cast(device, casted_y_backprop.template flat<float>(), y_backprop.template flat<Eigen::bfloat16>()); OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, x.shape(), &casted_x)); cast(device, casted_x.template flat<float>(), x.template flat<Eigen::bfloat16>()); if (y != nullptr) { OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, y->shape(), &casted_y)); cast(device, casted_y.template flat<float>(), y->template flat<Eigen::bfloat16>()); } OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, x_backprop->shape(), &casted_x_backprop)); if (side_input_backprop != nullptr) { OP_REQUIRES_OK(context, context->allocate_temp( DT_FLOAT, side_input_backprop->shape(), &casted_side_input_backprop)); } FusedBatchNormGradImplGPU<float, float>()( context, casted_y_backprop, casted_x, scale, offset, mean, inv_variance, (y != nullptr) ? &casted_y : nullptr, epsilon, activation_mode, &casted_x_backprop, scale_backprop, offset_backprop, (side_input_backprop != nullptr) ? &casted_side_input_backprop : nullptr, use_reserved_space, tensor_format); functor::CastFunctor<GPUDevice, Eigen::bfloat16, float> cast_back; const Tensor& casted_x_backprop_const = casted_x_backprop; cast_back(device, x_backprop->template flat<Eigen::bfloat16>(), casted_x_backprop_const.template flat<float>()); if (side_input_backprop != nullptr) { const Tensor& casted_side_input_backprop_const = casted_side_input_backprop; cast_back(device, side_input_backprop->template flat<Eigen::bfloat16>(), casted_side_input_backprop_const.template flat<float>()); } return; } FusedBatchNormGradImplGPU<Eigen::bfloat16, float>()( context, y_backprop, x, scale, offset, mean, inv_variance, y, epsilon, activation_mode, x_backprop, scale_backprop, offset_backprop, side_input_backprop, use_reserved_space, tensor_format); } }; #define DECLARE_GPU_SPEC(T, U) \ template <> \ void FusedBatchNormFreezeGrad<GPUDevice, T, U>::operator()( \ OpKernelContext* context, const Tensor& y_backprop_input, \ const Tensor& x_input, const Tensor& scale_input, \ const Tensor& mean_input, const Tensor& variance_input, U epsilon, \ Tensor* x_backprop_output, Tensor* scale_backprop_output, \ Tensor* offset_backprop_output); \ extern template struct FusedBatchNormFreezeGrad<GPUDevice, T, U>; \ template <> \ void FusedBatchNormInferenceFunctor<GPUDevice, T, U>::operator()( \ OpKernelContext* context, TensorFormat tensor_format, \ typename TTypes<T, 4>::ConstTensor in, \ typename TTypes<U>::ConstVec scale, typename TTypes<U>::ConstVec offset, \ typename TTypes<U>::ConstVec estimated_mean, \ typename TTypes<U>::ConstVec estimated_variance, \ typename TTypes<T, 4>::ConstTensor side_input, U epsilon, \ FusedBatchNormActivationMode activation_mode, \ typename TTypes<T, 4>::Tensor out); \ extern template struct FusedBatchNormInferenceFunctor<GPUDevice, T, U>; DECLARE_GPU_SPEC(float, float); DECLARE_GPU_SPEC(Eigen::half, float); DECLARE_GPU_SPEC(Eigen::bfloat16, float); #undef DECLARE_GPU_SPEC #endif } template <typename Device, typename T, typename U> class FusedBatchNormOpBase : public OpKernel { using FbnActivationMode = functor::FusedBatchNormActivationMode; protected: explicit FusedBatchNormOpBase(OpKernelConstruction* context, bool is_batch_norm_ex = false) : OpKernel(context) { float epsilon; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); epsilon_ = U(epsilon); float exponential_avg_factor; OP_REQUIRES_OK(context, context->GetAttr("exponential_avg_factor", &exponential_avg_factor)); exponential_avg_factor_ = U(exponential_avg_factor); string 
tensor_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format)); OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_)); if (!is_batch_norm_ex) { has_side_input_ = false; activation_mode_ = FbnActivationMode::kIdentity; } else { OP_REQUIRES_OK(context, ParseActivationMode(context, &activation_mode_)); int num_side_inputs; OP_REQUIRES_OK(context, context->GetAttr("num_side_inputs", &num_side_inputs)); OP_REQUIRES(context, num_side_inputs >= 0 && num_side_inputs <= 1, errors::InvalidArgument( "FusedBatchNorm accepts at most one side input.")); has_side_input_ = (num_side_inputs == 1); if (has_side_input_ && is_training_) { OP_REQUIRES( context, activation_mode_ != FbnActivationMode::kIdentity, errors::InvalidArgument("Identity activation is not supported with " "non-empty side input")); } } if (activation_mode_ != FbnActivationMode::kIdentity && is_training_) { OP_REQUIRES(context, DataTypeToEnum<T>::value == DT_HALF, errors::InvalidArgument("FusedBatchNorm with activation " "supports only DT_HALF data type.")); OP_REQUIRES(context, tensor_format_ == FORMAT_NHWC, errors::InvalidArgument("FusedBatchNorm with activation " "supports only NHWC tensor format.")); OP_REQUIRES(context, functor::BatchnormSpatialPersistentEnabled(), errors::InvalidArgument( "FusedBatchNorm with activation must run with cuDNN " "spatial persistence mode enabled.")); } } virtual void ComputeWithReservedSpace(OpKernelContext* context, bool use_reserved_space) { Tensor x = context->input(0); const Tensor& scale = context->input(1); const Tensor& offset = context->input(2); const Tensor& estimated_mean = context->input(3); const Tensor& estimated_variance = context->input(4); const Tensor* side_input = has_side_input_ ? 
&context->input(5) : nullptr; OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5, errors::InvalidArgument("input must be 4 or 5-dimensional", x.shape().DebugString())); OP_REQUIRES(context, scale.dims() == 1, errors::InvalidArgument("scale must be 1-dimensional", scale.shape().DebugString())); OP_REQUIRES(context, offset.dims() == 1, errors::InvalidArgument("offset must be 1-dimensional", offset.shape().DebugString())); OP_REQUIRES(context, estimated_mean.dims() == 1, errors::InvalidArgument("estimated_mean must be 1-dimensional", estimated_mean.shape().DebugString())); OP_REQUIRES( context, estimated_variance.dims() == 1, errors::InvalidArgument("estimated_variance must be 1-dimensional", estimated_variance.shape().DebugString())); bool use_reshape = (x.dims() == 5); auto x_shape = x.shape(); TensorShape dest_shape; if (use_reshape) { const int64_t in_batch = GetTensorDim(x, tensor_format_, 'N'); int64_t in_planes = GetTensorDim(x, tensor_format_, '0'); int64_t in_rows = GetTensorDim(x, tensor_format_, '1'); int64_t in_cols = GetTensorDim(x, tensor_format_, '2'); const int64_t in_depth = GetTensorDim(x, tensor_format_, 'C'); OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(tensor_format_, in_batch, {{in_planes, in_rows * in_cols}}, in_depth, &dest_shape)); OP_REQUIRES(context, x.CopyFrom(x, dest_shape), errors::InvalidArgument("Error during tensor copy.")); } const auto num_channels = GetTensorDim(x, tensor_format_, 'C'); OP_REQUIRES( context, scale.NumElements() == num_channels, errors::InvalidArgument("scale must have the same number of elements " "as the channels of x, got ", scale.NumElements(), " and ", num_channels)); OP_REQUIRES( context, offset.NumElements() == num_channels, errors::InvalidArgument("offset must have the same number of elements " "as the channels of x, got ", offset.NumElements(), " and ", num_channels)); if (!is_training_ || exponential_avg_factor_ != 1.) { std::string prefix_msg = is_training_ ? "When exponential_avg_factor != 1" : "When is_training=false"; OP_REQUIRES(context, estimated_mean.NumElements() == num_channels, errors::InvalidArgument( prefix_msg, ", mean must have the same number " "of elements as the channels of x, got ", estimated_mean.NumElements(), " and ", num_channels)); OP_REQUIRES(context, estimated_variance.NumElements() == num_channels, errors::InvalidArgument( prefix_msg, ", variance must have the same " "number of elements as the channels of x, got ", estimated_variance.NumElements(), " and ", num_channels)); } if (has_side_input_) { OP_REQUIRES(context, side_input->shape() == x.shape(), errors::InvalidArgument( "side_input shape must be equal to input shape: ", side_input->shape().DebugString(), " != ", x.shape().DebugString())); } if (activation_mode_ != FbnActivationMode::kIdentity) { OP_REQUIRES( context, !is_training_ || num_channels % 4 == 0, errors::InvalidArgument("FusedBatchNorm with activation requires " "channel dimension to be a multiple of 4.")); } Tensor* y = nullptr; auto alloc_shape = use_reshape ? 
dest_shape : x_shape; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, alloc_shape, &y)); Tensor* batch_mean = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {3}, 1, scale.shape(), &batch_mean)); Tensor* batch_var = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {4}, 2, scale.shape(), &batch_var)); Tensor* saved_mean = nullptr; OP_REQUIRES_OK(context, context->allocate_output(3, scale.shape(), &saved_mean)); Tensor* saved_maybe_inv_var = nullptr; OP_REQUIRES_OK(context, context->allocate_output(4, scale.shape(), &saved_maybe_inv_var)); if (is_training_) { functor::FusedBatchNorm<Device, T, U, true>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon_, exponential_avg_factor_, activation_mode_, y, batch_mean, batch_var, saved_mean, saved_maybe_inv_var, tensor_format_, use_reserved_space); } else { functor::FusedBatchNorm<Device, T, U, false>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon_, exponential_avg_factor_, activation_mode_, y, batch_mean, batch_var, saved_mean, saved_maybe_inv_var, tensor_format_, use_reserved_space); } if (use_reshape) { OP_REQUIRES(context, y->CopyFrom(*y, x_shape), errors::InvalidArgument("Error during tensor copy.")); } } private: U epsilon_; U exponential_avg_factor_; TensorFormat tensor_format_; bool is_training_; bool has_side_input_; FbnActivationMode activation_mode_; }; template <typename Device, typename T, typename U> class FusedBatchNormOp : public FusedBatchNormOpBase<Device, T, U> { public: explicit FusedBatchNormOp(OpKernelConstruction* context) : FusedBatchNormOpBase<Device, T, U>(context) {} void Compute(OpKernelContext* context) override { FusedBatchNormOpBase<Device, T, U>::ComputeWithReservedSpace(context, false); } }; template <typename Device, typename T, typename U> class FusedBatchNormOpV3 : public FusedBatchNormOpBase<Device, T, U> { public: explicit FusedBatchNormOpV3(OpKernelConstruction* context) : FusedBatchNormOpBase<Device, T, U>(context) {} void Compute(OpKernelContext* context) override { FusedBatchNormOpBase<Device, T, U>::ComputeWithReservedSpace(context, true); } }; template <typename Device, typename T, typename U> class FusedBatchNormOpEx : public FusedBatchNormOpBase<Device, T, U> { static constexpr bool kWithSideInputAndActivation = true; public: explicit FusedBatchNormOpEx(OpKernelConstruction* context) : FusedBatchNormOpBase<Device, T, U>(context, kWithSideInputAndActivation) {} void Compute(OpKernelContext* context) override { FusedBatchNormOpBase<Device, T, U>::ComputeWithReservedSpace(context, true); } }; template <typename Device, typename T, typename U> class FusedBatchNormGradOpBase : public OpKernel { using FbnActivationMode = functor::FusedBatchNormActivationMode; protected: explicit FusedBatchNormGradOpBase(OpKernelConstruction* context, bool is_batch_norm_grad_ex = false) : OpKernel(context) { float epsilon; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); epsilon_ = U(epsilon); string tensor_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format)); OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_)); if (!is_batch_norm_grad_ex) { has_side_input_ = false; activation_mode_ = FbnActivationMode::kIdentity; } else { OP_REQUIRES_OK(context, ParseActivationMode(context, &activation_mode_)); 
int num_side_inputs; OP_REQUIRES_OK(context, context->GetAttr("num_side_inputs", &num_side_inputs)); OP_REQUIRES(context, num_side_inputs >= 0 && num_side_inputs <= 1, errors::InvalidArgument( "FusedBatchNormGrad accepts at most one side input.")); has_side_input_ = (num_side_inputs == 1); if (has_side_input_ && is_training_) { OP_REQUIRES( context, activation_mode_ != FbnActivationMode::kIdentity, errors::InvalidArgument("Identity activation is not supported with " "non-empty side input")); } } if (activation_mode_ != FbnActivationMode::kIdentity && is_training_) { OP_REQUIRES(context, DataTypeToEnum<T>::value == DT_HALF, errors::InvalidArgument("FusedBatchNormGrad with activation " "supports only DT_HALF data type.")); OP_REQUIRES(context, tensor_format_ == FORMAT_NHWC, errors::InvalidArgument("FusedBatchNormGrad with activation " "supports only NHWC tensor format.")); OP_REQUIRES(context, functor::BatchnormSpatialPersistentEnabled(), errors::InvalidArgument( "FusedBatchNormGrad with activation must run with cuDNN " "spatial persistence mode enabled.")); } } virtual void ComputeWithReservedSpace(OpKernelContext* context, bool use_reserved_space) { Tensor y_backprop = context->input(0); Tensor x = context->input(1); const Tensor& scale = context->input(2); const Tensor& saved_mean_or_pop_mean = context->input(3); const Tensor& saved_maybe_inv_var_or_pop_var = context->input(4); bool use_activation = activation_mode_ != FbnActivationMode::kIdentity; const Tensor* offset = use_activation ? &context->input(6) : nullptr; const Tensor* y = use_activation ? &context->input(7) : nullptr; OP_REQUIRES(context, y_backprop.dims() == 4 || y_backprop.dims() == 5, errors::InvalidArgument("input must be 4 or 5-dimensional", y_backprop.shape().DebugString())); OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5, errors::InvalidArgument("input must be 4 or 5-dimensional", x.shape().DebugString())); OP_REQUIRES(context, scale.dims() == 1, errors::InvalidArgument("scale must be 1-dimensional", scale.shape().DebugString())); OP_REQUIRES( context, saved_mean_or_pop_mean.dims() == 1, errors::InvalidArgument("saved mean must be 1-dimensional", saved_mean_or_pop_mean.shape().DebugString())); OP_REQUIRES(context, saved_maybe_inv_var_or_pop_var.dims() == 1, errors::InvalidArgument( "saved variance must be 1-dimensional", saved_maybe_inv_var_or_pop_var.shape().DebugString())); OP_REQUIRES( context, x.shape() == y_backprop.shape(), errors::InvalidArgument( "x and y_backprop must have same shape, but x has shape ", x.shape(), " and y_backprop has shape ", y_backprop.shape())); if (use_activation) { OP_REQUIRES( context, x.dim_size(3) % 4 == 0, errors::InvalidArgument("FusedBatchNormGrad with activation requires " "channel dimension to be a multiple of 4.")); OP_REQUIRES(context, offset->dims() == 1, errors::InvalidArgument("offset must be 1-dimensional", offset->shape().DebugString())); } bool use_reshape = (x.dims() == 5); auto x_shape = x.shape(); TensorShape dest_shape; if (use_reshape) { const int64_t in_batch = GetTensorDim(x, tensor_format_, 'N'); int64_t in_planes = GetTensorDim(x, tensor_format_, '0'); int64_t in_rows = GetTensorDim(x, tensor_format_, '1'); int64_t in_cols = GetTensorDim(x, tensor_format_, '2'); const int64_t in_depth = GetTensorDim(x, tensor_format_, 'C'); OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(tensor_format_, in_batch, {{in_planes, in_rows * in_cols}}, in_depth, &dest_shape)); OP_REQUIRES(context, x.CopyFrom(x, dest_shape), errors::InvalidArgument("Error during tensor copy.")); 
OP_REQUIRES(context, y_backprop.CopyFrom(y_backprop, dest_shape), errors::InvalidArgument("Error during tensor copy.")); } const auto num_channels = GetTensorDim(x, tensor_format_, 'C'); OP_REQUIRES( context, scale.NumElements() == num_channels, errors::InvalidArgument("scale must have the same number of elements " "as the channels of x, got ", scale.NumElements(), " and ", num_channels)); OP_REQUIRES( context, saved_mean_or_pop_mean.NumElements() == num_channels, errors::InvalidArgument("reserve_space_1 must have the same number of " "elements as the channels of x, got ", saved_mean_or_pop_mean.NumElements(), " and ", num_channels)); OP_REQUIRES( context, saved_maybe_inv_var_or_pop_var.NumElements() == num_channels, errors::InvalidArgument("reserve_space_2 must have the same number of " "elements as the channels of x, got ", saved_maybe_inv_var_or_pop_var.NumElements(), " and ", num_channels)); Tensor* x_backprop = nullptr; auto alloc_shape = use_reshape ? dest_shape : x_shape; OP_REQUIRES_OK(context, context->allocate_output(0, alloc_shape, &x_backprop)); const TensorShape& scale_offset_shape = scale.shape(); Tensor* scale_backprop = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, scale_offset_shape, &scale_backprop)); Tensor* offset_backprop = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, scale_offset_shape, &offset_backprop)); Tensor* placeholder_1 = nullptr; OP_REQUIRES_OK( context, context->allocate_output(3, TensorShape({0}), &placeholder_1)); Tensor* placeholder_2 = nullptr; OP_REQUIRES_OK( context, context->allocate_output(4, TensorShape({0}), &placeholder_2)); Tensor* side_input_backprop = nullptr; if (has_side_input_) { OP_REQUIRES_OK(context, context->allocate_output(5, alloc_shape, &side_input_backprop)); } if (x.shape().num_elements() == 0) { functor::SetZeroFunctor<Device, U> f; f(context->eigen_device<Device>(), scale_backprop->flat<U>()); f(context->eigen_device<Device>(), offset_backprop->flat<U>()); return; } if (is_training_) { functor::FusedBatchNormGrad<Device, T, U>()( context, y_backprop, x, scale, offset, saved_mean_or_pop_mean, saved_maybe_inv_var_or_pop_var, y, epsilon_, activation_mode_, x_backprop, scale_backprop, offset_backprop, side_input_backprop, use_reserved_space, tensor_format_); } else { OP_REQUIRES( context, activation_mode_ == FbnActivationMode::kIdentity && !has_side_input_, errors::InvalidArgument( "FusedBatchNormGrad with activation is only supported " "when is_training=True.")); OP_REQUIRES(context, tensor_format_ == FORMAT_NHWC, errors::InvalidArgument( "The implementation of " "FusedBatchNormGrad with is_training=False only support " "NHWC tensor format for now.")); functor::FusedBatchNormFreezeGrad<Device, T, U>()( context, y_backprop, x, scale, saved_mean_or_pop_mean, saved_maybe_inv_var_or_pop_var, epsilon_, x_backprop, scale_backprop, offset_backprop); } if (use_reshape) { OP_REQUIRES(context, x_backprop->CopyFrom(*x_backprop, x_shape), errors::InvalidArgument("Error during tensor copy.")); } } private: U epsilon_; TensorFormat tensor_format_; bool is_training_; bool has_side_input_; FbnActivationMode activation_mode_; }; template <typename Device, typename T, typename U> class FusedBatchNormGradOp : public FusedBatchNormGradOpBase<Device, T, U> { public: explicit FusedBatchNormGradOp(OpKernelConstruction* context) : FusedBatchNormGradOpBase<Device, T, U>(context) {} void Compute(OpKernelContext* context) override { FusedBatchNormGradOpBase<Device, T, U>::ComputeWithReservedSpace(context, false); } }; template 
<typename Device, typename T, typename U> class FusedBatchNormGradOpV3 : public FusedBatchNormGradOpBase<Device, T, U> { public: explicit FusedBatchNormGradOpV3(OpKernelConstruction* context) : FusedBatchNormGradOpBase<Device, T, U>(context) {} void Compute(OpKernelContext* context) override { FusedBatchNormGradOpBase<Device, T, U>::ComputeWithReservedSpace(context, true); } }; template <typename Device, typename T, typename U> class FusedBatchNormGradOpEx : public FusedBatchNormGradOpBase<Device, T, U> { static constexpr bool kWithSideInputAndActivation = true; public: explicit FusedBatchNormGradOpEx(OpKernelConstruction* context) : FusedBatchNormGradOpBase<Device, T, U>(context, kWithSideInputAndActivation) {} void Compute(OpKernelContext* context) override { FusedBatchNormGradOpBase<Device, T, U>::ComputeWithReservedSpace(context, true); } }; REGISTER_KERNEL_BUILDER( Name("FusedBatchNorm").Device(DEVICE_CPU).TypeConstraint<float>("T"), FusedBatchNormOp<CPUDevice, float, float>); REGISTER_KERNEL_BUILDER( Name("FusedBatchNormGrad").Device(DEVICE_CPU).TypeConstraint<float>("T"), FusedBatchNormGradOp<CPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2") .Device(DEVICE_CPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormOp<CPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2") .Device(DEVICE_CPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOp<CPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2") .Device(DEVICE_CPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormOp<CPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2") .Device(DEVICE_CPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOp<CPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2") .Device(DEVICE_CPU) .TypeConstraint<bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormOp<CPUDevice, bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2") .Device(DEVICE_CPU) .TypeConstraint<bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOp<CPUDevice, bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3") .Device(DEVICE_CPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormOpV3<CPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3") .Device(DEVICE_CPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpV3<CPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3") .Device(DEVICE_CPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormOpV3<CPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3") .Device(DEVICE_CPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpV3<CPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3") .Device(DEVICE_CPU) .TypeConstraint<bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormOpV3<CPUDevice, bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3") .Device(DEVICE_CPU) .TypeConstraint<bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpV3<CPUDevice, bfloat16, float>); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM REGISTER_KERNEL_BUILDER( Name("FusedBatchNorm").Device(DEVICE_GPU).TypeConstraint<float>("T"), FusedBatchNormOp<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER( 
Name("FusedBatchNormGrad").Device(DEVICE_GPU).TypeConstraint<float>("T"), FusedBatchNormGradOp<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormOp<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOp<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormOp<GPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2") .Device(DEVICE_GPU) .TypeConstraint<Eigen::bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormOp<GPUDevice, Eigen::bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOp<GPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER( Name("FusedBatchNormGradV2") .Device(DEVICE_GPU) .TypeConstraint<Eigen::bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOp<GPUDevice, Eigen::bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormOpV3<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormOpEx<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpV3<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormGradEx") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpEx<GPUDevice, float, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormOpV3<GPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3") .Device(DEVICE_GPU) .TypeConstraint<Eigen::bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormOpV3<GPUDevice, Eigen::bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormOpEx<GPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx") .Device(DEVICE_GPU) .TypeConstraint<Eigen::bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormOpEx<GPUDevice, Eigen::bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpV3<GPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER( Name("FusedBatchNormGradV3") .Device(DEVICE_GPU) .TypeConstraint<Eigen::bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpV3<GPUDevice, Eigen::bfloat16, float>); REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormGradEx") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpEx<GPUDevice, Eigen::half, float>); REGISTER_KERNEL_BUILDER( Name("_FusedBatchNormGradEx") .Device(DEVICE_GPU) .TypeConstraint<Eigen::bfloat16>("T") .TypeConstraint<float>("U"), FusedBatchNormGradOpEx<GPUDevice, Eigen::bfloat16, float>); #endif }
#include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { class FusedBatchNormOpTest : public OpsTestBase {}; TEST_F(FusedBatchNormOpTest, Training) { TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("exponential_avg_factor", 1.0) .Attr("epsilon", 0.001) .Attr("is_training", true) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 1, 6, 2}), {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}); AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0}); AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0}); AddInputFromArray<float>(TensorShape({0}), {}); AddInputFromArray<float>(TensorShape({0}), {}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83, 3.17, 3.17, 5.51, 5.51, 7.86, 7.86}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01); Tensor expected_mean(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected_mean, {10, 10}); test::ExpectTensorNear<float>(expected_mean, *GetOutput(1), 0.01); Tensor expected_variance(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected_variance, {14.00, 14.00}); test::ExpectTensorNear<float>(expected_variance, *GetOutput(2), 0.01); } TEST_F(FusedBatchNormOpTest, TrainingRunningMean) { TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("exponential_avg_factor", 0.5) .Attr("epsilon", 0.001) .Attr("is_training", true) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 1, 6, 2}), {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}); AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0}); AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0}); AddInputFromArray<float>(TensorShape({2}), {6.0, 6.0}); AddInputFromArray<float>(TensorShape({2}), {16.0, 16.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83, 3.17, 3.17, 5.51, 5.51, 7.86, 7.86}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01); Tensor expected_mean(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected_mean, {8, 8}); test::ExpectTensorNear<float>(expected_mean, *GetOutput(1), 0.01); Tensor expected_variance(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected_variance, {15.00, 15.00}); test::ExpectTensorNear<float>(expected_variance, *GetOutput(2), 0.01); } TEST_F(FusedBatchNormOpTest, Inference) { 
TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("epsilon", 0.001) .Attr("is_training", false) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 1, 6, 2}), {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}); AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0}); AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0}); AddInputFromArray<float>(TensorShape({2}), {10, 10}); AddInputFromArray<float>(TensorShape({2}), {11.67f, 11.67f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83, 3.17, 3.17, 5.51, 5.51, 7.86, 7.86}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01); } TEST_F(FusedBatchNormOpTest, InferenceIgnoreAvgFactor) { TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("exponential_avg_factor", 0.5) .Attr("epsilon", 0.001) .Attr("is_training", false) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 1, 6, 2}), {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}); AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0}); AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0}); AddInputFromArray<float>(TensorShape({2}), {10, 10}); AddInputFromArray<float>(TensorShape({2}), {11.67f, 11.67f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83, 3.17, 3.17, 5.51, 5.51, 7.86, 7.86}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01); } TEST_F(FusedBatchNormOpTest, EmptyInput) { TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("epsilon", 0.001) .Attr("is_training", true) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 1, 0, 0}), {}); AddInputFromArray<float>(TensorShape({0}), {}); AddInputFromArray<float>(TensorShape({0}), {}); AddInputFromArray<float>(TensorShape({0}), {}); AddInputFromArray<float>(TensorShape({0}), {}); TF_ASSERT_OK(RunOpKernel()); EXPECT_EQ(GetOutput(0)->shape(), TensorShape({1, 1, 0, 0})); } class FusedBatchNormGradOpTest : public OpsTestBase {}; TEST_F(FusedBatchNormGradOpTest, Simple) { TF_EXPECT_OK(NodeDefBuilder("batch_norm_grad_op", "FusedBatchNormGrad") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("epsilon", 0.001) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 1, 6, 2}), {2, 2, 9, 9, -4, -4, 5, 5, 8, 8, 7, 7}); AddInputFromArray<float>(TensorShape({1, 1, 6, 2}), {1, 1, 7, 7, 4, 4, -3, -3, -11, -11, 13, 13}); AddInputFromArray<float>(TensorShape({2}), {4, 4}); AddInputFromArray<float>(TensorShape({2}), {1.833f, 1.833f}); AddInputFromArray<float>(TensorShape({2}), {57.472f, 57.472f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_x(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2})); test::FillValues<float>(&expected_x, {-1.34, -1.34, 2.47, 2.47, -4.44, -4.44, 0.17, 0.17, 1.60, 1.60, 1.53, 1.53}); 
test::ExpectTensorNear<float>(expected_x, *GetOutput(0), 0.01); Tensor expected_scale(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected_scale, {-1.6488, -1.6488}); test::ExpectTensorNear<float>(expected_scale, *GetOutput(1), 0.01); Tensor expected_offset(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected_offset, {27, 27}); test::ExpectTensorNear<float>(expected_offset, *GetOutput(2), 0.01); } using fp32 = float; using fp16 = Eigen::half; using bf16 = bfloat16; template <typename T> static Graph* FusedBatchNormInference(int n, int h, int w, int c, bool is_training, TensorFormat data_format) { Graph* g = new Graph(OpRegistry::Global()); DataType dtype = DataTypeToEnum<T>::value; Tensor x_t(dtype, data_format == FORMAT_NHWC ? TensorShape({n, h, w, c}) : TensorShape({n, c, h, w})); x_t.flat<T>().setRandom(); Tensor other_t(DT_FLOAT, TensorShape({c})); other_t.flat<float>().setRandom(); Tensor empty_t(DT_FLOAT, TensorShape({0})); Node* x = test::graph::Constant(g, x_t, "x"); Node* other = test::graph::Constant(g, other_t, "other"); Node* empty = test::graph::Constant(g, empty_t, "empty"); Node* fused_batch_norm; TF_CHECK_OK(NodeBuilder(g->NewName("fused_batch_norm"), "FusedBatchNormV3") .Input(x) .Input(other) .Input(other) .Input(is_training ? empty : other) .Input(is_training ? empty : other) .Attr("T", dtype) .Attr("U", DT_FLOAT) .Attr("epsilon", 0.001) .Attr("is_training", is_training) .Attr("data_format", ToString(data_format)) .Finalize(g, &fused_batch_norm)); return g; } template <typename T> static Graph* FusedBatchNormGrad(int n, int h, int w, int c, bool is_training, TensorFormat data_format) { Graph* g = new Graph(OpRegistry::Global()); DataType dtype = DataTypeToEnum<T>::value; TensorShape shape = data_format == FORMAT_NHWC ? 
TensorShape({n, h, w, c}) : TensorShape({n, c, h, w}); Tensor y_backprop_t(dtype, shape); y_backprop_t.flat<T>().setRandom(); Tensor x_t(dtype, shape); x_t.flat<T>().setRandom(); Tensor other_t(DT_FLOAT, TensorShape({c})); other_t.flat<float>().setRandom(); Node* y_backprop = test::graph::Constant(g, y_backprop_t, "y_backprop"); Node* x = test::graph::Constant(g, x_t, "x"); Node* other = test::graph::Constant(g, other_t, "other"); Node* fused_batch_norm; TF_CHECK_OK( NodeBuilder(g->NewName("fused_batch_norm_grad"), "FusedBatchNormGradV3") .Input(y_backprop) .Input(x) .Input(other) .Input(other) .Input(other) .Input(other) .Attr("T", dtype) .Attr("U", DT_FLOAT) .Attr("epsilon", 0.001) .Attr("is_training", is_training) .Attr("data_format", ToString(data_format)) .Finalize(g, &fused_batch_norm)); return g; } #define BM_NAME(NAME, N, H, W, C, T, IT, FORMAT, DEVICE) \ BM_##NAME##_##N##_##H##_##W##_##C##_##IT##_##FORMAT##_##T##_##DEVICE #define BM_FusedBatchNorm(N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE) \ static void BM_NAME(FusedBatchNorm, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)(::testing::benchmark::State & state) { \ test::Benchmark( \ #DEVICE, \ FusedBatchNormInference<T>(N, H, W, C, IS_TRAINING, FORMAT_##FORMAT), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * N * H * W * C); \ } \ BENCHMARK( \ BM_NAME(FusedBatchNorm, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)) \ ->UseRealTime(); BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NHWC, cpu); BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NHWC, cpu); BM_FusedBatchNorm(64, 14, 14, 256, bf16, false, NHWC, cpu); BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NHWC, cpu); BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NHWC, cpu); BM_FusedBatchNorm(64, 14, 14, 256, bf16, true, NHWC, cpu); #ifdef GOOGLE_CUDA BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NHWC, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NHWC, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NCHW, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NCHW, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NHWC, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NHWC, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NCHW, gpu); BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NCHW, gpu); #endif #define BM_FusedBatchNormGrad(N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE) \ static void BM_NAME(FusedBatchNormGrad, N, H, W, C, T, IS_TRAINING, FORMAT, \ DEVICE)(::testing::benchmark::State & state) { \ test::Benchmark( \ #DEVICE, \ FusedBatchNormGrad<T>(N, H, W, C, IS_TRAINING, FORMAT_##FORMAT), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * N * H * W * C); \ } \ BENCHMARK( \ BM_NAME(FusedBatchNormGrad, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)) \ ->UseRealTime(); #define BM_FusedBatchNormGradResnetShapes(T, IS_TRAINING, FORMAT, DEVICE) \ BM_FusedBatchNormGrad(64, 56, 56, 64, T, IS_TRAINING, FORMAT, DEVICE); \ BM_FusedBatchNormGrad(64, 56, 56, 128, T, IS_TRAINING, FORMAT, DEVICE); \ BM_FusedBatchNormGrad(64, 56, 56, 256, T, IS_TRAINING, FORMAT, DEVICE); \ \ BM_FusedBatchNormGrad(64, 28, 28, 128, T, IS_TRAINING, FORMAT, DEVICE); \ BM_FusedBatchNormGrad(64, 28, 28, 256, T, IS_TRAINING, FORMAT, DEVICE); \ BM_FusedBatchNormGrad(64, 28, 28, 512, T, IS_TRAINING, FORMAT, DEVICE); \ \ BM_FusedBatchNormGrad(64, 14, 14, 128, T, IS_TRAINING, FORMAT, DEVICE); \ BM_FusedBatchNormGrad(64, 14, 14, 256, T, IS_TRAINING, FORMAT, DEVICE); \ BM_FusedBatchNormGrad(64, 14, 14, 1024, T, IS_TRAINING, FORMAT, DEVICE) 
BM_FusedBatchNormGradResnetShapes(fp32, true, NHWC, cpu); BM_FusedBatchNormGradResnetShapes(fp32, false, NHWC, cpu); BM_FusedBatchNormGradResnetShapes(bf16, true, NHWC, cpu); BM_FusedBatchNormGradResnetShapes(bf16, false, NHWC, cpu); #ifdef GOOGLE_CUDA BM_FusedBatchNormGradResnetShapes(fp32, true, NHWC, gpu); BM_FusedBatchNormGradResnetShapes(fp16, true, NHWC, gpu); BM_FusedBatchNormGradResnetShapes(fp32, true, NCHW, gpu); BM_FusedBatchNormGradResnetShapes(fp16, true, NCHW, gpu); BM_FusedBatchNormGradResnetShapes(fp32, false, NHWC, gpu); BM_FusedBatchNormGradResnetShapes(fp16, false, NHWC, gpu); #endif }
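The FusedBatchNormGrad "Simple" test above can likewise be reproduced with the textbook batch-norm backward formulas. This is a hedged standalone sketch rather than the Eigen/cuDNN code paths the kernel dispatches to; it reads the test's reserve_space_2 value (57.472) as the saved population variance, which is consistent with the test data.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // One channel of the FusedBatchNormGrad "Simple" test inputs.
  std::vector<double> dy = {2, 9, -4, 5, 8, 7};   // y_backprop
  std::vector<double> x = {1, 7, 4, -3, -11, 13};
  const double scale = 4.0, mean = 1.833, var = 57.472, epsilon = 0.001;
  const double n = static_cast<double>(x.size());
  const double inv_std = 1.0 / std::sqrt(var + epsilon);

  double sum_dy = 0.0, sum_dy_xc = 0.0;
  for (size_t i = 0; i < x.size(); ++i) {
    sum_dy += dy[i];
    sum_dy_xc += dy[i] * (x[i] - mean);
  }
  // Test expects offset_backprop = 27 and scale_backprop about -1.65.
  std::printf("doffset=%.2f  dscale=%.4f\n", sum_dy, sum_dy_xc * inv_std);

  // x_backprop, expected about {-1.34, 2.47, -4.44, 0.17, 1.60, 1.53}.
  for (size_t i = 0; i < x.size(); ++i) {
    const double dx = scale * inv_std / n *
                      (n * dy[i] - sum_dy -
                       (x[i] - mean) * inv_std * inv_std * sum_dy_xc);
    std::printf("%.2f ", dx);
  }
  std::printf("\n");
  return 0;
}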
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fused_batch_norm_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fused_batch_norm_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6373881d-be51-4cc8-ab57-a967d8737d44
cpp
tensorflow/tensorflow
one_hot_op
tensorflow/compiler/tf2xla/kernels/one_hot_op.cc
tensorflow/core/kernels/one_hot_op_test.cc
#include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { class OneHotOp : public XlaOpKernel { public: explicit OneHotOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_)); } void Compile(XlaOpKernelContext* ctx) override { const TensorShape indices_shape = ctx->InputShape(0); const TensorShape depth_shape = ctx->InputShape(1); const TensorShape on_value_shape = ctx->InputShape(2); const TensorShape off_value_shape = ctx->InputShape(3); const int indices_dims = indices_shape.dims(); const int output_dims = indices_dims + 1; OP_REQUIRES( ctx, axis_ == -1 || (axis_ >= 0 && axis_ < output_dims), errors::InvalidArgument("Expected axis to be -1 or between [0, ", output_dims, "). But received: ", axis_)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(depth_shape), errors::InvalidArgument("depth must be a scalar, but got: ", depth_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(on_value_shape), errors::InvalidArgument("on_value must be a scalar, but got: ", on_value_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(off_value_shape), errors::InvalidArgument("off_value must be a scalar, but got: ", off_value_shape.DebugString())); const int axis = (axis_ == -1) ? indices_dims : axis_; int64_t depth; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(1, &depth)); OP_REQUIRES( ctx, depth >= 0, errors::InvalidArgument("depth must be non-negative, got: ", depth)); xla::XlaOp one_hot; OP_REQUIRES_OK( ctx, XlaHelpers::OneHot(ctx->builder(), depth, axis, input_type(0), indices_shape, ctx->Input(0), ctx->Input(2), ctx->Input(3), &one_hot)); ctx->SetOutput(0, one_hot); } private: int32 axis_; OneHotOp(const OneHotOp&) = delete; void operator=(const OneHotOp&) = delete; }; REGISTER_XLA_OP(Name("OneHot").CompileTimeConstantInput("depth"), OneHotOp); } }
#include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* OneHot(int batch_size, int num_classes, int axis) { Graph* g = new Graph(OpRegistry::Global()); Tensor indices(DT_INT32, TensorShape({batch_size})); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(0, num_classes - 1); auto indices_t = indices.flat<int32>(); for (int i = 0; i < batch_size; ++i) { indices_t(i) = dist(gen); } Tensor depth(DT_INT32, TensorShape({})); depth.scalar<int32>()() = num_classes; Tensor on_value(DT_FLOAT, TensorShape({})); on_value.scalar<float>()() = 1.0f; Tensor off_value(DT_FLOAT, TensorShape({})); off_value.scalar<float>()() = 0.0f; test::graph::Multi(g, "OneHot", { test::graph::Constant(g, indices), test::graph::Constant(g, depth), test::graph::Constant(g, on_value), test::graph::Constant(g, off_value), }) ->AddAttr("axis", axis); return g; } #define BM_OneHot(BATCH, CLASS, AXIS, DEVICE) \ static void BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, OneHot(BATCH, CLASS, AXIS), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \ CLASS); \ } \ BENCHMARK(BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE); BM_OneHot(32, 512, 1, cpu); BM_OneHot(64, 512, 1, cpu); BM_OneHot(128, 512, 1, cpu); BM_OneHot(32, 1024, 1, cpu); BM_OneHot(64, 1024, 1, cpu); BM_OneHot(128, 1024, 1, cpu); BM_OneHot(32, 10000, 1, cpu); BM_OneHot(64, 10000, 1, cpu); BM_OneHot(128, 10000, 1, cpu); BM_OneHot(32, 512, 0, cpu); BM_OneHot(64, 512, 0, cpu); BM_OneHot(128, 512, 0, cpu); BM_OneHot(32, 1024, 0, cpu); BM_OneHot(64, 1024, 0, cpu); BM_OneHot(128, 1024, 0, cpu); BM_OneHot(32, 10000, 0, cpu); BM_OneHot(64, 10000, 0, cpu); BM_OneHot(128, 10000, 0, cpu); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/one_hot_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/one_hot_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6a81e78e-057b-4561-b148-0e71ff2b604c
cpp
tensorflow/tensorflow
constant_op
tensorflow/core/kernels/constant_op.cc
tensorflow/core/kernels/constant_op_test.cc
#define EIGEN_USE_THREADS #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #define EIGEN_USE_GPU #endif #include "tensorflow/core/kernels/constant_op.h" #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/variant_op_registry.h" #include "tensorflow/core/graph/graph_node_util.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h" namespace tensorflow { namespace { NodeDef StripTensorDataFromNodeDef(OpKernelConstruction* ctx) { const NodeDef& original = ctx->def(); if (std::is_base_of<protobuf::Message, NodeDef>()) { DCHECK_EQ(reinterpret_cast<const protobuf::Message*>(&original) ->GetDescriptor() ->field_count(), 7) << "The NodeDef format has changed, and the attr-stripping code may " "need to be updated."; } NodeDef ret; ret.set_name(original.name()); ret.set_op(original.op()); ret.set_device(original.device()); AddNodeAttr("dtype", ctx->output_type(0), &ret); MergeDebugInfo(original, &ret); if (original.has_experimental_type()) { *ret.mutable_experimental_type() = original.experimental_type(); } return ret; } } ConstantOp::ConstantOp(OpKernelConstruction* ctx) : OpKernel(ctx, StripTensorDataFromNodeDef(ctx), false), tensor_(ctx->output_type(0)) { const TensorProto* proto = nullptr; tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(name_view().data()); OP_REQUIRES_OK(ctx, ctx->GetAttr("value", &proto)); OP_REQUIRES_OK(ctx, ctx->device()->MakeTensorFromProto( *proto, AllocatorAttributes(), &tensor_)); OP_REQUIRES( ctx, ctx->output_type(0) == tensor_.dtype(), errors::InvalidArgument("Type mismatch between value (", DataTypeString(tensor_.dtype()), ") and dtype (", DataTypeString(ctx->output_type(0)), ")")); } void ConstantOp::Compute(OpKernelContext* ctx) { ctx->set_output(0, tensor_); if (TF_PREDICT_FALSE(ctx->track_allocations())) { ctx->record_persistent_memory_allocation(tensor_.AllocatedBytes()); } } ConstantOp::~ConstantOp() {} REGISTER_KERNEL_BUILDER(Name("Const").Device(DEVICE_CPU), ConstantOp); REGISTER_KERNEL_BUILDER(Name("Const").Device(DEVICE_TPU_SYSTEM), ConstantOp); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #define REGISTER_KERNEL(D, TYPE) \ REGISTER_KERNEL_BUILDER( \ Name("Const").Device(DEVICE_##D).TypeConstraint<TYPE>("dtype"), \ ConstantOp); REGISTER_KERNEL(GPU, Eigen::half); REGISTER_KERNEL(GPU, bfloat16); REGISTER_KERNEL(GPU, float); REGISTER_KERNEL(GPU, double); REGISTER_KERNEL(GPU, uint8); REGISTER_KERNEL(GPU, int8); REGISTER_KERNEL(GPU, qint8); REGISTER_KERNEL(GPU, uint16); REGISTER_KERNEL(GPU, int16); REGISTER_KERNEL(GPU, qint16); REGISTER_KERNEL(GPU, quint16); REGISTER_KERNEL(GPU, uint32); REGISTER_KERNEL(GPU, qint32); REGISTER_KERNEL(GPU, int64_t); REGISTER_KERNEL(GPU, uint64); REGISTER_KERNEL(GPU, complex64); REGISTER_KERNEL(GPU, complex128); REGISTER_KERNEL(GPU, bool); REGISTER_KERNEL(GPU, Variant); #undef REGISTER_KERNEL 
#endif #define REGISTER_DEFAULT_KERNEL(TYPE) \ REGISTER_KERNEL_BUILDER( \ Name("Const").Device(DEVICE_DEFAULT).TypeConstraint<TYPE>("dtype"), \ ConstantOp); TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_DEFAULT_KERNEL); TF_CALL_QUANTIZED_TYPES(REGISTER_DEFAULT_KERNEL); TF_CALL_qint16(REGISTER_DEFAULT_KERNEL); TF_CALL_quint16(REGISTER_DEFAULT_KERNEL); TF_CALL_bool(REGISTER_DEFAULT_KERNEL); TF_CALL_variant(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T, typename Index> class FillOp : public OpKernel { public: explicit FillOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& Tdims = context->input(0); OP_REQUIRES( context, (TensorShapeUtils::IsVector(Tdims.shape()) || TensorShapeUtils::IsScalar(Tdims.shape())), errors::InvalidArgument("dims must represent a vector, got shape ", Tdims.shape().DebugString())); const Tensor& Tvalue = context->input(1); OP_REQUIRES( context, TensorShapeUtils::IsScalar(Tvalue.shape()) || (TensorShapeUtils::IsVector(Tvalue.shape()) && Tvalue.shape().dim_size(0) == 1), errors::InvalidArgument("value must represent a scalar, got shape ", Tvalue.shape().DebugString())); auto dims = Tdims.flat<Index>(); TensorShape shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( reinterpret_cast<const Index*>(dims.data()), dims.size(), &shape)); Tensor* out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, shape, &out)); functor::FillFunctor<Device, T> functor; functor(context->eigen_device<Device>(), out->flat<T>(), Tvalue.scalar<T>()); } }; #define REGISTER_KERNEL(D, TYPE) \ REGISTER_KERNEL_BUILDER(Name("Fill") \ .Device(DEVICE_##D) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint<int32>("index_type") \ .HostMemory("dims"), \ FillOp<D##Device, TYPE, int32>); \ REGISTER_KERNEL_BUILDER(Name("Fill") \ .Device(DEVICE_##D) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint<int64_t>("index_type") \ .HostMemory("dims"), \ FillOp<D##Device, TYPE, int64>); #define REGISTER_CPU_KERNEL(TYPE) REGISTER_KERNEL(CPU, TYPE) TF_CALL_ALL_TYPES(REGISTER_CPU_KERNEL); REGISTER_KERNEL(CPU, quint8); REGISTER_KERNEL(CPU, quint16); REGISTER_KERNEL(CPU, qint8); REGISTER_KERNEL(CPU, qint16); REGISTER_KERNEL(CPU, qint32); REGISTER_KERNEL(CPU, int4); REGISTER_KERNEL(CPU, uint4); #undef REGISTER_CPU_KERNEL #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) REGISTER_KERNEL(GPU, Eigen::half); REGISTER_KERNEL(GPU, bfloat16); REGISTER_KERNEL(GPU, float); REGISTER_KERNEL(GPU, double); REGISTER_KERNEL(GPU, complex64); REGISTER_KERNEL(GPU, complex128); REGISTER_KERNEL(GPU, uint8); REGISTER_KERNEL(GPU, int8); REGISTER_KERNEL(GPU, uint16); REGISTER_KERNEL(GPU, int16); REGISTER_KERNEL(GPU, int64_t); REGISTER_KERNEL(GPU, bool); REGISTER_KERNEL(GPU, int4); REGISTER_KERNEL(GPU, uint4); #endif REGISTER_KERNEL_BUILDER(Name("Fill") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .TypeConstraint<int32>("index_type") .HostMemory("dims") .HostMemory("value") .HostMemory("output"), FillOp<CPUDevice, int32, int32>); #undef REGISTER_KERNEL template <typename Device, typename T> class ZerosLikeOp : public OpKernel { public: explicit ZerosLikeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); const Device& d = ctx->eigen_device<Device>(); if (std::is_same<T, Variant>::value) { OP_REQUIRES( ctx, 
input.dims() == 0, errors::InvalidArgument("ZerosLike non-scalar Tensor with " "dtype=DT_VARIANT is not supported.")); const Variant& v = input.scalar<Variant>()(); int numa_node = ctx->device()->NumaNode(); Tensor out(cpu_allocator(numa_node), DT_VARIANT, TensorShape({})); Variant* out_v = &(out.scalar<Variant>()()); OP_REQUIRES_OK(ctx, UnaryOpVariant<Device>( ctx, ZEROS_LIKE_VARIANT_UNARY_OP, v, out_v)); ctx->set_output(0, out); } else { Tensor* out = nullptr; OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output( {0}, 0, input.shape(), &out)); functor::SetZeroFunctor<Device, T> f; f(d, out->flat<T>()); } } }; #define REGISTER_KERNEL(type, dev) \ REGISTER_KERNEL_BUILDER( \ Name("ZerosLike").Device(DEVICE_##dev).TypeConstraint<type>("T"), \ ZerosLikeOp<dev##Device, type>) #define REGISTER_CPU(type) REGISTER_KERNEL(type, CPU) TF_CALL_POD_STRING_TYPES(REGISTER_CPU); REGISTER_CPU(Variant); #undef REGISTER_CPU #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) REGISTER_KERNEL(bool, GPU); REGISTER_KERNEL(Eigen::half, GPU); REGISTER_KERNEL(float, GPU); REGISTER_KERNEL(double, GPU); REGISTER_KERNEL(int64_t, GPU); REGISTER_KERNEL(complex64, GPU); REGISTER_KERNEL(complex128, GPU); #endif REGISTER_KERNEL(bfloat16, GPU); REGISTER_KERNEL(Variant, GPU); #endif #undef REGISTER_KERNEL REGISTER_KERNEL_BUILDER(Name("ZerosLike") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .HostMemory("y"), ZerosLikeOp<CPUDevice, int32>); template <typename Device, typename T> class OnesLikeOp : public OpKernel { public: explicit OnesLikeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); Tensor* out = nullptr; OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output( {0}, 0, input.shape(), &out)); functor::SetOneFunctor<Device, T> f; f(ctx->eigen_device<Device>(), out->flat<T>()); } }; #define REGISTER_KERNEL(type, dev) \ REGISTER_KERNEL_BUILDER( \ Name("OnesLike").Device(DEVICE_##dev).TypeConstraint<type>("T"), \ OnesLikeOp<dev##Device, type>) #define REGISTER_CPU(type) REGISTER_KERNEL(type, CPU) TF_CALL_POD_TYPES(REGISTER_CPU); #undef REGISTER_CPU #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) REGISTER_KERNEL(bool, GPU); REGISTER_KERNEL(Eigen::half, GPU); REGISTER_KERNEL(float, GPU); REGISTER_KERNEL(double, GPU); REGISTER_KERNEL(int64_t, GPU); REGISTER_KERNEL(complex64, GPU); REGISTER_KERNEL(complex128, GPU); #endif REGISTER_KERNEL(bfloat16, GPU); #endif #undef REGISTER_KERNEL REGISTER_KERNEL_BUILDER(Name("OnesLike") .Device(DEVICE_DEFAULT) .TypeConstraint<int32>("T") .HostMemory("y"), OnesLikeOp<CPUDevice, int32>); PlaceholderOp::PlaceholderOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("shape", &expected_shape_)); } void PlaceholderOp::Compute(OpKernelContext* ctx) { if (expected_shape_.dims() > 0) { OP_REQUIRES(ctx, false, errors::InvalidArgument( "You must feed a value for placeholder tensor '", name(), "' with dtype ", DataTypeString(output_type(0)), " and shape ", expected_shape_.DebugString())); } else { OP_REQUIRES(ctx, false, errors::InvalidArgument( "You must feed a value for placeholder tensor '", name(), "' with dtype ", DataTypeString(output_type(0)))); } } REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE_CPU), PlaceholderOp); 
REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE_CPU), PlaceholderOp); REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE_DEFAULT), PlaceholderOp); REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE_DEFAULT), PlaceholderOp); }
#include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" namespace tensorflow { class ConstantOpTest : public OpsTestBase { protected: void PersistentMemoryTrackingTest(bool on_gpu); }; void ConstantOpTest::PersistentMemoryTrackingTest(bool on_gpu) { DataType data_type = DT_INT32; std::initializer_list<int64_t> dims = {2, 3, 4, 5}; Tensor tensor(data_type, TensorShape(dims)); for (int i = 0; i < 2 * 3 * 4 * 5; ++i) { tensor.flat<int32>()(i) = i; } NodeDef const_node; TF_ASSERT_OK(NodeDefBuilder("some_node", "Const") .Attr("dtype", data_type) .Attr("value", tensor) .Finalize(&const_node)); string device_string = "CPU"; DeviceType device_type = DEVICE_CPU; if (on_gpu) { device_string = "GPU"; DeviceType device_type = DEVICE_GPU; } std::unique_ptr<Device> device(DeviceFactory::NewDevice( device_string, {}, "/job:worker/replica:0/task:0")); Status status; std::unique_ptr<OpKernel> op(CreateOpKernel(device_type, device.get(), cpu_allocator(), const_node, TF_GRAPH_DEF_VERSION, &status)); TF_ASSERT_OK(status); OpKernelContext::Params params; params.device = device.get(); params.frame_iter = FrameAndIter(0, 0); params.op_kernel = op.get(); params.track_allocations = true; OpKernelContext ctx(&params); op->Compute(&ctx); TF_EXPECT_OK(ctx.status()); if (on_gpu) { EXPECT_EQ(ctx.persistent_memory_allocated(), 512); } else { EXPECT_EQ(ctx.persistent_memory_allocated(), 480); } for (auto allocator_pair : ctx.ConsumeWrappedAllocators()) { allocator_pair.second->GetRecordsAndUnRef(); } } TEST_F(ConstantOpTest, PersistentMemoryTracking) { PersistentMemoryTrackingTest(false); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) PersistentMemoryTrackingTest(true); #endif } static Graph* ManyConsts(int num, bool sequential) { Graph* g = new Graph(OpRegistry::Global()); Node* prev = nullptr; for (int i = 0; i < num; ++i) { Tensor c(DT_FLOAT, TensorShape({})); c.scalar<float>()() = i; Node* curr = test::graph::Constant(g, c); if (sequential && prev != nullptr) { g->AddControlEdge(prev, curr); } prev = curr; } return g; } static void BM_ManyConsts_Parallel(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", ManyConsts(num, false ), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); } BENCHMARK(BM_ManyConsts_Parallel)->Range(1, 1 << 10); static void BM_ManyConsts_Sequential(::testing::benchmark::State& state) { const int num = state.range(0); test::Benchmark("cpu", ManyConsts(num, true ), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num); } BENCHMARK(BM_ManyConsts_Sequential)->Range(1, 1 << 10); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/constant_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/constant_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b9bed595-ebdb-4728-a1ce-8505c533f791
cpp
tensorflow/tensorflow
split_v_op
tensorflow/core/kernels/split_v_op.cc
tensorflow/core/kernels/split_v_op_test.cc
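The split_v_op.cc kernel code that follows allows at most one -1 entry in size_splits: the explicit sizes are summed and the -1 slot receives the remainder of the split dimension (see ComputeEasyCases below). A minimal standalone sketch of that inference, with illustrative names and simplified error handling:

#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

// Resolve a size_splits vector that may contain a single -1 entry, given the
// size of the input along split_dim.
std::vector<int64_t> ResolveSplitSizes(std::vector<int64_t> sizes,
                                       int64_t split_dim_size) {
  int neg_one_dim = -1;
  int64_t determined = 0;
  for (int i = 0; i < static_cast<int>(sizes.size()); ++i) {
    if (sizes[i] == -1) {
      if (neg_one_dim != -1) throw std::invalid_argument("only one -1 allowed");
      neg_one_dim = i;
    } else {
      determined += sizes[i];
    }
  }
  if (neg_one_dim >= 0) {
    if (determined > split_dim_size)
      throw std::invalid_argument("explicit sizes exceed the split dimension");
    sizes[neg_one_dim] = split_dim_size - determined;  // inferred size
  } else if (determined != split_dim_size) {
    throw std::invalid_argument("sizes must sum to the split dimension");
  }
  return sizes;
}

int main() {
  // Like splitting a dimension of size 10 with size_splits = [2, -1, 3].
  for (int64_t s : ResolveSplitSizes({2, -1, 3}, 10))
    std::printf("%lld ", static_cast<long long>(s));
  std::printf("\n");  // 2 5 3
  return 0;
}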
#define EIGEN_USE_THREADS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define EIGEN_USE_GPU #endif #if !defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS) && defined(__APPLE__) && \ !defined(ANDROID) && !defined(__ANDROID__) && \ (!defined(TARGET_OS_IOS) || !TARGET_OS_IOS) #define PLUGGABLE_DEVICE_SUPPORTED_MACOS 1 #endif #include <numeric> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/split_lib.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/util/work_sharder.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h" #include "tensorflow/core/kernels/gpu_device_array.h" #include "tensorflow/core/kernels/split_lib_gpu.h" #include "tensorflow/core/platform/stream_executor.h" #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T, typename Tlen> class SplitVOpBase : public OpKernel { public: explicit SplitVOpBase(OpKernelConstruction* c) : OpKernel(c) {} void ComputeEasyCases(OpKernelContext* context, bool* done, std::vector<Tlen>* split_sizes_vec) { const int32_t num_split = context->num_outputs(); const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& split_tensor = context->input(1); const Tensor& split_dim_tensor = context->input(2); OP_REQUIRES(context, split_dim_tensor.NumElements() == 1, errors::InvalidArgument("split_dim_tensor must have " "exactly one element.")); const int32_t split_dim_orig = split_dim_tensor.flat<int32>()(0); const int32_t split_dim = split_dim_orig < 0 ? split_dim_orig + input.dims() : split_dim_orig; OP_REQUIRES( context, split_tensor.dims() == 1 && split_tensor.NumElements() == num_split, errors::InvalidArgument("size of the split_tensor must be 1-D and have " "the same elements as outputs got ", split_tensor.dims(), " -D and ", split_tensor.NumElements(), " elements")); auto split_sizes_d = split_tensor.vec<Tlen>(); split_sizes_vec->resize(split_sizes_d.size()); std::copy(split_sizes_d.data(), split_sizes_d.data() + split_sizes_d.size(), split_sizes_vec->begin()); OP_REQUIRES( context, num_split > 0, errors::InvalidArgument( "Number of ways to split should be > 0, but got ", num_split)); OP_REQUIRES( context, 0 <= split_dim && split_dim < input.dims(), errors::InvalidArgument("-input rank(-", input.dims(), ") <= split_dim < input rank (", input.dims(), "), but got ", split_dim_orig)); Tlen input_size_split_dim = input_shape.dim_size(split_dim); if (num_split == 1) { context->set_output(0, context->input(0)); OP_REQUIRES( context, (*split_sizes_vec)[0] == input_size_split_dim, errors::InvalidArgument("If there is only one output, it must have " "the same size as the input. 
Input size: ", input_size_split_dim, " output size: ", (*split_sizes_vec)[0])); *done = true; return; } int neg_one_dim = -1; Tlen determined_size = 0; for (int d = 0; d < split_sizes_vec->size(); ++d) { Tlen size = (*split_sizes_vec)[d]; if (size == -1) { OP_REQUIRES(context, neg_one_dim == -1, errors::InvalidArgument("There can only be one -1 in the " "input.")); neg_one_dim = d; } else { determined_size += size; } } OP_REQUIRES( context, (neg_one_dim == -1 && determined_size == input_size_split_dim) || (neg_one_dim >= 0 && determined_size <= input_size_split_dim), errors::InvalidArgument("Determined shape must either match " "input shape along split_dim exactly if " "fully specified, or be less than the size of " "the input along split_dim if not fully " "specified. Got: ", determined_size)); if (neg_one_dim >= 0) { (*split_sizes_vec)[neg_one_dim] = input_size_split_dim - determined_size; } for (int i = 0; i < split_sizes_vec->size(); ++i) { const Tlen& split_size = (*split_sizes_vec)[i]; OP_REQUIRES(context, split_size >= Tlen(0), errors::InvalidArgument("Split size at index ", i, " must be >= 0. Got: ", split_size)); } if (SplitHasAlignedOutputsInFirstDimension( input_shape, split_dim, absl::MakeConstSpan(*split_sizes_vec))) { Tlen start = 0; for (int i = 0; i < num_split; ++i) { context->set_output(i, input.Slice(start, start + (*split_sizes_vec)[i])); start += (*split_sizes_vec)[i]; } *done = true; return; } } template <typename IndexType> std::tuple<IndexType, IndexType, IndexType> SetDims( const TensorShape& input_shape, const int32_t split_dim) const { static_assert(std::is_integral<IndexType>::value, "IndexType must be an integer type"); int32_t prefix_dim_size = 1; for (int i = 0; i < split_dim; ++i) { prefix_dim_size *= input_shape.dim_size(i); } IndexType split_dim_size = static_cast<IndexType>(input_shape.dim_size(split_dim)); IndexType suffix_dim_size = 1; for (int i = split_dim + 1; i < input_shape.dims(); ++i) { suffix_dim_size *= static_cast<IndexType>(input_shape.dim_size(i)); } return std::make_tuple(prefix_dim_size, split_dim_size, suffix_dim_size); } private: static bool SplitHasAlignedOutputsInFirstDimension( const TensorShape& input_shape, int32_t split_dim, absl::Span<const Tlen> split_sizes) { if (split_dim != 0) { return false; } Tlen start = 0; for (const Tlen split_size : split_sizes) { if (!IsDim0SliceAligned<T>(input_shape, start, start + split_size)) { return false; } start += split_size; } return true; } }; template <typename T, typename Tlen, typename InputReshapedType, int NDims> class SplitVOpCPUImpl { public: template <typename MakeSizesType, typename ReshapeResultType> void operator()(OpKernelContext* context, const InputReshapedType& input_reshaped, const std::vector<int64_t>& split_start_points, const TensorShape& input_shape, int32_t split_dim, Eigen::DenseIndex prefix_dim_size, Eigen::DenseIndex split_dim_size, Eigen::DenseIndex suffix_dim_size, std::vector<Tlen>& split_sizes_vec, const MakeSizesType& make_sizes, const ReshapeResultType& reshape_result) const { constexpr uint64 kMinimumSplitNum = 4; Eigen::DSizes<Eigen::DenseIndex, NDims> indices; for (int i = 0; i < NDims; ++i) { indices[i] = 0; } const auto num_threads = context->device()->tensorflow_cpu_worker_threads()->num_threads; const auto input_element_count = input_shape.num_elements(); const int num_split = split_start_points.size(); const bool use_parallelism_between_outputs = (num_split >= kMinimumSplitNum && input_element_count >= std::min(num_threads, num_split) * 4096 && 
input_element_count < num_split * 180 * 1024); auto range_output_func = [&indices, context, &input_shape, split_dim, &split_sizes_vec, &split_start_points, use_parallelism_between_outputs, &input_reshaped, &make_sizes, &reshape_result](int64_t start, int64_t limit) { for (int64_t i = start; i < limit; ++i) { TensorShape output_shape(input_shape); output_shape.set_dim(split_dim, split_sizes_vec[i]); Tensor* result = nullptr; OP_REQUIRES_OK(context, context->allocate_output(i, output_shape, &result)); const auto sizes = make_sizes(split_sizes_vec[i]); if (sizes.TotalSize() > 0) { auto result_shaped = reshape_result(result, split_sizes_vec[i]); auto current_indices = indices; current_indices[NDims - 2] = split_start_points[i]; if (use_parallelism_between_outputs) { result_shaped = input_reshaped.slice(current_indices, sizes); } else { functor::Split<CPUDevice, T, NDims>()( context->eigen_device<CPUDevice>(), result_shaped, input_reshaped, current_indices, sizes); } } } }; if (use_parallelism_between_outputs) { Shard(num_split, context->device()->tensorflow_cpu_worker_threads()->workers, num_split, input_element_count / num_split, range_output_func); } else { range_output_func(0, num_split); } } }; template <typename T, typename Tlen> class SplitVOpCPU : public SplitVOpBase<CPUDevice, T, Tlen> { public: typedef SplitVOpBase<CPUDevice, T, Tlen> Base; explicit SplitVOpCPU(OpKernelConstruction* c) : Base(c) {} void Compute(OpKernelContext* context) override { bool done = false; std::vector<Tlen> split_sizes_vec; Base::ComputeEasyCases(context, &done, &split_sizes_vec); if (!context->status().ok() || done) { return; } const int32_t num_split = Base::num_outputs(); const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const int32_t split_dim_orig = context->input(2).flat<int32>()(0); const int32_t split_dim = split_dim_orig < 0 ? 
split_dim_orig + input.dims() : split_dim_orig; OP_REQUIRES( context, FastBoundsCheck(input.NumElements(), std::numeric_limits<Eigen::DenseIndex>::max()), errors::InvalidArgument("Split requires input size < ", std::numeric_limits<Eigen::DenseIndex>::max())); Eigen::DenseIndex prefix_dim_size; Eigen::DenseIndex split_dim_size; Eigen::DenseIndex suffix_dim_size; std::tie(prefix_dim_size, split_dim_size, suffix_dim_size) = Base::template SetDims<Eigen::DenseIndex>(input_shape, split_dim); std::vector<int64_t> split_start_points(num_split); for (int i = 0; i < num_split; ++i) { if (i == 0) { split_start_points[i] = 0; } else { split_start_points[i] = split_start_points[i - 1] + split_sizes_vec[i - 1]; } } if (prefix_dim_size == 1) { auto input_reshaped = input.shaped<T, 2>({split_dim_size, suffix_dim_size}); auto make_sizes = [&](Eigen::DenseIndex split_size) { return Eigen::DSizes<Eigen::DenseIndex, 2>{split_size, suffix_dim_size}; }; auto reshape_result = [&](Tensor* result, Tlen split_size) { return result->shaped<T, 2>({split_size, suffix_dim_size}); }; SplitVOpCPUImpl<T, Tlen, decltype(input_reshaped), 2>{}( context, input_reshaped, split_start_points, input_shape, split_dim, prefix_dim_size, split_dim_size, suffix_dim_size, split_sizes_vec, make_sizes, reshape_result); } else { auto input_reshaped = input.shaped<T, 3>( {prefix_dim_size, split_dim_size, suffix_dim_size}); auto make_sizes = [&](Eigen::DenseIndex split_size) { return Eigen::DSizes<Eigen::DenseIndex, 3>{prefix_dim_size, split_size, suffix_dim_size}; }; auto reshape_result = [&](Tensor* result, Tlen split_size) { return result->shaped<T, 3>( {prefix_dim_size, split_size, suffix_dim_size}); }; SplitVOpCPUImpl<T, Tlen, decltype(input_reshaped), 3>{}( context, input_reshaped, split_start_points, input_shape, split_dim, prefix_dim_size, split_dim_size, suffix_dim_size, split_sizes_vec, make_sizes, reshape_result); } } }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T, typename Tlen> class SplitVOpGPU : public SplitVOpBase<GPUDevice, T, Tlen> { public: typedef SplitVOpBase<GPUDevice, T, Tlen> Base; explicit SplitVOpGPU(OpKernelConstruction* c) : Base(c) {} void Compute(OpKernelContext* context) override { bool done = false; std::vector<Tlen> split_sizes_vec; Base::ComputeEasyCases(context, &done, &split_sizes_vec); if (!context->status().ok() || done) { return; } const int32_t num_split = Base::num_outputs(); const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const int32_t split_dim_orig = context->input(2).flat<int32>()(0); const int32_t split_dim = split_dim_orig < 0 ? 
split_dim_orig + input.dims() : split_dim_orig; OP_REQUIRES( context, FastBoundsCheck(input.NumElements(), std::numeric_limits<int32>::max()), errors::InvalidArgument("Split on GPU requires input size " "< max int32")); int32_t prefix_dim_size; int32_t split_dim_size; int32_t suffix_dim_size; std::tie(prefix_dim_size, split_dim_size, suffix_dim_size) = Base::template SetDims<int32>(input_shape, split_dim); if (num_split > 16) { GpuDeviceArrayOnHost<T*> ptrs(context, num_split); OP_REQUIRES_OK(context, ptrs.Init()); GpuDeviceArrayOnHost<Tlen> offsets(context, num_split + 1); OP_REQUIRES_OK(context, offsets.Init()); Tlen offset = 0; int entry = split_sizes_vec[0]; bool fixed_size = std::all_of(split_sizes_vec.begin(), split_sizes_vec.end(), [&entry](int n) { return n == entry; }); for (int i = 0; i < num_split; ++i) { TensorShape output_shape(input_shape); output_shape.set_dim(split_dim, split_sizes_vec[i]); Tensor* result = nullptr; OP_REQUIRES_OK(context, context->allocate_output(i, output_shape, &result)); ptrs.Set(i, result->flat<T>().data()); offsets.Set(i, offset); offset += split_sizes_vec[i] * suffix_dim_size; } offsets.Set(num_split, offset); OP_REQUIRES_OK(context, ptrs.Finalize()); OP_REQUIRES_OK(context, offsets.Finalize()); if (input.NumElements() > 0) { SplitVOpGPULaunch<T, Tlen>().Run( context->eigen_device<GPUDevice>(), fixed_size, input.flat<T>().data(), prefix_dim_size, input.NumElements() / prefix_dim_size, offsets.data(), ptrs.data()); OP_REQUIRES( context, context->op_device_context()->stream()->ok(), errors::Internal("Launch of gpu kernel for SplitVOp failed")); } } else { Eigen::DenseIndex prefix_dim_size; Eigen::DenseIndex split_dim_size; Eigen::DenseIndex suffix_dim_size; std::tie(prefix_dim_size, split_dim_size, suffix_dim_size) = Base::template SetDims<Eigen::DenseIndex>(input_shape, split_dim); auto input_reshaped = input.shaped<T, 2>( {prefix_dim_size, split_dim_size * suffix_dim_size}); Eigen::DSizes<Eigen::DenseIndex, 2> indices{0, 0}; for (int i = 0; i < num_split; ++i) { TensorShape output_shape(input_shape); output_shape.set_dim(split_dim, split_sizes_vec[i]); Tensor* result = nullptr; OP_REQUIRES_OK(context, context->allocate_output(i, output_shape, &result)); Eigen::DSizes<Eigen::DenseIndex, 2> sizes{ prefix_dim_size, split_sizes_vec[i] * suffix_dim_size}; if (sizes.TotalSize() > 0) { auto result_shaped = result->shaped<T, 2>( {prefix_dim_size, split_sizes_vec[i] * suffix_dim_size}); functor::SplitCustom<GPUDevice, T>()( context->eigen_device<GPUDevice>(), result_shaped, input_reshaped, indices, sizes); } indices[1] += split_sizes_vec[i] * suffix_dim_size; } } } }; #endif #define REGISTER_SPLIT(type, len_type) \ REGISTER_KERNEL_BUILDER(Name("SplitV") \ .Device(DEVICE_CPU) \ .TypeConstraint<len_type>("Tlen") \ .TypeConstraint<type>("T") \ .HostMemory("size_splits") \ .HostMemory("split_dim"), \ SplitVOpCPU<type, len_type>); #define REGISTER_SPLIT_LEN(type) \ REGISTER_SPLIT(type, int8); \ REGISTER_SPLIT(type, int32); \ REGISTER_SPLIT(type, int64_t); TF_CALL_ALL_TYPES(REGISTER_SPLIT_LEN); TF_CALL_float8_e5m2(REGISTER_SPLIT_LEN); TF_CALL_float8_e4m3fn(REGISTER_SPLIT_LEN); TF_CALL_int4(REGISTER_SPLIT_LEN); TF_CALL_uint4(REGISTER_SPLIT_LEN); REGISTER_SPLIT_LEN(quint8) #undef REGISTER_SPLIT_LEN #undef REGISTER_SPLIT #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU(type, len_type) \ REGISTER_KERNEL_BUILDER(Name("SplitV") \ .Device(DEVICE_GPU) \ .TypeConstraint<len_type>("Tlen") \ .TypeConstraint<type>("T") \ .HostMemory("size_splits") \ 
.HostMemory("split_dim"), \ SplitVOpGPU<type, len_type>); #define REGISTER_GPU_LEN(type) \ REGISTER_GPU(type, int8); \ REGISTER_GPU(type, int32); \ REGISTER_GPU(type, int64_t); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_LEN); TF_CALL_COMPLEX_TYPES(REGISTER_GPU_LEN); #undef REGISTER_GPU_LEN #undef REGISTER_GPU #define REGISTER_GPU_int32(len_type) \ REGISTER_KERNEL_BUILDER(Name("SplitV") \ .Device(DEVICE_GPU) \ .TypeConstraint<int32>("T") \ .TypeConstraint<len_type>("Tlen") \ .HostMemory("size_splits") \ .HostMemory("split_dim") \ .HostMemory("value") \ .HostMemory("output"), \ SplitVOpCPU<int32, len_type>); REGISTER_GPU_int32(int32); REGISTER_GPU_int32(int64_t); #undef REGISTER_GPU_int32 #if defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS) #define REGISTER_DEFAULT_KERNEL(len_type) \ REGISTER_KERNEL_BUILDER(Name("SplitV") \ .Device(DEVICE_DEFAULT) \ .TypeConstraint<int32>("T") \ .TypeConstraint<len_type>("Tlen") \ .HostMemory("size_splits") \ .HostMemory("split_dim") \ .HostMemory("value") \ .HostMemory("output"), \ SplitVOpCPU<int32, len_type>); TF_CALL_int32(REGISTER_DEFAULT_KERNEL); TF_CALL_int64(REGISTER_DEFAULT_KERNEL); #undef REGISTER_DEFAULT_KERNEL #endif #endif }
#include <stdlib.h> #include <initializer_list> #include <iterator> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/bfloat16.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static std::vector<int64_t> GenerateRandomIntsWithSum(int64_t sum, int count) { CHECK_GE(count, 1); CHECK_GE(sum, count); std::vector<int64_t> temp(count); for (int i = 0; i + 1 < count; ++i) { temp[i] = lrand48() % (sum - count); } temp[count - 1] = sum - count; std::sort(temp.begin(), std::prev(temp.end())); std::vector<int64_t> result(count); std::adjacent_difference(temp.begin(), temp.end(), result.begin()); for (int i = 0; i < count; ++i) { ++result[i]; } CHECK(std::all_of(result.begin(), result.end(), [sum](int64_t x) { return x >= 1 && x <= sum; })); CHECK_EQ( std::accumulate(result.begin(), result.end(), static_cast<int64_t>(0)), sum); CHECK_EQ(result.size(), count); return result; } static Graph* MakeGraph(int split_dim, const std::vector<int64_t>& size_splits, std::initializer_list<int64_t> total_size) { Graph* g = new Graph(OpRegistry::Global()); TensorShape in_shape(total_size); Tensor in(DataTypeToEnum<float>::value, in_shape); in.flat<float>().setRandom(); Tensor split_dim_tensor = test::AsScalar<int32>(split_dim); Tensor size_splits_tensor = test::AsTensor<int64_t>(size_splits); Node* splitv; TF_CHECK_OK(NodeBuilder(g->NewName("splitv"), "SplitV") .Input(test::graph::Constant(g, in)) .Input(test::graph::Constant(g, size_splits_tensor)) .Input(test::graph::Constant(g, split_dim_tensor)) .Attr("num_split", static_cast<int64_t>(size_splits.size())) .Finalize(g, &splitv)); return g; } #define BM_SPLITV_1D(num_split, total_size) \ static void BM_SplitV_1d_##num_split##_##total_size( \ ::testing::benchmark::State& state) { \ auto label = \ strings::Printf("1-D %d chunks totaling %d", num_split, total_size); \ state.SetLabel(label); \ auto g = MakeGraph( 0, \ GenerateRandomIntsWithSum(total_size, num_split), \ {total_size}); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ total_size); \ } \ BENCHMARK(BM_SplitV_1d_##num_split##_##total_size)->UseRealTime(); #define BM_SPLITV_2D(split_dim, num_split, dim0, dim1) \ static void BM_SplitV_2d_##split_dim##_##num_split##dim0##dim1( \ ::testing::benchmark::State& state) { \ std::vector<int64_t> total_size_vec{dim0, dim1}; \ auto label = strings::Printf("2-D %d chunks in dim %d totaling (%d * %d)", \ num_split, split_dim, dim0, dim1); \ state.SetLabel(label); \ auto g = MakeGraph( \ split_dim, \ GenerateRandomIntsWithSum(total_size_vec[split_dim], num_split), \ {dim0, dim1}); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * dim0 * \ dim1); \ } \ BENCHMARK(BM_SplitV_2d_##split_dim##_##num_split##dim0##dim1)->UseRealTime(); #define BM_SPLITV_3D(split_dim, num_split, dim0, dim1, dim2) \ static void BM_SplitV_3d_##split_dim##_##num_split##dim0##dim1##dim2( \ ::testing::benchmark::State& state) { \ std::vector<int64_t> total_size_vec{dim0, dim1, dim2}; \ auto label = \ strings::Printf("3-D %d chunks in dim %d totaling (%d * %d * %d)", \ num_split, 
split_dim, dim0, dim1, dim2); \ state.SetLabel(label); \ auto g = MakeGraph( \ split_dim, \ GenerateRandomIntsWithSum(total_size_vec[split_dim], num_split), \ {dim0, dim1, dim2}); \ test::Benchmark("cpu", g, false).Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * dim0 * \ dim1 * dim2); \ } \ BENCHMARK(BM_SplitV_3d_##split_dim##_##num_split##dim0##dim1##dim2) \ ->UseRealTime(); BM_SPLITV_1D(5, 20); BM_SPLITV_1D(262144, 1000000); BM_SPLITV_1D(1, 100000); BM_SPLITV_1D(5, 100000); BM_SPLITV_1D(5, 250000); BM_SPLITV_1D(5, 500000); BM_SPLITV_1D(5, 1000000); BM_SPLITV_1D(10, 4194304); BM_SPLITV_1D(2, 4194304); BM_SPLITV_1D(100, 10240); BM_SPLITV_1D(32768, 1048576); BM_SPLITV_2D(0, 1024, 10247, 10); BM_SPLITV_2D(0, 1024, 100000, 10); BM_SPLITV_2D(0, 512, 1024, 256); BM_SPLITV_2D(0, 20, 100000, 5); BM_SPLITV_2D(0, 2, 7, 524288); BM_SPLITV_2D(0, 100, 4096, 512); BM_SPLITV_2D(1, 1024, 15, 10240); BM_SPLITV_2D(1, 1024, 10, 100000); BM_SPLITV_2D(1, 512, 1024, 2563); BM_SPLITV_2D(1, 20, 100000, 52); BM_SPLITV_2D(1, 2, 3, 524288); BM_SPLITV_2D(1, 100, 4096, 512); BM_SPLITV_3D(0, 1024, 10247, 10, 1024); BM_SPLITV_3D(0, 987, 1000, 10, 512); BM_SPLITV_3D(0, 512, 1024, 256, 128); BM_SPLITV_3D(0, 20, 100000, 5, 256); BM_SPLITV_3D(0, 2, 7, 524288, 10); BM_SPLITV_3D(0, 100, 4096, 512, 1); BM_SPLITV_3D(1, 1024, 15, 10240, 1024); BM_SPLITV_3D(1, 512, 10, 1024, 512); BM_SPLITV_3D(1, 512, 1024, 2563, 128); BM_SPLITV_3D(1, 20, 1000, 52, 256); BM_SPLITV_3D(1, 2, 3, 524288, 10); BM_SPLITV_3D(1, 100, 4096, 512, 1); BM_SPLITV_3D(2, 512, 15, 10240, 1024); BM_SPLITV_3D(2, 128, 10, 1000, 512); BM_SPLITV_3D(2, 63, 1024, 2563, 128); BM_SPLITV_3D(2, 20, 1000, 52, 256); BM_SPLITV_3D(2, 2, 3, 524288, 10); BM_SPLITV_3D(2, 1, 4096, 512, 1); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/split_v_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/split_v_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
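SplitV's ComputeEasyCases above resolves a size_splits vector that may contain a single -1 entry: the explicit sizes are summed, the -1 slot receives whatever remains of the split dimension, and the outputs are laid out at the running prefix sums. The sketch below mirrors that resolution in plain C++ with no TensorFlow types; ResolveSplitSizes and its error handling are illustrative helpers, not the kernel's own code.

// Illustrative sketch of SplitV's size_splits resolution (one optional -1 entry)
// and the per-output start offsets; not the kernel's actual helper functions.
#include <cstdio>
#include <stdexcept>
#include <vector>

std::vector<long long> ResolveSplitSizes(std::vector<long long> sizes,
                                         long long split_dim_size) {
  long long determined = 0;
  int neg_one_index = -1;
  for (int i = 0; i < static_cast<int>(sizes.size()); ++i) {
    if (sizes[i] == -1) {
      if (neg_one_index != -1) throw std::invalid_argument("only one -1 allowed");
      neg_one_index = i;
    } else {
      determined += sizes[i];
    }
  }
  if (neg_one_index >= 0) {
    if (determined > split_dim_size) throw std::invalid_argument("sizes exceed dim");
    sizes[neg_one_index] = split_dim_size - determined;  // fill in the inferred size
  } else if (determined != split_dim_size) {
    throw std::invalid_argument("sizes must sum to the split dimension");
  }
  return sizes;
}

int main() {
  // Split a length-10 dimension into chunks of 4, whatever is left, and 2.
  const auto sizes = ResolveSplitSizes({4, -1, 2}, 10);
  long long start = 0;
  for (long long s : sizes) {  // prints start/size pairs 0/4, 4/4, 8/2
    std::printf("start=%lld size=%lld\n", start, s);
    start += s;
  }
  return 0;
}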
83a1dfda-633f-4317-b2d6-68d361a98e54
cpp
tensorflow/tensorflow
cross_op
tensorflow/compiler/tf2xla/kernels/cross_op.cc
tensorflow/core/kernels/cross_op_test.cc
#include <vector> #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" namespace tensorflow { namespace { class CrossOp : public XlaOpKernel { public: explicit CrossOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* ctx) override { TensorShape in0_shape = ctx->InputShape(0); TensorShape in1_shape = ctx->InputShape(1); OP_REQUIRES(ctx, in0_shape == in1_shape, errors::InvalidArgument("Both inputs must be of same shape: ", in0_shape.DebugString(), " vs. ", in1_shape.DebugString())); OP_REQUIRES(ctx, in0_shape.dims() >= 1, errors::InvalidArgument("Input must be at least 1D", in0_shape.DebugString())); auto inner_dim = in0_shape.dim_size(in0_shape.dims() - 1); OP_REQUIRES(ctx, inner_dim == 3, errors::FailedPrecondition( "Cross-products are only defined for 3-element vectors.")); std::vector<int64_t> starts(in0_shape.dims(), 0); std::vector<int64_t> limits; const auto& dim_sizes = in0_shape.dim_sizes(); limits.reserve(dim_sizes.size()); for (auto dim_size : in0_shape.dim_sizes()) { limits.push_back(dim_size); } std::vector<int64_t> strides(in0_shape.dims(), 1); xla::XlaBuilder* b = ctx->builder(); auto in0 = ctx->Input(0); auto in1 = ctx->Input(1); starts.back() = 0; limits.back() = 1; auto u1 = xla::Slice(in0, starts, limits, strides); auto v1 = xla::Slice(in1, starts, limits, strides); starts.back() = 1; limits.back() = 2; auto u2 = xla::Slice(in0, starts, limits, strides); auto v2 = xla::Slice(in1, starts, limits, strides); starts.back() = 2; limits.back() = 3; auto u3 = xla::Slice(in0, starts, limits, strides); auto v3 = xla::Slice(in1, starts, limits, strides); auto s1 = xla::Sub(xla::Mul(u2, v3), xla::Mul(u3, v2)); auto s2 = xla::Sub(xla::Mul(u3, v1), xla::Mul(u1, v3)); auto s3 = xla::Sub(xla::Mul(u1, v2), xla::Mul(u2, v1)); auto output = xla::ConcatInDim(b, {s1, s2, s3}, in0_shape.dims() - 1); ctx->SetOutput(0, output); } private: CrossOp(const CrossOp&) = delete; void operator=(const CrossOp&) = delete; }; REGISTER_XLA_OP(Name("Cross"), CrossOp); } }
#include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class CrossOpTest : public OpsTestBase { protected: CrossOpTest() { TF_EXPECT_OK(NodeDefBuilder("cross_op", "Cross") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); } }; TEST_F(CrossOpTest, Zero) { AddInputFromArray<float>(TensorShape({3}), {0, 0, 0}); AddInputFromArray<float>(TensorShape({3}), {0, 0, 0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {0, 0, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CrossOpTest, RightHandRule) { AddInputFromArray<float>(TensorShape({2, 3}), {1, 0, 0, 0, 1, 0}); AddInputFromArray<float>(TensorShape({2, 3}), {0, 1, 0, 1, 0, 0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {{0, 0, 1, 0, 0, -1}}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CrossOpTest, ArbitraryNonintegral) { const float u1 = -0.669, u2 = -0.509, u3 = 0.125; const float v1 = -0.477, v2 = 0.592, v3 = -0.110; const float s1 = u2 * v3 - u3 * v2; const float s2 = u3 * v1 - u1 * v3; const float s3 = u1 * v2 - u2 * v1; AddInputFromArray<float>(TensorShape({3}), {u1, u2, u3}); AddInputFromArray<float>(TensorShape({3}), {v1, v2, v3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {s1, s2, s3}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-6); } class CrossOpIntTest : public OpsTestBase { protected: CrossOpIntTest() { TF_EXPECT_OK(NodeDefBuilder("cross_int_op", "Cross") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); } }; TEST_F(CrossOpIntTest, RightHandRule) { AddInputFromArray<int>(TensorShape({2, 3}), {2, 0, 0, 0, 2, 0}); AddInputFromArray<int>(TensorShape({2, 3}), {0, 2, 0, 2, 0, 0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2, 3})); test::FillValues<int>(&expected, {{0, 0, 4, 0, 0, -4}}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/cross_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/cross_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
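The XLA Cross kernel above slices the last dimension into its three components and combines them as s1 = u2*v3 - u3*v2, s2 = u3*v1 - u1*v3, s3 = u1*v2 - u2*v1. The scalar sketch below applies the same formulas to the vectors used in CrossOpTest.ArbitraryNonintegral; it is an illustration of the arithmetic only, not the kernel.

// Scalar version of the component formulas composed by the XLA Cross kernel.
#include <array>
#include <cstdio>

std::array<double, 3> Cross(const std::array<double, 3>& u,
                            const std::array<double, 3>& v) {
  return {u[1] * v[2] - u[2] * v[1],   // s1
          u[2] * v[0] - u[0] * v[2],   // s2
          u[0] * v[1] - u[1] * v[0]};  // s3
}

int main() {
  // Same inputs as CrossOpTest.ArbitraryNonintegral above.
  const std::array<double, 3> u{-0.669, -0.509, 0.125};
  const std::array<double, 3> v{-0.477, 0.592, -0.110};
  const auto s = Cross(u, v);
  std::printf("%f %f %f\n", s[0], s[1], s[2]);
  return 0;
}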
3ca3fc07-51cc-43e8-9de3-a65284addc16
cpp
tensorflow/tensorflow
quantize_op
tensorflow/core/kernels/quantize_op.cc
tensorflow/core/kernels/quantize_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/type_traits.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/cwise_ops.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" namespace { enum { QUANTIZE_MODE_MIN_COMBINED, QUANTIZE_MODE_MIN_FIRST, QUANTIZE_MODE_SCALED, }; enum { ROUND_HALF_AWAY_FROM_ZERO, ROUND_HALF_TO_EVEN, }; } namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename Device, typename T> class QuantizeV2Op : public OpKernel { public: explicit QuantizeV2Op(OpKernelConstruction* ctx) : OpKernel(ctx) { half_range_ = !std::is_signed<T>::value ? 0.0f : (static_cast<double>(std::numeric_limits<T>::max()) - static_cast<double>(std::numeric_limits<T>::min()) + 1) / 2.0f; string mode_string; OP_REQUIRES_OK(ctx, ctx->GetAttr("mode", &mode_string)); OP_REQUIRES(ctx, (mode_string == "MIN_COMBINED" || mode_string == "MIN_FIRST" || mode_string == "SCALED"), errors::InvalidArgument("Mode string must be 'MIN_COMBINED'," " 'MIN_FIRST', or 'SCALED', is '" + mode_string + "'")); if (mode_string == "MIN_COMBINED") { mode_ = QUANTIZE_MODE_MIN_COMBINED; } else if (mode_string == "MIN_FIRST") { mode_ = QUANTIZE_MODE_MIN_FIRST; } else if (mode_string == "SCALED") { mode_ = QUANTIZE_MODE_SCALED; } string round_mode_string; OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string)); OP_REQUIRES(ctx, (round_mode_string == "HALF_AWAY_FROM_ZERO" || round_mode_string == "HALF_TO_EVEN"), errors::InvalidArgument("Round mode string must be " "'HALF_AWAY_FROM_ZERO' or " "'HALF_TO_EVEN', is '" + round_mode_string + "'")); if (round_mode_string == "HALF_AWAY_FROM_ZERO") { round_mode_ = ROUND_HALF_AWAY_FROM_ZERO; } else if (round_mode_string == "HALF_TO_EVEN") { OP_REQUIRES(ctx, mode_string == "SCALED", errors::InvalidArgument("Round mode 'HALF_TO_EVEN' " "only supported for mode 'SCALED', " "b ut mode is '" + mode_string + "'.")); round_mode_ = ROUND_HALF_TO_EVEN; } OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_)); OP_REQUIRES_OK( ctx, ctx->GetAttr("ensure_minimum_range", &ensure_minimum_range_)); } void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); const Tensor& input_min_range = ctx->input(1); const Tensor& input_max_range = ctx->input(2); int num_slices = 1; if (axis_ > -1) { OP_REQUIRES( ctx, input.dims() > axis_, errors::InvalidArgument( "Axis is on a zero-based index, so its value must always be less " "than number of input's dims, but given axis value was ", axis_, " and input's dims was ", input.dims())); num_slices = input.dim_size(axis_); OP_REQUIRES(ctx, input_min_range.dims() == 1, errors::InvalidArgument( "If axis is specified, min_range must be a 1-D tensor " "whose size matches the axis dimension of the input and " "output tensors, but min_range dims are ", input_min_range.dims())); OP_REQUIRES(ctx, input_min_range.dim_size(0) == num_slices, errors::InvalidArgument( "If axis is specified, min_range must be a 1-D tensor " "whose size matches the axis dimension of the input and " "output tensors, but min_range is a 1-D tensor of size ", input_min_range.dim_size(0), " and input's axis dimension is of size ", num_slices)); OP_REQUIRES(ctx, input_max_range.dims() == 1, errors::InvalidArgument( "If axis is specified, max_range must 
be a 1-D tensor " "whose size matches the axis dimension of the input and " "output tensors, but max_range dims are ", input_max_range.dims())); OP_REQUIRES(ctx, input_max_range.dim_size(0) == num_slices, errors::InvalidArgument( "If axis is specified, max_range must be a 1-D tensor " "whose size matches the axis dimension of the input and " "output tensors, but max_range is a 1-D tensor of size ", input_max_range.dim_size(0), " and input's axis dimension is of size ", num_slices)); } else { OP_REQUIRES(ctx, input_min_range.NumElements() == 1, errors::InvalidArgument( "If axis is not specified, min_range must contain a " "single float element, but it contains ", input_min_range.NumElements(), " elements")); OP_REQUIRES(ctx, input_max_range.NumElements() == 1, errors::InvalidArgument( "If axis is not specified, max_range must contain a " "single float element, but it contains ", input_max_range.NumElements(), " elements")); } const TensorShape& minmax_shape = ctx->input(1).shape(); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output)); Tensor* output_min_tensor = nullptr; Tensor* output_max_tensor = nullptr; if (num_slices == 1) { OP_REQUIRES_OK(ctx, ctx->allocate_output(1, {}, &output_min_tensor)); OP_REQUIRES_OK(ctx, ctx->allocate_output(2, {}, &output_max_tensor)); const float min_range = input_min_range.template flat<float>()(0); const float max_range = input_max_range.template flat<float>()(0); QuantizeTensor(ctx, input, min_range, max_range, output, output_min_tensor, output_max_tensor); return; } OP_REQUIRES(ctx, mode_ != QUANTIZE_MODE_MIN_FIRST, errors::Unimplemented("MIN_FIRST mode is not implemented for " "Quantize with axis != -1.")); OP_REQUIRES_OK(ctx, ctx->allocate_output(1, minmax_shape, &output_min_tensor)); OP_REQUIRES_OK(ctx, ctx->allocate_output(2, minmax_shape, &output_max_tensor)); auto input_tensor = input.template flat_inner_outer_dims<float, 3>(axis_ - 1); int64_t pre_dim = 1, post_dim = 1; for (int i = 0; i < axis_; ++i) { pre_dim *= output->dim_size(i); } for (int i = axis_ + 1; i < output->dims(); ++i) { post_dim *= output->dim_size(i); } auto output_tensor = output->template bit_casted_shaped<T, 3>( {pre_dim, num_slices, post_dim}); auto min_ranges = input_min_range.template vec<float>(); auto max_ranges = input_max_range.template vec<float>(); for (int i = 0; i < num_slices; ++i) { QuantizeSlice(ctx->eigen_device<Device>(), ctx, input_tensor.template chip<1>(i), min_ranges(i), max_ranges(i), output_tensor.template chip<1>(i), &output_min_tensor->flat<float>()(i), &output_max_tensor->flat<float>()(i)); } } void QuantizeTensor(OpKernelContext* ctx, const Tensor& input, const float input_min_range, const float input_max_range, Tensor* output, Tensor* output_min_tensor, Tensor* output_max_tensor) { OP_REQUIRES(ctx, !(input_max_range < input_min_range), errors::InvalidArgument( "input_max_range must be larger than input_min_range.")); float min_range = std::min(0.0f, input_min_range); const float epsilon = std::max(1.0f, std::max(fabsf(input_min_range), fabsf(input_max_range))) * ensure_minimum_range_; float max_range = std::max(0.0f, std::max(input_max_range, min_range + epsilon)); if (mode_ == QUANTIZE_MODE_MIN_FIRST) { if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) { TTypes<const float>::Vec input_array = input.flat<float>(); meta::Quantize(ctx, input_array.data(), input_array.size(), min_range, max_range, output->flat<quint8>().data()); } else { FloatTensorToQuantizedInPlaceUsingEigen<T>( ctx->template 
eigen_device<Device>(), input, min_range, max_range, output); } output_min_tensor->flat<float>()(0) = min_range; output_max_tensor->flat<float>()(0) = max_range; } else { QuantizeSlice(ctx->eigen_device<Device>(), ctx, input.flat<float>(), input_min_range, input_max_range, output->template flat<T>(), &output_min_tensor->flat<float>()(0), &output_max_tensor->flat<float>()(0)); } } template <typename ConstVec, typename Vec> void QuantizeSlice(const Device& d, OpKernelContext* ctx, const ConstVec& input, float input_min_range, float input_max_range, Vec output, float* output_min_range, float* output_max_range) { OP_REQUIRES(ctx, !(input_max_range < input_min_range), errors::InvalidArgument( "input_max_range must be larger than input_min_range.")); float min_range = std::min(0.0f, input_min_range); const float epsilon = std::max(1.0f, std::max(fabsf(input_min_range), fabsf(input_max_range))) * ensure_minimum_range_; float max_range = std::max(0.0f, std::max(input_max_range, min_range + epsilon)); if (mode_ == QUANTIZE_MODE_MIN_COMBINED) { const float scale_factor = (static_cast<double>(std::numeric_limits<T>::max()) - static_cast<double>(std::numeric_limits<T>::min())) / (max_range - min_range); bool is_signed = std::is_signed<T>::value; if (is_signed) { output.device(d) = ((input.cwiseMin(max_range).cwiseMax(min_range) - min_range) * scale_factor - half_range_) .round() .template cast<T>(); } else { output.device(d) = ((input.cwiseMin(max_range).cwiseMax(min_range) - min_range) * scale_factor + 0.5f) .template cast<T>(); } } else if (mode_ == QUANTIZE_MODE_SCALED) { const int min_output_value = std::numeric_limits<T>::min() + (narrow_range_ ? 1 : 0); const int max_output_value = std::numeric_limits<T>::max(); const float scale_factor_from_min_side = (min_output_value * min_range > 0) ? min_output_value / min_range : std::numeric_limits<float>::max(); const float scale_factor_from_max_side = (max_output_value * max_range > 0) ? max_output_value / max_range : std::numeric_limits<float>::max(); const float scale_factor = std::min(scale_factor_from_min_side, scale_factor_from_max_side); min_range = min_output_value / scale_factor; max_range = max_output_value / scale_factor; if (round_mode_ == ROUND_HALF_TO_EVEN) { output.device(d) = (input.cwiseMin(max_range).cwiseMax(min_range) * scale_factor) .unaryExpr( Eigen::internal::scalar_round_half_to_even_op<float>()) .template cast<T>(); } else if (round_mode_ == ROUND_HALF_AWAY_FROM_ZERO) { output.device(d) = (input.cwiseMin(max_range).cwiseMax(min_range) * scale_factor) .round() .template cast<T>(); } } *output_min_range = min_range; *output_max_range = max_range; } private: float half_range_; float ensure_minimum_range_; int mode_; int round_mode_; int axis_; bool narrow_range_; }; REGISTER_KERNEL_BUILDER( Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<quint8>("T"), QuantizeV2Op<CPUDevice, quint8>); REGISTER_KERNEL_BUILDER( Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint8>("T"), QuantizeV2Op<CPUDevice, qint8>); REGISTER_KERNEL_BUILDER( Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<quint16>("T"), QuantizeV2Op<CPUDevice, quint16>); REGISTER_KERNEL_BUILDER( Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint16>("T"), QuantizeV2Op<CPUDevice, qint16>); REGISTER_KERNEL_BUILDER( Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint32>("T"), QuantizeV2Op<CPUDevice, qint32>); }
#include <random> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { class QuantizedOpTest : public OpsTestBase { protected: }; struct ParameterizedQuantizeOpTest : public OpsTestBase, public ::testing::WithParamInterface<int> { }; TEST_F(QuantizedOpTest, QuantizeV2) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({7}), {0.0, 1.0, 1.25, 1.75, 127.0, 255.0, 500.0}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape({7})); test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); } template <typename T> std::vector<T> ScalePerSliceAlongAxis(std::vector<int64_t> dims, int axis, const std::vector<T>& data) { uint32 seed = 123; std::minstd_rand rng(seed); int64_t out_size = 1; for (int dim : dims) { out_size *= dim; } int minor_size = 1; for (int i = axis + 1; i < dims.size(); ++i) { minor_size *= dims[i]; } std::vector<T> out(out_size); int num_slices = (axis == -1) ? 1 : dims[axis]; for (int out_idx = 0; out_idx < out_size; ++out_idx) { int in_idx = rng() % data.size(); T multiplier = ((out_idx / minor_size) % num_slices) + 1; out[out_idx] = data[in_idx] * multiplier; } return out; } TEST_P(ParameterizedQuantizeOpTest, QuantizeV2Quint8Scaled) { const int axis = GetParam(); TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "SCALED") .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; int num_slices = (axis == -1) ? 
1 : dims[axis]; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-255.0, 0.0, 1.0, 1.25, 1.75, 64.0, 127.0, 500.0})); std::vector<float> min_ranges(num_slices), max_ranges(num_slices); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { min_ranges[slice_idx] = (slice_idx + 1) * -255.0; max_ranges[slice_idx] = (slice_idx + 1) * 127.0; } AddInputFromArray<float>(TensorShape({num_slices}), min_ranges); AddInputFromArray<float>(TensorShape({num_slices}), max_ranges); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape(dims)); test::FillValues<quint8>( &expected, ScalePerSliceAlongAxis<quint8>(dims, -1, {0, 0, 2, 3, 4, 129, 255, 255})); auto output_min = *GetOutput(1); auto output_max = *GetOutput(2); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(output_min.flat<float>()(slice_idx), 0); EXPECT_EQ(output_max.flat<float>()(slice_idx), 127.0 * (slice_idx + 1)); } auto output = *GetOutput(0); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); } TEST_F(QuantizedOpTest, QuantizeV2Quint8ScaledSmallInputRange) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "SCALED") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({3}), {-1.0, 0.0, 2.0}); AddInputFromArray<float>(TensorShape({1}), {-1.0f}); AddInputFromArray<float>(TensorShape({1}), {2.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape({3})); test::FillValues<quint8>(&expected, {0, 0, 255}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_min, {0.0}); test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1)); Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_max, {2.0}); test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2)); } TEST_P(ParameterizedQuantizeOpTest, QuantizeV2Qint8Scaled) { const int axis = GetParam(); TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint8>::v()) .Attr("mode", "SCALED") .Attr("narrow_range", false) .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; int num_slices = (axis == -1) ? 
1 : dims[axis]; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-128.0, 0.0, 1.0, 1.25, 1.75, 64.0, 127.0})); std::vector<float> min_ranges(num_slices), max_ranges(num_slices); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { min_ranges[slice_idx] = (slice_idx + 1) * -128.0; max_ranges[slice_idx] = (slice_idx + 1) * 100.0; } AddInputFromArray<float>(TensorShape({num_slices}), min_ranges); AddInputFromArray<float>(TensorShape({num_slices}), max_ranges); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape(dims)); test::FillValues<qint8>( &expected, ScalePerSliceAlongAxis<qint8>(dims, -1, {-128, 0, 1, 1, 2, 64, 127})); auto output_min = *GetOutput(1); auto output_max = *GetOutput(2); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(output_min.flat<float>()(slice_idx), -128.0 * (slice_idx + 1)); EXPECT_EQ(output_max.flat<float>()(slice_idx), 127.0 * (slice_idx + 1)); } auto output = *GetOutput(0); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } TEST_P(ParameterizedQuantizeOpTest, QuantizeV2Qint8ScaledNarrowRange) { const int axis = GetParam(); TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint8>::v()) .Attr("mode", "SCALED") .Attr("narrow_range", true) .Attr("axis", axis) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const std::vector<int64_t> dims = {2, 3, 4, 5}; int num_slices = (axis == -1) ? 1 : dims[axis]; AddInputFromArray<float>( TensorShape(dims), ScalePerSliceAlongAxis<float>( dims, axis, {-128.0, 0.0, 1.0, 1.25, 1.75, 64.0, 127.0})); std::vector<float> min_ranges(num_slices), max_ranges(num_slices); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { min_ranges[slice_idx] = (slice_idx + 1) * -128.0; max_ranges[slice_idx] = (slice_idx + 1) * 100.0; } AddInputFromArray<float>(TensorShape({num_slices}), min_ranges); AddInputFromArray<float>(TensorShape({num_slices}), max_ranges); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape(dims)); test::FillValues<qint8>( &expected, ScalePerSliceAlongAxis<qint8>(dims, -1, {-127, 0, 1, 1, 2, 64, 126})); auto output_min = *GetOutput(1); auto output_max = *GetOutput(2); for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) { EXPECT_EQ(output_min.flat<float>()(slice_idx), -128.0 * (slice_idx + 1)); EXPECT_EQ(output_max.flat<float>()(slice_idx), 128.0 * (slice_idx + 1)); } auto output = *GetOutput(0); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } INSTANTIATE_TEST_SUITE_P(All, ParameterizedQuantizeOpTest, ::testing::Values(-1, 1, 3)); TEST_F(QuantizedOpTest, QuantizeV2Qint8ScaledSmallInputRange) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint8>::v()) .Attr("mode", "SCALED") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({3}), {-0.064, 0.0, 0.127}); AddInputFromArray<float>(TensorShape({1}), {-0.064f}); AddInputFromArray<float>(TensorShape({1}), {0.127f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({3})); test::FillValues<qint8>(&expected, {-64, 0, 127}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_min, {-0.128}); 
test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1)); Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_max, {0.127}); test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2)); } TEST_F(QuantizedOpTest, QuantizeV2Qint8ScaledRoundToEven) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint8>::v()) .Attr("mode", "SCALED") .Attr("round_mode", "HALF_TO_EVEN") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({7}), {-126.5, 0.0, 1.0, 2.5, 3.5, 64.0, 127.0}); AddInputFromArray<float>(TensorShape({1}), {-128.0f}); AddInputFromArray<float>(TensorShape({1}), {-128.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({7})); test::FillValues<qint8>(&expected, {-126, 0, 1, 2, 4, 64, 127}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_min, {-128.0}); test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1)); Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_max, {127.0}); test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2)); } TEST_F(QuantizedOpTest, QuantizeV2Qint8ScaledRoundAwayFromZero) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint8>::v()) .Attr("mode", "SCALED") .Attr("round_mode", "HALF_AWAY_FROM_ZERO") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({7}), {-126.5, 0.0, 1.0, 2.5, 3.5, 64.0, 127.0}); AddInputFromArray<float>(TensorShape({1}), {-128.0f}); AddInputFromArray<float>(TensorShape({1}), {-128.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({7})); test::FillValues<qint8>(&expected, {-127, 0, 1, 3, 4, 64, 127}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_min, {-128.0}); test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1)); Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_output_max, {127.0}); test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2)); } TEST_F(QuantizedOpTest, QuantizeV2_32Bit) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint32>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int element_count = 8; AddInputFromArray<float>( TensorShape({element_count}), {-500.0f, 0.0f, 1.0f, 1.25f, 1.75f, 127.0f, 255.0f, 500.0f}); AddInputFromArray<float>(TensorShape({1}), {-256.0f}); AddInputFromArray<float>(TensorShape({1}), {256.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({element_count})); test::FillValues<qint32>(&expected, { std::numeric_limits<int32>::min(), 0, static_cast<int32>(1.0f * (1 << 23)), static_cast<int32>(1.25f * (1 << 23)), static_cast<int32>(1.75f * (1 << 23)), static_cast<int32>(127.0f * (1 << 23)), static_cast<int32>(255.0f * (1 << 23)), std::numeric_limits<int32>::max(), }); const int64_t epsilon = 1 << 
8; const qint32* output_data = GetOutput(0)->flat<qint32>().data(); const qint32* expected_data = expected.flat<qint32>().data(); for (int i = 0; i < element_count; ++i) { const int64_t delta = output_data[i] - expected_data[i]; EXPECT_GT(epsilon, std::abs(delta)) << "output_data[" << i << "]=" << output_data[i] << ", expected_data[" << i << "]=" << expected_data[i] << ", delta=" << delta; } } TEST_F(QuantizedOpTest, QuantizeV2Ports) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({6}), {1.0, 1.25, 1.75, 127.0, 255.0, 500.0}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&expected, {1, 1, 2, 127, 255, 255}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); EXPECT_NEAR(0.0f, output_min, 1e-5f); EXPECT_NEAR(255.0f, output_max, 1e-5f); } TEST_F(QuantizedOpTest, QuantizeV2EqualRange) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({6}), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}); AddInputFromArray<float>(TensorShape({1}), {0.0f}); AddInputFromArray<float>(TensorShape({1}), {0.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&expected, {0, 0, 0, 0, 0, 0}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); EXPECT_NEAR(0.0f, output_min, 1e-5f); EXPECT_LT(0.0f, output_max); } TEST_F(QuantizedOpTest, QuantizeV2MovesMinToIncludeZero) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({3}), {0.1, 0.2, 0.3}); AddInputFromArray<float>(TensorShape({1}), {0.1}); AddInputFromArray<float>(TensorShape({1}), {0.3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape({3})); test::FillValues<quint8>(&expected, {85, 170, 255}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); EXPECT_NEAR(0.0f, output_min, 1e-5f); EXPECT_NEAR(0.3f, output_max, 1e-5f); } TEST_F(QuantizedOpTest, QuantizeV2MovesMaxToIncludeZero) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({3}), {-0.1, -0.2, -0.3}); AddInputFromArray<float>(TensorShape({1}), {-0.3}); AddInputFromArray<float>(TensorShape({1}), {-0.1}); TF_ASSERT_OK(RunOpKernel()); Tensor 
expected(allocator(), DT_QUINT8, TensorShape({3})); test::FillValues<quint8>(&expected, {170, 85, 0}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); EXPECT_NEAR(-0.3f, output_min, 1e-5f); EXPECT_NEAR(0.0f, output_max, 1e-5f); } TEST_F(QuantizedOpTest, Dequantize) { TF_ASSERT_OK(NodeDefBuilder("dequantize_op", "Dequantize") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("mode", "MIN_FIRST") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<quint8>(TensorShape({6}), {1, 2, 4, 8, 16, 255}); AddInputFromArray<float>(TensorShape({1}), {0}); AddInputFromArray<float>(TensorShape({1}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({6})); test::FillValues<float>(&expected, {1.0, 2.0, 4.0, 8.0, 16.0, 255.0}); test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.5); } TEST_F(QuantizedOpTest, QuantizeV2DisableEnsureMinimumRange) { TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<qint8>::v()) .Attr("mode", "MIN_FIRST") .Attr("ensure_minimum_range", 0.0f) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({3}), {-0.000001, 0.0, 0.000042}); AddInputFromArray<float>(TensorShape({1}), {-0.000128}); AddInputFromArray<float>(TensorShape({1}), {0.000127}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({3})); test::FillValues<qint8>(&expected, {-1, 0, 42}); for (int i = 0; i < 3; ++i) { LOG(INFO) << GetOutput(0)->flat<qint8>()(i); } test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); LOG(INFO) << "output_min = " << output_min; LOG(INFO) << "output_max = " << output_max; EXPECT_NEAR(-0.000128f, output_min, 1e-7f); EXPECT_NEAR(0.000127, output_max, 1e-7f); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
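In SCALED mode, QuantizeSlice above chooses one scale factor per slice: the smaller of the scale implied by mapping min_range onto the lowest representable output and the scale implied by mapping max_range onto the highest, after which the reported min/max are recomputed so they land exactly on representable integers. The sketch below reproduces that selection for a signed 8-bit output using the slice-0 ranges from the QuantizeV2Qint8Scaled test; it is plain C++ with illustrative names and omits the kernel's epsilon-based range widening.

// Illustrative sketch of the SCALED-mode scale selection in QuantizeSlice
// for a signed 8-bit output; not the kernel itself.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  const bool narrow_range = false;
  const float min_range = -128.0f;  // slice-0 ranges from QuantizeV2Qint8Scaled
  const float max_range = 100.0f;

  const int min_out =
      std::numeric_limits<std::int8_t>::min() + (narrow_range ? 1 : 0);  // -128
  const int max_out = std::numeric_limits<std::int8_t>::max();           // 127

  const float scale_from_min = (min_out * min_range > 0)
                                   ? min_out / min_range
                                   : std::numeric_limits<float>::max();
  const float scale_from_max = (max_out * max_range > 0)
                                   ? max_out / max_range
                                   : std::numeric_limits<float>::max();
  const float scale = std::min(scale_from_min, scale_from_max);  // 1.0 here

  // The op reports the representable interval, not the requested one.
  std::printf("scale=%f out_min=%f out_max=%f\n", scale, min_out / scale,
              max_out / scale);

  // Quantize one value with HALF_AWAY_FROM_ZERO rounding and clamping.
  const float x = 1.75f;
  const float clamped = std::max(min_out / scale, std::min(max_out / scale, x));
  const int q = static_cast<int>(std::round(clamped * scale));
  std::printf("q(%.2f)=%d\n", x, q);  // 2, matching the expected tensor above
  return 0;
}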
2facd601-ccda-4a4e-bc77-bdae4246546d
cpp
tensorflow/tensorflow
in_topk_op
tensorflow/compiler/tf2xla/kernels/in_topk_op.cc
tensorflow/core/kernels/in_topk_op_test.cc
#include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/arithmetic.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { class InTopKOp : public XlaOpKernel { public: explicit InTopKOp(OpKernelConstruction* context) : XlaOpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("T", &targets_dtype_)); OP_REQUIRES_OK(context, DataTypeToPrimitiveType(targets_dtype_, &targets_type_)); } void Compile(XlaOpKernelContext* context) override { int64_t k; OP_REQUIRES_OK(context, context->ConstantInputAsIntScalar(2, &k)); OP_REQUIRES(context, k >= 0, errors::InvalidArgument("Need k >= 0, got ", k)); const TensorShape predictions_shape = context->InputShape(0); OP_REQUIRES( context, predictions_shape.dims() == 2, errors::InvalidArgument("predictions must be == 2-D, got shape ", predictions_shape.DebugString())); const TensorShape targets_shape = context->InputShape(1); OP_REQUIRES(context, targets_shape.dims() == 1, errors::InvalidArgument("targets must be == 1-D, got shape ", targets_shape.DebugString())); int64_t batch_size = predictions_shape.dim_size(0); OP_REQUIRES(context, batch_size == targets_shape.dim_size(0), errors::InvalidArgument( "targets must have same elements as predictions rows. Had ", targets_shape.dim_size(0), ", needed ", batch_size)); xla::XlaOp predictions_r2 = context->Input(0); xla::XlaOp targets_r1 = context->Input(1); xla::XlaBuilder* xla_builder = context->builder(); xla::XlaOp iota_r1 = xla::Iota(xla_builder, targets_type_, predictions_shape.dim_size(1)); xla::XlaOp iota_r2 = xla::Broadcast(iota_r1, {batch_size}); xla::XlaOp eq_r2 = xla::Eq(targets_r1, iota_r2, {0}); xla::XlaOp zero_r0_f32 = xla::Zero(xla_builder, xla::F32); xla::XlaOp zero_r2_f32 = xla::ZerosLike(predictions_r2); xla::XlaOp select_r2 = xla::Select(eq_r2, predictions_r2, zero_r2_f32); xla::XlaOp targets_values_r1 = xla::Reduce( select_r2, zero_r0_f32, xla::CreateScalarAddComputation(xla::F32, xla_builder), {1}); xla::XlaOp gt_r2 = xla::Gt(predictions_r2, targets_values_r1, {0}); xla::XlaOp zero_r0 = xla::Zero(xla_builder, xla::S32); xla::XlaOp zero_r2 = xla::Broadcast(zero_r0, predictions_shape.dim_sizes()); xla::XlaOp one_r0 = xla::One(xla_builder, xla::S32); xla::XlaOp one_r2 = xla::Broadcast(one_r0, predictions_shape.dim_sizes()); xla::XlaOp one_hot_r2 = xla::Select(gt_r2, one_r2, zero_r2); xla::XlaOp num_gt_r1 = xla::Reduce( one_hot_r2, zero_r0, xla::CreateScalarAddComputation(xla::S32, xla_builder), {1}); xla::XlaOp result = xla::And(xla::Lt(num_gt_r1, xla::ConstantR0<int32>(xla_builder, k)), xla::IsFinite(targets_values_r1)); context->SetOutput(0, result); } protected: DataType targets_dtype_; xla::PrimitiveType targets_type_; InTopKOp(const InTopKOp&) = delete; void operator=(const InTopKOp&) = delete; }; REGISTER_XLA_OP(Name("InTopKV2") .CompileTimeConstantInput("k") .TypeConstraint("T", {DT_INT32, DT_INT64}), InTopKOp); } }
#include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { template <typename T> static Graph* InTopK(int num_targets, int num_classes, T top_k) { Graph* g = new Graph(OpRegistry::Global()); DataType dtype = DataTypeToEnum<T>::value; Tensor predictions_t(DT_FLOAT, TensorShape({num_targets, num_classes})); predictions_t.flat<float>().setRandom(); Tensor targets_t(dtype, TensorShape({num_targets})); targets_t.flat<T>().setRandom(); Tensor k_t(dtype, TensorShape({})); k_t.scalar<T>() = k_t.scalar<T>().constant(top_k); Node* predictions = test::graph::Constant(g, predictions_t, "predictions"); Node* targets = test::graph::Constant(g, targets_t, "targets"); Node* k = test::graph::Constant(g, k_t, "k"); Node* in_topk; TF_CHECK_OK(NodeBuilder(g->NewName("in_topk"), "InTopKV2") .Input(predictions) .Input(targets) .Input(k) .Attr("T", dtype) .Finalize(g, &in_topk)); return g; } #define BM_NAME(T, TARGETS, CLASSES, K, DEVICE) \ BM_InTopK##_##T##_##TARGETS##_##CLASSES##_##K##_##DEVICE #define BM_InTopK(T, TARGETS, CLASSES, K, DEVICE) \ static void BM_NAME(T, TARGETS, CLASSES, K, \ DEVICE)(::testing::benchmark::State & state) { \ test::Benchmark(#DEVICE, InTopK<T>(TARGETS, CLASSES, K), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \ TARGETS * CLASSES); \ } \ BENCHMARK(BM_NAME(T, TARGETS, CLASSES, K, DEVICE))->UseRealTime(); BM_InTopK(int64_t, 64, 1000, 10, cpu); BM_InTopK(int64_t, 64, 10000, 10, cpu); #if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) BM_InTopK(int64_t, 64, 1000, 10, gpu); BM_InTopK(int64_t, 64, 10000, 10, gpu); #endif }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/in_topk_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/in_topk_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e063e2c6-efc4-4009-a05a-2335b45ae426
cpp
tensorflow/tensorflow
ragged_tensor_to_tensor_op
tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc
tensorflow/core/kernels/ragged_tensor_to_tensor_op_test.cc
#define EIGEN_USE_THREADS #include <stddef.h> #include <algorithm> #include <string> #include <vector> #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/broadcast_to_op.h" #include "tensorflow/core/kernels/list_kernels.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/bfloat16.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/bcast.h" #include "tensorflow/core/util/ragged_to_dense_util.h" namespace tensorflow { namespace { typedef Eigen::ThreadPoolDevice CPUDevice; using ::std::vector; const int kShapeInputIndex = 0; const int kValueInputIndex = 1; const int kDefaultValueInputIndex = 2; const int kFirstPartitionInputIndex = 3; template <typename INDEX_TYPE> class RaggedTensorToTensorBaseOp : public OpKernel { public: typedef typename ::tensorflow::TTypes<const INDEX_TYPE>::Flat RowPartitionTensor; explicit RaggedTensorToTensorBaseOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, GetRowPartitionTypes<OpKernelConstruction>( context, &row_partition_types_)); ragged_rank_ = GetRaggedRank(row_partition_types_); } RowPartitionType GetRowPartitionTypeByDimension(int dimension) { if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) { return row_partition_types_[dimension + 1]; } else { return row_partition_types_[dimension]; } } RowPartitionTensor GetRowPartitionTensor(OpKernelContext* c, int dimension) { if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) { return c->input(dimension + 1 + kFirstPartitionInputIndex) .flat<INDEX_TYPE>(); } else { return c->input(dimension + kFirstPartitionInputIndex).flat<INDEX_TYPE>(); } } Status GetMaxWidth(OpKernelContext* c, int dimension, INDEX_TYPE* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(c, dimension - 1); switch (GetRowPartitionTypeByDimension(dimension - 1)) { case RowPartitionType::VALUE_ROWIDS: *result = GetMaxWidthValueRowID(row_partition_tensor); return absl::OkStatus(); case RowPartitionType::ROW_SPLITS: *result = GetMaxWidthRowSplit(row_partition_tensor); return absl::OkStatus(); default: return errors::InvalidArgument( "Cannot handle partition type ", RowPartitionTypeToString( GetRowPartitionTypeByDimension(dimension - 1))); } } static INDEX_TYPE GetMaxWidthRowSplit(const RowPartitionTensor& row_split) { const INDEX_TYPE tensor_length = row_split.size(); if (tensor_length == 0 || tensor_length == 1) { return 0; } INDEX_TYPE max_width = 0; for (INDEX_TYPE i = 0; i < tensor_length - 1; ++i) { const INDEX_TYPE current_width = row_split(i + 1) - row_split(i); if (current_width > max_width) { max_width = current_width; } } return max_width; } static INDEX_TYPE GetMaxWidthValueRowID( const RowPartitionTensor& value_rowids) { const INDEX_TYPE index_length = value_rowids.size(); if (index_length == 0) { return 0; } INDEX_TYPE first_equal_index = 0; INDEX_TYPE first_equal_index_value = value_rowids(0); INDEX_TYPE max_width = 
0; for (INDEX_TYPE i = 1; i < index_length; ++i) { const INDEX_TYPE value = value_rowids(i); if (value != first_equal_index_value) { first_equal_index_value = value; max_width = std::max(i - first_equal_index, max_width); first_equal_index = i; } } return std::max(index_length - first_equal_index, max_width); } Status CalculateOutputSize(INDEX_TYPE first_dim, OpKernelContext* c, vector<INDEX_TYPE>* result) { TensorShapeProto value_shape_proto; c->input(kValueInputIndex).shape().AsProto(&value_shape_proto); TensorShapeProto default_value_shape_proto; c->input(kDefaultValueInputIndex) .shape() .AsProto(&default_value_shape_proto); TensorShapeProto output_shape_proto; TF_RETURN_IF_ERROR(ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)); TensorShapeProto shape_proto; { PartialTensorShape partial_tensor_shape; TF_RETURN_IF_ERROR(TensorShapeFromTensor(c->input(kShapeInputIndex), &partial_tensor_shape)); partial_tensor_shape.AsProto(&shape_proto); } TF_RETURN_IF_ERROR(CombineRaggedTensorToTensorShapes( ragged_rank_, shape_proto, value_shape_proto, &output_shape_proto)); result->reserve(output_shape_proto.dim_size()); for (const TensorShapeProto::Dim& dim : output_shape_proto.dim()) { result->push_back(dim.size()); } if ((*result)[0] < 0) { (*result)[0] = first_dim; } for (int i = 1; i <= ragged_rank_; ++i) { if ((*result)[i] < 0) { TF_RETURN_IF_ERROR(GetMaxWidth(c, i, &(*result)[i])); } } return absl::OkStatus(); } void CalculateFirstParentOutputIndex(INDEX_TYPE first_dimension, INDEX_TYPE output_index_multiplier, INDEX_TYPE first_dimension_output, vector<INDEX_TYPE>* result) { const INDEX_TYPE min_dimension = std::min(first_dimension, first_dimension_output); result->reserve(first_dimension); int current_output_index = 0; for (INDEX_TYPE i = 0; i < min_dimension; ++i, current_output_index += output_index_multiplier) { result->push_back(current_output_index); } for (INDEX_TYPE i = min_dimension; i < first_dimension; ++i) { result->push_back(-1); } DCHECK_EQ(result->size(), first_dimension); } Status CalculateOutputIndexRowSplit( const RowPartitionTensor& row_split, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { INDEX_TYPE row_split_size = row_split.size(); if (row_split_size > 0) { result->reserve(row_split(row_split_size - 1)); } for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) { INDEX_TYPE row_length = row_split(i + 1) - row_split(i); INDEX_TYPE real_length = std::min(output_size, row_length); INDEX_TYPE parent_output_index_current = parent_output_index[i]; if (parent_output_index_current == -1) { real_length = 0; } for (INDEX_TYPE j = 0; j < real_length; ++j) { result->push_back(parent_output_index_current); parent_output_index_current += output_index_multiplier; } for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) { result->push_back(-1); } } if (row_split_size > 0 && result->size() != row_split(row_split_size - 1)) { return errors::InvalidArgument("Invalid row split size."); } return absl::OkStatus(); } Status CalculateOutputIndexValueRowID( const RowPartitionTensor& value_rowids, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { return absl::OkStatus(); } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); if (current_value_rowid >= 
parent_output_index.size()) { return errors::InvalidArgument( "Got current_value_rowid=", current_value_rowid, " which is not less than ", parent_output_index.size()); } INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { INDEX_TYPE next_value_rowid = value_rowids(i); if (next_value_rowid == current_value_rowid) { if (current_output_index >= 0) { ++current_output_column; if (current_output_column < output_size) { current_output_index += output_index_multiplier; } else { current_output_index = -1; } } } else { current_output_column = 0; current_value_rowid = next_value_rowid; if (next_value_rowid >= parent_output_index.size()) { return errors::InvalidArgument( "Got next_value_rowid=", next_value_rowid, " which is not less than ", parent_output_index.size()); } current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } if (result->size() != value_rowids.size()) { return errors::InvalidArgument("Invalid row ids."); } return absl::OkStatus(); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(context, dimension); auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: return CalculateOutputIndexValueRowID( row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); case RowPartitionType::ROW_SPLITS: if (row_partition_tensor.size() - 1 > parent_output_index.size()) { return errors::InvalidArgument( "Row partition size is greater than output size: ", row_partition_tensor.size() - 1, " > ", parent_output_index.size()); } return CalculateOutputIndexRowSplit( row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); default: return errors::InvalidArgument( "Unsupported partition type:", RowPartitionTypeToString(partition_type)); } } Status GetFirstDimensionSize(OpKernelContext* context, INDEX_TYPE* result) { const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); if (row_partition_types_.empty()) { return errors::InvalidArgument("No row_partition_types given."); } const RowPartitionType first_partition_type = row_partition_types_[0]; switch (first_partition_type) { case RowPartitionType::FIRST_DIM_SIZE: *result = first_partition_tensor.scalar<INDEX_TYPE>()(); return absl::OkStatus(); case RowPartitionType::VALUE_ROWIDS: return errors::InvalidArgument( "Cannot handle VALUE_ROWIDS in first dimension."); case RowPartitionType::ROW_SPLITS: *result = first_partition_tensor.shape().dim_size(0) - 1; return absl::OkStatus(); default: return errors::InvalidArgument( "Cannot handle type ", RowPartitionTypeToString(first_partition_type)); } } void Compute(OpKernelContext* context) override { INDEX_TYPE first_dimension; const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); OP_REQUIRES(context, first_partition_tensor.NumElements() > 0, errors::InvalidArgument("Invalid first partition input. 
Tensor " "requires at least one element.")); OP_REQUIRES_OK(context, GetFirstDimensionSize(context, &first_dimension)); vector<INDEX_TYPE> output_size; OP_REQUIRES_OK(context, CalculateOutputSize(first_dimension, context, &output_size)); vector<INDEX_TYPE> multiplier; multiplier.resize(ragged_rank_ + 1); multiplier[multiplier.size() - 1] = 1; for (int i = multiplier.size() - 2; i >= 0; --i) { multiplier[i] = multiplier[i + 1] * output_size[i + 1]; } TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(output_size, &output_shape)); Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output_tensor)); const INDEX_TYPE full_size = multiplier[0] * output_size[0]; if (full_size > 0) { vector<INDEX_TYPE> output_index, new_output_index; int nvals = context->input(kValueInputIndex).shape().dim_size(0); output_index.reserve(nvals); new_output_index.reserve(nvals); CalculateFirstParentOutputIndex(first_dimension, multiplier[0], output_size[0], &output_index); for (int i = 1; i <= ragged_rank_; ++i) { OP_REQUIRES_OK(context, CalculateOutputIndex( context, i - 1, output_index, multiplier[i], output_size[i], &new_output_index)); output_index.swap(new_output_index); new_output_index.clear(); } SetOutput(context, ragged_rank_, output_index, output_tensor); } } virtual void SetOutput(OpKernelContext* context, int ragged_rank, const vector<INDEX_TYPE>& output_index, Tensor* output_tensor) = 0; private: vector<RowPartitionType> row_partition_types_; int ragged_rank_; }; template <typename VALUE_TYPE, typename INDEX_TYPE> void slow_copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { for (INDEX_TYPE index = 0; index < size; ++index) { dst[index] = src[index]; } } template <typename VALUE_TYPE, typename INDEX_TYPE> void copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { memcpy(dst, src, size * sizeof(VALUE_TYPE)); } template <> void copy_array<tstring, int64_t>(tstring* dst, const tstring* src, int64_t size) { slow_copy_array(dst, src, size); } template <> void copy_array<tstring, int32>(tstring* dst, const tstring* src, int32_t size) { slow_copy_array(dst, src, size); } template <> void copy_array<Eigen::half, int64_t>(Eigen::half* dst, const Eigen::half* src, int64_t size) { slow_copy_array(dst, src, size); } template <> void copy_array<Eigen::half, int32>(Eigen::half* dst, const Eigen::half* src, int32_t size) { slow_copy_array(dst, src, size); } template <typename VALUE_TYPE, typename INDEX_TYPE> class RaggedTensorToTensorOp : public RaggedTensorToTensorBaseOp<INDEX_TYPE> { public: explicit RaggedTensorToTensorOp(OpKernelConstruction* context) : RaggedTensorToTensorBaseOp<INDEX_TYPE>(context) {} void SetOutput(OpKernelContext* context, int ragged_rank, const vector<INDEX_TYPE>& output_index, Tensor* output_tensor) override { if (output_tensor->NumElements() == 0) return; const auto& values_tensor = context->input(kValueInputIndex); const VALUE_TYPE* values_base = values_tensor.flat<VALUE_TYPE>().data(); const auto& default_value_tensor = context->input(kDefaultValueInputIndex); VALUE_TYPE* output_base = output_tensor->flat<VALUE_TYPE>().data(); TensorShape element_shape = output_tensor->shape(); element_shape.RemoveDimRange(0, ragged_rank + 1); int value_element_size = element_shape.num_elements(); size_t output_index_size = output_index.size(); const VALUE_TYPE* default_value = default_value_tensor.flat<VALUE_TYPE>().data(); Tensor bcast_default; if (default_value_tensor.NumElements() != 
value_element_size && default_value_tensor.NumElements() != 1) { const auto& src_shape = default_value_tensor.shape(); BCast bcast(BCast::FromShape(src_shape), BCast::FromShape(element_shape), true); OP_REQUIRES(context, bcast.IsValid(), errors::InvalidArgument("Error broadcasting default_value")); OP_REQUIRES_OK(context, context->allocate_temp(default_value_tensor.dtype(), element_shape, &bcast_default)); const CPUDevice& device = context->eigen_device<CPUDevice>(); functor::BroadcastTo<CPUDevice, VALUE_TYPE>()( device, context, bcast_default, element_shape, default_value_tensor, src_shape, bcast); default_value = bcast_default.flat<VALUE_TYPE>().data(); } INDEX_TYPE src_start = 0; INDEX_TYPE dst_start = 0; INDEX_TYPE dst_end = 0; for (int src_i = 0; src_i <= output_index_size; ++src_i) { INDEX_TYPE dst_i = src_i < output_index_size ? output_index[src_i] : -1; if (dst_i == dst_end) { ++dst_end; continue; } if (dst_start < dst_end) { const VALUE_TYPE* src = values_base + src_start * value_element_size; VALUE_TYPE* dst = output_base + dst_start * value_element_size; INDEX_TYPE nvals = (dst_end - dst_start) * value_element_size; copy_array<VALUE_TYPE, INDEX_TYPE>(dst, src, nvals); } if (src_i >= output_index_size) { size_t output_size = output_tensor->NumElements(); dst_i = output_size / value_element_size; } if (dst_i > dst_end) { if (default_value_tensor.NumElements() == 1) { std::fill(output_base + dst_end * value_element_size, output_base + dst_i * value_element_size, *default_value); dst_end = dst_i; } else { while (dst_i > dst_end) { VALUE_TYPE* dst = output_base + dst_end * value_element_size; copy_array<VALUE_TYPE, INDEX_TYPE>(dst, default_value, value_element_size); ++dst_end; } } } if (dst_i < 0) { src_start = src_i + 1; dst_start = dst_end; } else { src_start = src_i; dst_start = dst_end; dst_end = dst_start + 1; } } } }; #define REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, index_type) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorToTensor") \ .Device(DEVICE_CPU) \ .TypeConstraint<value_type>("T") \ .TypeConstraint<index_type>("Tindex"), \ RaggedTensorToTensorOp<value_type, index_type>); #define REGISTER_CPU_KERNEL(value_type) \ REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, int64_t); \ REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, tensorflow::int32); TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL); TF_CALL_string(REGISTER_CPU_KERNEL); TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL); TF_CALL_quint16(REGISTER_CPU_KERNEL); TF_CALL_qint16(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL } }
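The kernel above handles arbitrary ragged rank and several partition encodings; the simplest case, a rank-1 ragged tensor described by ROW_SPLITS padded into a dense [nrows, ncols] buffer, can be sketched standalone as follows (the helper name and the float element type are illustrative, not the op's API):

// Illustrative sketch: densify a rank-1 ragged tensor given ROW_SPLITS,
// padding with a default value and truncating rows longer than ncols. This
// mirrors CalculateOutputIndexRowSplit followed by the copy in SetOutput.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<float> RaggedRowSplitsToDense(const std::vector<float>& values,
                                          const std::vector<int64_t>& row_splits,
                                          int64_t ncols, float default_value) {
  const int64_t nrows = static_cast<int64_t>(row_splits.size()) - 1;
  std::vector<float> dense(nrows * ncols, default_value);
  for (int64_t row = 0; row < nrows; ++row) {
    const int64_t row_length = row_splits[row + 1] - row_splits[row];
    const int64_t copy_length = std::min(row_length, ncols);
    for (int64_t j = 0; j < copy_length; ++j) {
      dense[row * ncols + j] = values[row_splits[row] + j];
    }
  }
  return dense;
}

With values {.1, ..., .9}, row_splits {0, 3, 3, 7, 9} and ncols = 4, this reproduces the dense output asserted by the RaggedTensorToTensorRowSplits test in the file that follows.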
#include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { template <typename VALUE_TYPE> struct ShapeAndValues { TensorShape shape; std::vector<VALUE_TYPE> values; }; template <typename VALUE_TYPE> ShapeAndValues<VALUE_TYPE> createVector(const std::vector<VALUE_TYPE>& values) { TensorShape shape({static_cast<int64_t>(values.size())}); return {shape, values}; } template <typename VALUE_TYPE> ShapeAndValues<VALUE_TYPE> createScalar(const VALUE_TYPE& values) { TensorShape shape({}); return {shape, {values}}; } class RaggedTensorToTensorOpTest : public ::tensorflow::OpsTestBase { protected: template <typename VALUE_TYPE, typename INDEX_TYPE> void BuildRaggedTensorToTensorGraph( const TensorShape& shape, const std::vector<string>& row_partition_types, const ShapeAndValues<VALUE_TYPE>& values, const ShapeAndValues<VALUE_TYPE>& default_value, const std::vector<ShapeAndValues<INDEX_TYPE>>& row_partition_tensors) { const auto& value_dtype = DataTypeToEnum<VALUE_TYPE>::v(); const auto& index_dtype = DataTypeToEnum<INDEX_TYPE>::v(); int num_row_partition_tensors = row_partition_tensors.size(); TF_ASSERT_OK( NodeDefBuilder("tested_op", "RaggedTensorToTensor") .Attr("T", value_dtype) .Attr("Tindex", index_dtype) .Attr("num_row_partition_tensors", num_row_partition_tensors) .Attr("row_partition_types", row_partition_types) .Input(FakeInput(index_dtype)) .Input(FakeInput(value_dtype)) .Input(FakeInput(value_dtype)) .Input(FakeInput(num_row_partition_tensors, index_dtype)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); { std::vector<INDEX_TYPE> shape_as_vector; for (const auto& dim : shape.dim_sizes()) { shape_as_vector.push_back(dim); } ShapeAndValues<INDEX_TYPE> shape_as_tensor = createVector(shape_as_vector); AddInputFromArray<INDEX_TYPE>(shape_as_tensor.shape, shape_as_tensor.values); } AddInputFromArray<VALUE_TYPE>(values.shape, values.values); AddInputFromArray<VALUE_TYPE>(default_value.shape, default_value.values); for (const auto& row_partition_tensor : row_partition_tensors) { AddInputFromArray<INDEX_TYPE>(row_partition_tensor.shape, row_partition_tensor.values); } } }; TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({4, 4}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS"}, createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), {createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), test::AsTensor<float>({.1, .2, .3, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, .6, .7, .8, .9, 1.5, 1.5}, TensorShape({4, 4})), 0.01); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorRowSplits) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({4, 4}), {"ROW_SPLITS"}, createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), {createVector<int32>({0, 3, 3, 7, 9})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), 
test::AsTensor<float>({.1, .2, .3, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, .6, .7, .8, .9, 1.5, 1.5}, TensorShape({4, 4})), 0.01); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParams) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({5, 2, 3}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS"}, createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), { createScalar<int32>(5), createVector<int32>({0, 1, 1, 3, 3, 4}), createVector<int32>({1, 1, 2, 3, 3, 4, 4, 4, 5}), } ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), test::AsTensor<float>({1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .1, .2, 1.5, .3, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, 1.5, .6, .7, .8, .9, 1.5, 1.5, 1.5, 1.5, 1.5}, TensorShape({5, 2, 3})), 0.1); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParamsRowSplits) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({5, 2, 3}), {"ROW_SPLITS", "ROW_SPLITS"}, createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), { createVector<int32>({0, 1, 3, 3, 5, 6}), createVector<int32>({0, 0, 2, 3, 5, 8, 9}), } ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), test::AsTensor<float>({1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .1, .2, 1.5, .3, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, 1.5, .6, .7, .8, .9, 1.5, 1.5, 1.5, 1.5, 1.5}, TensorShape({5, 2, 3})), 0.1); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParamsRowSplits2) { BuildRaggedTensorToTensorGraph<int64_t, int64_t>( TensorShape({3, 2, 3}), {"ROW_SPLITS", "ROW_SPLITS"}, createVector<int64_t>({0, 1, 2, 3}), createScalar<int64_t>(5), { createVector<int64_t>({0, 2, 2, 3}), createVector<int64_t>({0, 3, 3, 4}), } ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>( *GetOutput(0), test::AsTensor<int64_t>( {0, 1, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5, 5}, TensorShape({3, 2, 3}))); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_4DParams) { BuildRaggedTensorToTensorGraph<int32, int32>( TensorShape({4, 2, 3, 2}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS", "VALUE_ROWIDS"}, createVector<int32>({1, 2, 3, 4, 5, 6, 7, 8}), createScalar<int32>(15), {createScalar<int32>(5), createVector<int32>({0, 1, 1}), createVector<int32>({1, 1, 1, 2}), createVector<int32>({0, 0, 1, 1, 2, 2, 3, 3})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int32>( *GetOutput(0), test::AsTensor<int32>( {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, TensorShape({4, 2, 3, 2}))); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_4DParamsRowSplit) { BuildRaggedTensorToTensorGraph<int32, int32>( TensorShape({4, 2, 3, 2}), {"ROW_SPLITS", "ROW_SPLITS", "ROW_SPLITS"}, createVector<int32>({1, 2, 3, 4, 5, 6, 7, 8}), createScalar<int32>(15), {createVector<int32>({0, 1, 3}), createVector<int32>({0, 0, 3, 4}), createVector<int32>({0, 2, 4, 6, 8})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int32>( *GetOutput(0), test::AsTensor<int32>( {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, TensorShape({4, 2, 3, 2}))); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorContractExpanded) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({3, 5}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS"}, 
createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), {createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), test::AsTensor<float>({.1, .2, .3, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .4, .5, .6, .7, 1.5}, TensorShape({3, 5})), 0.01); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorContractExpandedDense) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({3, 5, 2}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS"}, ShapeAndValues<float>{TensorShape({9, 2}), {.1, 1.1, .2, 1.2, .3, 1.3, .4, 1.4, .5, 1.5, .6, 1.6, .7, 1.7, .8, 1.8, .9, 1.9}}, createScalar<float>(1.5), {createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), test::AsTensor<float>( {.1, 1.1, .2, 1.2, .3, 1.3, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, .4, 1.4, .5, 1.5, .6, 1.6, .7, 1.7, 1.5, 1.5}, TensorShape({3, 5, 2})), 0.01); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensorConstrained) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({3, 3}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS"}, createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), {createScalar<int32>(4), createVector<int32>({0, 0, 0, 2, 2, 2, 2, 3, 3})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>(*GetOutput(0), test::AsTensor<float>( { .1, .2, .3, 1.5, 1.5, 1.5, .4, .5, .6 }, TensorShape({3, 3})), 0.01); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_3DParamsConstrained) { BuildRaggedTensorToTensorGraph<float, int32>( TensorShape({4, 1, 2}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS"}, createVector<float>({.1, .2, .3, .4, .5, .6, .7, .8, .9}), createScalar<float>(1.5), { createScalar<int32>(5), createVector<int32>({0, 1, 1, 3, 3, 4}), createVector<int32>({1, 1, 2, 3, 3, 4, 4, 4, 5}), } ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorNear<float>( *GetOutput(0), test::AsTensor<float>({1.5, 1.5, .1, .2, 1.5, 1.5, .4, .5}, TensorShape({4, 1, 2})), 0.01); } TEST_F(RaggedTensorToTensorOpTest, RaggedTensorToTensor_4DParamsConstrained) { BuildRaggedTensorToTensorGraph<int32, int32>( TensorShape({2, 2, 2, 2}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS", "VALUE_ROWIDS"}, createVector<int32>({1, 2, 3, 4, 5, 6, 7, 8}), createScalar<int32>(15), {createScalar<int32>(5), createVector<int32>({0, 1, 1}), createVector<int32>({1, 1, 1, 2}), createVector<int32>({0, 0, 1, 1, 2, 2, 3, 3})} ); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int32>(*GetOutput(0), test::AsTensor<int32>( { 15, 15, 15, 15, 15, 15, 15, 15, 1, 2, 3, 4, 7, 8, 15, 15, }, TensorShape({2, 2, 2, 2}))); } TEST_F(RaggedTensorToTensorOpTest, ShapeWrongDimensions) { BuildRaggedTensorToTensorGraph<int32, int32>( TensorShape({10, 7, 10, 20}), {"FIRST_DIM_SIZE", "VALUE_ROWIDS", "VALUE_ROWIDS"}, createVector<int32>({1, 2, 3, 4}), createScalar<int32>(15), {createScalar<int32>(5), createVector<int32>({0, 1, 1}), createVector<int32>({1, 1, 1, 2})} ); EXPECT_EQ(errors::IsInvalidArgument(RunOpKernel()), true); } class RaggedTensorToTensorOpUnknownShapeTest : public ::tensorflow::OpsTestBase { protected: std::unique_ptr<ShapeInferenceTestOp> op_; void SetAttributes(const absl::Span<const string> row_partition_types, int num_row_partition_tensors) { op_ = std::make_unique<ShapeInferenceTestOp>("RaggedTensorToTensor"); SetAttrValue(row_partition_types, 
&((*op_->node_def.mutable_attr())["row_partition_types"])); (*op_->node_def.mutable_attr())["num_row_partition_tensors"].set_i( num_row_partition_tensors); } }; TEST_F(RaggedTensorToTensorOpUnknownShapeTest, ValueRowIDs) { SetAttributes(absl::Span<const string>{"FIRST_DIM_SIZE", "VALUE_ROWIDS"}, 2); INFER_OK(*op_, "?;?;?;?;?", "?"); INFER_OK(*op_, "?;[6];[];[];[6]", "[?,?]"); INFER_OK(*op_, "?;[6];?;[];[6]", "[?,?]"); INFER_OK(*op_, "?;?;[];[];[6]", "?"); INFER_OK(*op_, "?;[6];?;[];[6]", "[?,?]"); INFER_OK(*op_, "?;[6,2];?;[];[6]", "[?,?,2]"); INFER_OK(*op_, "?;[6,2];[2];[];[6]", "[?,?,2]"); INFER_OK(*op_, "?;[6,2,7];[2,7];[];[6]", "[?,?,2,7]"); INFER_ERROR( "default_value.shape=[3] and rt_input.flat_values.shape=[6,2] " "are incompatible", *op_, "?;[6,2];[3];[];[6]"); INFER_ERROR( "default_value.shape=[2,2] and rt_input.flat_values.shape=" "[6,2,1,2] are incompatible", *op_, "?;[6,2,1,2];[2,2];[];[6]"); INFER_ERROR("must be a vector", *op_, "?;[6];[];[];[3,6]"); INFER_ERROR("must be a scalar", *op_, "?;[6];[];[7];[3]"); } TEST_F(RaggedTensorToTensorOpUnknownShapeTest, RowSplits) { SetAttributes(absl::Span<const string>{"ROW_SPLITS"}, 1); INFER_OK(*op_, "?;?;?;?", "?"); INFER_OK(*op_, "?;[3];[];[6]", "[?,?]"); INFER_OK(*op_, "?;?;?;?", "?"); INFER_OK(*op_, "?;[3,2];[2];[6]", "[?,?,2]"); INFER_OK(*op_, "?;[3,2,7];[2,7];[6]", "[?,?,2,7]"); INFER_OK(*op_, "?;[3,2,7];[2,7];[6]", "[?,?,2,7]"); } } }
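The VALUE_ROWIDS tests above encode the same partitioning as one row id per value instead of split offsets. A corresponding standalone sketch for the rank-1 case, assuming the row ids are valid indices in [0, nrows) (again with illustrative names, not the op's API):

// Illustrative sketch: densify a rank-1 ragged tensor given VALUE_ROWIDS,
// padding with a default value and dropping values past column ncols - 1.
#include <cstdint>
#include <vector>

std::vector<float> RaggedValueRowIdsToDense(const std::vector<float>& values,
                                            const std::vector<int32_t>& value_rowids,
                                            int64_t nrows, int64_t ncols,
                                            float default_value) {
  std::vector<float> dense(nrows * ncols, default_value);
  std::vector<int64_t> next_col(nrows, 0);  // next free column in each row
  for (size_t i = 0; i < values.size(); ++i) {
    const int32_t row = value_rowids[i];
    if (next_col[row] < ncols) {
      dense[row * ncols + next_col[row]] = values[i];
    }
    ++next_col[row];
  }
  return dense;
}

Feeding it the first test's inputs (values {.1, ..., .9}, row ids {0, 0, 0, 2, 2, 2, 2, 3, 3}, nrows = 4, ncols = 4, default 1.5) gives the same 4x4 tensor that test expects.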
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_tensor_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
379c40f5-6a9c-41ec-aaa3-7c8015cd1416
cpp
tensorflow/tensorflow
quantized_add_op
tensorflow/core/kernels/quantized_add_op.cc
tensorflow/core/kernels/quantized_add_op_test.cc
#define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #define QUANTIZED_ADD_USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { namespace { template <class T, class Toutput> void ScalarAddition(OpKernelContext* context, const T* full_input, float full_input_min, float full_input_max, int64_t num_elements, T scalar_input, float scalar_input_min, float scalar_input_max, float output_min, float output_max, Toutput* output) { const Toutput scalar_in_output_range = RequantizeInNewRange<T, Toutput>( scalar_input, scalar_input_min, scalar_input_max, output_min, output_max); for (int i = 0; i < num_elements; ++i) { const Toutput full_input_in_output_range = RequantizeInNewRange<T, Toutput>( full_input[i], full_input_min, full_input_max, output_min, output_max); output[i] = full_input_in_output_range + scalar_in_output_range; } } #ifdef QUANTIZED_ADD_USE_NEON template <> void ScalarAddition(OpKernelContext* context, const quint8* full_input, float full_input_min, float full_input_max, int64 num_elements, quint8 scalar_input, float scalar_input_min, float scalar_input_max, float output_min, float output_max, qint32* output) { const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>( scalar_input, scalar_input_min, scalar_input_max, output_min, output_max); const float input_0_float = QuantizedToFloat<quint8>(0, full_input_min, full_input_max); const float input_1_float = QuantizedToFloat<quint8>(1, full_input_min, full_input_max); const int64 input_0_int64 = FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max); const int64 input_1_int64 = FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max); const int32 input_mult_int32 = input_1_int64 - input_0_int64; const int64 lowest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest()); const int64 highest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::highest()); const int64x2_t input_0_64x2 = vmovq_n_s64(input_0_int64); const int32x2_t input_mult_32x2 = vmov_n_s32(input_mult_int32); const int32x4_t scalar_in_output_range_32x4 = vmovq_n_s32(scalar_in_output_range); int64 i = 0; for (; i < (num_elements - 7); i += 8) { const uint8* full_input_ptr = &(full_input->value) + i; const std::array<int32x4_t, 2> output_value = Requantize8x8To32Neon(full_input_ptr, input_0_64x2, input_mult_32x2); const int32x4_t result_low_32x4 = vaddq_s32(output_value[0], scalar_in_output_range_32x4); const int32x4_t result_high_32x4 = vaddq_s32(output_value[1], scalar_in_output_range_32x4); int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, result_low_32x4); vst1q_s32(output_ptr + 4, result_high_32x4); } for (; i < num_elements; ++i) { const int64 full_input_value = static_cast<int64_t>(full_input[i]); int64 full_input_in_output_range_64 = input_0_int64 + (full_input_value * input_mult_int32); full_input_in_output_range_64 = std::max(full_input_in_output_range_64, lowest_quantized); full_input_in_output_range_64 = std::min(full_input_in_output_range_64, highest_quantized); const int32 full_input_in_output_range = static_cast<int32>(full_input_in_output_range_64); output[i] = full_input_in_output_range + 
scalar_in_output_range; } } #else template <> void ScalarAddition(OpKernelContext* context, const quint8* full_input, float full_input_min, float full_input_max, int64_t num_elements, quint8 scalar_input, float scalar_input_min, float scalar_input_max, float output_min, float output_max, qint32* output) { const int32_t scalar_in_output_range = RequantizeInNewRange<quint8, qint32>( scalar_input, scalar_input_min, scalar_input_max, output_min, output_max); const float input_0_float = QuantizedToFloat<quint8>(0, full_input_min, full_input_max); const float input_1_float = QuantizedToFloat<quint8>(1, full_input_min, full_input_max); const int64_t input_0_int64 = FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max); const int64_t input_1_int64 = FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max); const int32_t input_mult_int32 = input_1_int64 - input_0_int64; const int64_t lowest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest()); const int64_t highest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::highest()); for (int i = 0; i < num_elements; ++i) { const int64_t full_input_value = static_cast<int64_t>(full_input[i]); int64_t full_input_in_output_range_64 = input_0_int64 + (full_input_value * input_mult_int32); full_input_in_output_range_64 = std::max(full_input_in_output_range_64, lowest_quantized); full_input_in_output_range_64 = std::min(full_input_in_output_range_64, highest_quantized); const int32_t full_input_in_output_range = static_cast<int32>(full_input_in_output_range_64); output[i] = full_input_in_output_range + scalar_in_output_range; } } #endif template <class T, class Toutput> void VectorAddition(OpKernelContext* context, const T* x_data, float min_x, float max_x, const T* y_data, float min_y, float max_y, int64_t num_elements, float output_min, float output_max, Toutput* output) { for (int i = 0; i < num_elements; ++i) { const Toutput x_in_output_range = RequantizeInNewRange<T, Toutput>( x_data[i], min_x, max_x, output_min, output_max); const Toutput y_in_output_range = RequantizeInNewRange<T, Toutput>( y_data[i], min_y, max_y, output_min, output_max); output[i] = x_in_output_range + y_in_output_range; } } #ifdef QUANTIZED_ADD_USE_NEON template <> void VectorAddition(OpKernelContext* context, const quint8* x_data, float min_x, float max_x, const quint8* y_data, float min_y, float max_y, int64 num_elements, float output_min, float output_max, qint32* output) { const float x_0_float = QuantizedToFloat<quint8>(0, min_x, max_x); const float x_1_float = QuantizedToFloat<quint8>(1, min_x, max_x); const int64 x_0_int64 = FloatToQuantizedUnclamped<qint32>(x_0_float, output_min, output_max); const int64 x_1_int64 = FloatToQuantizedUnclamped<qint32>(x_1_float, output_min, output_max); const int32 x_mult_int32 = x_1_int64 - x_0_int64; const float y_0_float = QuantizedToFloat<quint8>(0, min_y, max_y); const float y_1_float = QuantizedToFloat<quint8>(1, min_y, max_y); const int64 y_0_int64 = FloatToQuantizedUnclamped<qint32>(y_0_float, output_min, output_max); const int64 y_1_int64 = FloatToQuantizedUnclamped<qint32>(y_1_float, output_min, output_max); const int32 y_mult_int32 = y_1_int64 - y_0_int64; const int64 lowest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest()); const int64 highest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::highest()); const int64x2_t x_0_64x2 = vmovq_n_s64(x_0_int64); const int32x2_t x_mult_32x2 = vmov_n_s32(x_mult_int32); const int64x2_t y_0_64x2 = 
vmovq_n_s64(y_0_int64); const int32x2_t y_mult_32x2 = vmov_n_s32(y_mult_int32); int64 i = 0; for (; i < (num_elements - 7); i += 8) { const uint8* x_ptr = &(x_data->value) + i; const std::array<int32x4_t, 2> x_output_value = Requantize8x8To32Neon(x_ptr, x_0_64x2, x_mult_32x2); const uint8* y_ptr = &(y_data->value) + i; const std::array<int32x4_t, 2> y_output_value = Requantize8x8To32Neon(y_ptr, y_0_64x2, y_mult_32x2); const int32x4_t result_low_32x4 = vaddq_s32(x_output_value[0], y_output_value[0]); const int32x4_t result_high_32x4 = vaddq_s32(x_output_value[1], y_output_value[1]); int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, result_low_32x4); vst1q_s32(output_ptr + 4, result_high_32x4); } for (; i < num_elements; ++i) { const int64 x_value = static_cast<int64_t>(x_data[i]); int64 x_in_output_range_64 = x_0_int64 + (x_value * x_mult_int32); x_in_output_range_64 = std::max(x_in_output_range_64, lowest_quantized); x_in_output_range_64 = std::min(x_in_output_range_64, highest_quantized); const int32 x_in_output_range = static_cast<int32>(x_in_output_range_64); const int64 y_value = static_cast<int64_t>(y_data[i]); int64 y_in_output_range_64 = y_0_int64 + (y_value * y_mult_int32); y_in_output_range_64 = std::max(y_in_output_range_64, lowest_quantized); y_in_output_range_64 = std::min(y_in_output_range_64, highest_quantized); const int32 y_in_output_range = static_cast<int32>(y_in_output_range_64); output[i] = x_in_output_range + y_in_output_range; } } #else template <> void VectorAddition(OpKernelContext* context, const quint8* x_data, float min_x, float max_x, const quint8* y_data, float min_y, float max_y, int64_t num_elements, float output_min, float output_max, qint32* output) { const float x_0_float = QuantizedToFloat<quint8>(0, min_x, max_x); const float x_1_float = QuantizedToFloat<quint8>(1, min_x, max_x); const int64_t x_0_int64 = FloatToQuantizedUnclamped<qint32>(x_0_float, output_min, output_max); const int64_t x_1_int64 = FloatToQuantizedUnclamped<qint32>(x_1_float, output_min, output_max); const int32_t x_mult_int32 = x_1_int64 - x_0_int64; const float y_0_float = QuantizedToFloat<quint8>(0, min_y, max_y); const float y_1_float = QuantizedToFloat<quint8>(1, min_y, max_y); const int64_t y_0_int64 = FloatToQuantizedUnclamped<qint32>(y_0_float, output_min, output_max); const int64_t y_1_int64 = FloatToQuantizedUnclamped<qint32>(y_1_float, output_min, output_max); const int32_t y_mult_int32 = y_1_int64 - y_0_int64; const int64_t lowest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest()); const int64_t highest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::highest()); for (int i = 0; i < num_elements; ++i) { const int64_t x_value = static_cast<int64_t>(x_data[i]); int64_t x_in_output_range_64 = x_0_int64 + (x_value * x_mult_int32); x_in_output_range_64 = std::max(x_in_output_range_64, lowest_quantized); x_in_output_range_64 = std::min(x_in_output_range_64, highest_quantized); const int32_t x_in_output_range = static_cast<int32>(x_in_output_range_64); const int64_t y_value = static_cast<int64_t>(y_data[i]); int64_t y_in_output_range_64 = y_0_int64 + (y_value * y_mult_int32); y_in_output_range_64 = std::max(y_in_output_range_64, lowest_quantized); y_in_output_range_64 = std::min(y_in_output_range_64, highest_quantized); const int32_t y_in_output_range = static_cast<int32>(y_in_output_range_64); output[i] = x_in_output_range + y_in_output_range; } } #endif template <class T, class Toutput> void VectorTensorAddition(const T* vector_data, 
float min_vector, float max_vector, int64_t vector_num_elements, const T* tensor_data, float min_tensor, float max_tensor, int64_t tensor_num_elements, float output_min, float output_max, Toutput* output) { for (int i = 0; i < tensor_num_elements; ++i) { const int64_t vector_i = i % vector_num_elements; const Toutput vector_in_output_range = RequantizeInNewRange<T, Toutput>( vector_data[vector_i], min_vector, max_vector, output_min, output_max); const Toutput tensor_in_output_range = RequantizeInNewRange<T, Toutput>( tensor_data[i], min_tensor, max_tensor, output_min, output_max); output[i] = vector_in_output_range + tensor_in_output_range; } } #ifdef QUANTIZED_ADD_USE_NEON template <> void VectorTensorAddition(const quint8* vector_data, float min_vector, float max_vector, int64 vector_num_elements, const quint8* tensor_data, float min_tensor, float max_tensor, int64 tensor_num_elements, float output_min, float output_max, qint32* output) { const float vector_0_float = QuantizedToFloat<quint8>(0, min_vector, max_vector); const float vector_1_float = QuantizedToFloat<quint8>(1, min_vector, max_vector); const int64 vector_0_int64 = FloatToQuantizedUnclamped<qint32>(vector_0_float, output_min, output_max); const int64 vector_1_int64 = FloatToQuantizedUnclamped<qint32>(vector_1_float, output_min, output_max); const int32 vector_mult_int32 = vector_1_int64 - vector_0_int64; const float tensor_0_float = QuantizedToFloat<quint8>(0, min_tensor, max_tensor); const float tensor_1_float = QuantizedToFloat<quint8>(1, min_tensor, max_tensor); const int64 tensor_0_int64 = FloatToQuantizedUnclamped<qint32>(tensor_0_float, output_min, output_max); const int64 tensor_1_int64 = FloatToQuantizedUnclamped<qint32>(tensor_1_float, output_min, output_max); const int32 tensor_mult_int32 = tensor_1_int64 - tensor_0_int64; const int64 lowest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest()); const int64 highest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::highest()); const int64x2_t vector_0_64x2 = vmovq_n_s64(vector_0_int64); const int32x2_t vector_mult_32x2 = vmov_n_s32(vector_mult_int32); const int64x2_t tensor_0_64x2 = vmovq_n_s64(tensor_0_int64); const int32x2_t tensor_mult_32x2 = vmov_n_s32(tensor_mult_int32); for (int64 base_i = 0; base_i < tensor_num_elements; base_i += vector_num_elements) { int64 i = base_i; int64 vector_i = 0; for (; vector_i < (vector_num_elements - 7); vector_i += 8, i += 8) { const uint8* vector_ptr = &(vector_data->value) + vector_i; const std::array<int32x4_t, 2> vector_output_value = Requantize8x8To32Neon(vector_ptr, vector_0_64x2, vector_mult_32x2); const uint8* tensor_ptr = &(tensor_data->value) + i; const std::array<int32x4_t, 2> tensor_output_value = Requantize8x8To32Neon(tensor_ptr, tensor_0_64x2, tensor_mult_32x2); const int32x4_t result_low_32x4 = vaddq_s32(vector_output_value[0], tensor_output_value[0]); const int32x4_t result_high_32x4 = vaddq_s32(vector_output_value[1], tensor_output_value[1]); int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, result_low_32x4); vst1q_s32(output_ptr + 4, result_high_32x4); } for (; vector_i < vector_num_elements; ++vector_i, ++i) { const int64 vector_value = static_cast<int64_t>(vector_data[vector_i]); int64 vector_in_output_range_64 = vector_0_int64 + (vector_value * vector_mult_int32); vector_in_output_range_64 = std::max(vector_in_output_range_64, lowest_quantized); vector_in_output_range_64 = std::min(vector_in_output_range_64, highest_quantized); const int32 vector_in_output_range = 
static_cast<int32>(vector_in_output_range_64); const int64 tensor_value = static_cast<int64_t>(tensor_data[i]); int64 tensor_in_output_range_64 = tensor_0_int64 + (tensor_value * tensor_mult_int32); tensor_in_output_range_64 = std::max(tensor_in_output_range_64, lowest_quantized); tensor_in_output_range_64 = std::min(tensor_in_output_range_64, highest_quantized); const int32 tensor_in_output_range = static_cast<int32>(tensor_in_output_range_64); output[i] = vector_in_output_range + tensor_in_output_range; } } } #else template <> void VectorTensorAddition(const quint8* vector_data, float min_vector, float max_vector, int64_t vector_num_elements, const quint8* tensor_data, float min_tensor, float max_tensor, int64_t tensor_num_elements, float output_min, float output_max, qint32* output) { const float vector_0_float = QuantizedToFloat<quint8>(0, min_vector, max_vector); const float vector_1_float = QuantizedToFloat<quint8>(1, min_vector, max_vector); const int64_t vector_0_int64 = FloatToQuantizedUnclamped<qint32>(vector_0_float, output_min, output_max); const int64_t vector_1_int64 = FloatToQuantizedUnclamped<qint32>(vector_1_float, output_min, output_max); const int32_t vector_mult_int32 = vector_1_int64 - vector_0_int64; const float tensor_0_float = QuantizedToFloat<quint8>(0, min_tensor, max_tensor); const float tensor_1_float = QuantizedToFloat<quint8>(1, min_tensor, max_tensor); const int64_t tensor_0_int64 = FloatToQuantizedUnclamped<qint32>(tensor_0_float, output_min, output_max); const int64_t tensor_1_int64 = FloatToQuantizedUnclamped<qint32>(tensor_1_float, output_min, output_max); const int32_t tensor_mult_int32 = tensor_1_int64 - tensor_0_int64; const int64_t lowest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest()); const int64_t highest_quantized = static_cast<int64_t>(Eigen::NumTraits<qint32>::highest()); for (int i = 0; i < tensor_num_elements; ++i) { const int64_t vector_i = i % vector_num_elements; const int64_t vector_value = static_cast<int64_t>(vector_data[vector_i]); int64_t vector_in_output_range_64 = vector_0_int64 + (vector_value * vector_mult_int32); vector_in_output_range_64 = std::max(vector_in_output_range_64, lowest_quantized); vector_in_output_range_64 = std::min(vector_in_output_range_64, highest_quantized); const int32_t vector_in_output_range = static_cast<int32>(vector_in_output_range_64); const int64_t tensor_value = static_cast<int64_t>(tensor_data[i]); int64_t tensor_in_output_range_64 = tensor_0_int64 + (tensor_value * tensor_mult_int32); tensor_in_output_range_64 = std::max(tensor_in_output_range_64, lowest_quantized); tensor_in_output_range_64 = std::min(tensor_in_output_range_64, highest_quantized); const int32_t tensor_in_output_range = static_cast<int32>(tensor_in_output_range_64); output[i] = vector_in_output_range + tensor_in_output_range; } } #endif } template <class T, class Toutput> class QuantizedAddOp : public OpKernel { public: explicit QuantizedAddOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); const Tensor& min_x_tensor = context->input(2); const Tensor& max_x_tensor = context->input(3); const Tensor& min_y_tensor = context->input(4); const Tensor& max_y_tensor = context->input(5); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), errors::InvalidArgument("`min_x` must be rank 0 but is rank ", min_x_tensor.dims())); OP_REQUIRES(context, 
TensorShapeUtils::IsScalar(max_x_tensor.shape()), errors::InvalidArgument("`max_x` must be rank 0 but is rank ", max_x_tensor.dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), errors::InvalidArgument("`min_y` must be rank 0 but is rank ", min_y_tensor.dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), errors::InvalidArgument("`max_y` must be rank 0 but is rank ", max_y_tensor.dims())); const float min_x = min_x_tensor.scalar<float>()(); const float max_x = max_x_tensor.scalar<float>()(); const float min_y = min_y_tensor.scalar<float>()(); const float max_y = max_y_tensor.scalar<float>()(); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_x.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_y must be larger than min_y.")); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const float smallest_min = std::min(min_x, min_y); const float largest_max = std::max(max_x, max_y); const float biggest_range = std::max(std::abs(smallest_min), std::abs(largest_max)); const float output_range = (biggest_range * (1 << 14)); const float min_z_value = -output_range; const float max_z_value = output_range; const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarAddition<T, Toutput>(context, y_data, min_y, max_y, y.NumElements(), x_data[0], min_x, max_x, min_z_value, max_z_value, z_data); } else if (y.NumElements() == 1) { ScalarAddition<T, Toutput>(context, x_data, min_x, max_x, x.NumElements(), y_data[0], min_y, max_y, min_z_value, max_z_value, z_data); } else { VectorAddition<T, Toutput>(context, x_data, min_x, max_x, y_data, min_y, max_y, x.NumElements(), min_z_value, max_z_value, z_data); } } else if (ndims == 2) { const T* vector_data; int64_t vector_num_elements; float vector_min; float vector_max; const T* tensor_data; int64_t tensor_num_elements; float tensor_min; float tensor_max; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_min = min_x; vector_max = max_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_min = min_y; tensor_max = max_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_min = min_y; vector_max = max_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_min = min_x; tensor_max = max_x; } OP_REQUIRES(context, vector_num_elements > 0, errors::InvalidArgument("Must have some elements to add")); VectorTensorAddition<T, Toutput>( vector_data, vector_min, vector_max, vector_num_elements, tensor_data, tensor_min, tensor_max, tensor_num_elements, min_z_value, max_z_value, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between 
", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedAdd") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedAddOp<quint8, qint32>); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace ops { namespace { void TestAdd(const std::vector<int64_t>& x_shape, const std::vector<float>& x_values, float x_min_value, float x_max_value, const std::vector<int64_t>& y_shape, const std::vector<float>& y_values, float y_min_value, float y_max_value, const std::vector<int64_t>& expected_shape, const std::vector<float>& expected_values, double tolerance) { Scope root = Scope::NewRootScope(); Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape)); test::FillValues<float>(&x_float_tensor, x_values); Tensor x_quantized_tensor(DT_QUINT8, x_float_tensor.shape()); FloatTensorToQuantizedInPlace<quint8>(x_float_tensor, x_min_value, x_max_value, &x_quantized_tensor); Output x = Const(root.WithOpName("x"), Input::Initializer(x_quantized_tensor)); Output x_min = Const(root.WithOpName("x_min"), x_min_value); Output x_max = Const(root.WithOpName("x_max"), x_max_value); Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape)); test::FillValues<float>(&y_float_tensor, y_values); Tensor y_quantized_tensor(DT_QUINT8, y_float_tensor.shape()); FloatTensorToQuantizedInPlace<quint8>(y_float_tensor, y_min_value, y_max_value, &y_quantized_tensor); Output y = Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor)); Output y_min = Const(root.WithOpName("y_min"), y_min_value); Output y_max = Const(root.WithOpName("y_max"), y_max_value); ops::QuantizedAdd add = ops::QuantizedAdd(root.WithOpName("add"), x, y, x_min, x_max, y_min, y_max); TF_EXPECT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {add.z, add.min_z, add.max_z}, &outputs)); const Tensor& z_quantized = outputs[0]; const float z_min = outputs[1].flat<float>()(0); const float z_max = outputs[2].flat<float>()(0); Tensor z_float = QuantizedTensorToFloat<qint32>(z_quantized, z_min, z_max); Tensor expected_z_float(DT_FLOAT, TensorShape(expected_shape)); test::FillValues<float>(&expected_z_float, expected_values); test::ExpectTensorNear<float>(expected_z_float, z_float, tolerance); } void TestAddShape(const std::vector<int64_t>& x_shape, const std::vector<int64_t>& y_shape) { const size_t x_num_elements = TensorShape(x_shape).num_elements(); std::vector<float> x_values(x_num_elements); for (int i = 0; i < x_num_elements; ++i) { x_values[i] = i % 256; } const float x_min_value = 0.0f; const float x_max_value = 256.0f; const size_t y_num_elements = TensorShape(y_shape).num_elements(); std::vector<float> y_values(y_num_elements); for (int i = 0; i < y_num_elements; ++i) { y_values[i] = ((i + 23) % 123) - 50; } const float y_min_value = -150.0f; const float y_max_value = 150.0f; Scope root = Scope::NewRootScope(); Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape)); test::FillValues<float>(&x_float_tensor, x_values); Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor)); Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape)); 
test::FillValues<float>(&y_float_tensor, y_values); Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor)); Add add = Add(root.WithOpName("add"), x, y); TF_EXPECT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {add.z}, &outputs)); const Tensor& expected_values_tensor = outputs[0]; const float* expected_values_data = expected_values_tensor.flat<float>().data(); std::vector<float> expected_values( expected_values_data, expected_values_data + expected_values_tensor.NumElements()); std::vector<int64_t> expected_shape; for (const int64_t dim : expected_values_tensor.shape().dim_sizes()) { expected_shape.push_back(dim); } TestAdd(x_shape, x_values, x_min_value, x_max_value, y_shape, y_values, y_min_value, y_max_value, expected_shape, expected_values, 256.0); } void TimeAdd(const std::vector<int64_t>& x_shape, const std::vector<int64_t>& y_shape, int64_t iterations) { TestAddShape(x_shape, y_shape); Scope root = Scope::NewRootScope(); Tensor x_quantized_tensor(DT_QUINT8, TensorShape(x_shape)); Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_QUINT8); Output x_min = Const(root.WithOpName("x_min"), 0.0f); Output x_max = Const(root.WithOpName("x_max"), 1.0f); Tensor y_quantized_tensor(DT_QUINT8, TensorShape(y_shape)); Output y = Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor)); Output y_min = Const(root.WithOpName("y_min"), 0.0f); Output y_max = Const(root.WithOpName("y_max"), 1.0f); ops::QuantizedAdd add = ops::QuantizedAdd(root.WithOpName("add"), placeholder, y, x_min, x_max, y_min, y_max); TF_EXPECT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; int64_t total_duration = 0; for (int i = 0; i < iterations; ++i) { const int64_t start_time = Env::Default()->NowMicros(); TF_EXPECT_OK(session.Run({{placeholder, x_quantized_tensor}}, {add.z, add.min_z, add.max_z}, &outputs)); const int64_t end_time = Env::Default()->NowMicros(); total_duration += end_time - start_time; } const int64_t one_run_duration = total_duration / iterations; const int64_t num_ops = outputs[0].NumElements(); const double million_ops_per_second = (iterations * num_ops) / static_cast<double>(total_duration); LOG(INFO) << "TimeAdd: " << TensorShape(x_shape).DebugString() << " * " << TensorShape(y_shape).DebugString() << ": iterations=" << iterations << ", MOps/s=" << million_ops_per_second << ", one_run_duration=" << one_run_duration << ", total_duration=" << total_duration; } void TestManualScalar() { TestAdd( {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {1}, {10.0f}, -100.0f, 100.0f, {10}, {11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 1.0f); TestAdd( {1}, {10.0f}, -100.0f, 100.0f, {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {10}, {11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 1.0f); } void TestScalar() { TestAddShape({100}, {1}); TestAddShape({1}, {100}); } void TestManualVector() { TestAdd({10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {10}, {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f}, 1.0f); } void TestVector() { TestAddShape({100}, {100}); } void TestManualVectorPlusTensor() { TestAdd( {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {2, 10}, {1.0f, 2.0f, 3.0f, 4.0f, 
5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 0.0f, 20.0f, {2, 10}, {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f}, 1.0f); TestAdd({2, 10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 0.0f, 20.0f, {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {2, 10}, {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f}, 1.0f); TestAdd( {5, 2}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {2, 5, 2}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 0.0f, 20.0f, {2, 5, 2}, {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f}, 1.0f); } void TestVectorPlusTensor() { TestAddShape({100}, {2, 100}); TestAddShape({2, 100}, {100}); TestAddShape({5, 2}, {2, 5, 2}); } void BenchmarkTensorScalar() { TimeAdd({200}, {1}, 1000); TimeAdd({10000}, {1}, 100); TimeAdd({1000000}, {1}, 10); TimeAdd({10000000}, {1}, 1); } void BenchmarkVector() { TimeAdd({200}, {200}, 1000); TimeAdd({10000}, {10000}, 100); TimeAdd({1000000}, {1000000}, 10); TimeAdd({10000000}, {10000000}, 1); } void BenchmarkVectorPlusTensor() { TimeAdd({10, 20}, {20}, 100); TimeAdd({10, 1000}, {1000}, 10); TimeAdd({1000, 1000}, {1000}, 1); TimeAdd({10000, 1000}, {1000}, 1); TimeAdd({100, 100}, {100}, 10); TimeAdd({10000, 100}, {100}, 1); TimeAdd({100000, 100}, {100}, 1); } } } } #define RUN_TEST(t) \ TEST(QuantizedAddOpTest, t) { tensorflow::ops::t(); } RUN_TEST(TestManualScalar); RUN_TEST(TestManualVector); RUN_TEST(TestManualVectorPlusTensor); RUN_TEST(TestScalar); RUN_TEST(TestVector); RUN_TEST(TestVectorPlusTensor); #if defined(__ANDROID__) RUN_TEST(BenchmarkTensorScalar); RUN_TEST(BenchmarkVector); RUN_TEST(BenchmarkVectorPlusTensor); #endif int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_add_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_add_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
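The quantized-add test above round-trips float tensors through quint8 using FloatTensorToQuantizedInPlace and QuantizedTensorToFloat. As a rough sketch of the affine mapping those helpers apply (the names FloatToQuint8 and Quint8ToFloat below are purely illustrative, and the real quantization_utils code handles ranges, offsets, and rounding more carefully), the conversion in both directions looks roughly like this:

// Simplified sketch, not the exact TensorFlow implementation: a float in
// [min, max] is scaled linearly onto the 0..255 integer range and back.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

uint8_t FloatToQuint8(float value, float min, float max) {
  const float clamped = std::min(max, std::max(min, value));
  return static_cast<uint8_t>(
      std::lround((clamped - min) * 255.0f / (max - min)));
}

float Quint8ToFloat(uint8_t q, float min, float max) {
  return min + static_cast<float>(q) * (max - min) / 255.0f;
}

int main() {
  // Round-tripping 5.0 over [0, 10] stays within half a quantization step.
  const float v = 5.0f;
  std::cout << Quint8ToFloat(FloatToQuint8(v, 0.0f, 10.0f), 0.0f, 10.0f)
            << "\n";
  return 0;
}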
d80d3d9d-c4e3-4b0a-ad8c-a61d2b22f6e4
cpp
tensorflow/tensorflow
functional_ops
tensorflow/core/ops/functional_ops.cc
tensorflow/core/ops/functional_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::InferenceContext; REGISTER_OP("SymbolicGradient") .Input("input: Tin") .Output("output: Tout") .Attr("Tin: list(type)") .Attr("Tout: list(type)") .Attr("f: func") .SetShapeFn([](InferenceContext* c) { if (c->num_inputs() < c->num_outputs()) { return errors::InvalidArgument("len(inputs) < len(outputs)"); } std::vector<DataType> types; TF_RETURN_IF_ERROR(c->GetAttr("Tin", &types)); for (int i = 0; i < c->num_outputs(); ++i) { if (types[i] == DT_RESOURCE) { const std::vector<shape_inference::ShapeAndType>* handle_type = c->input_handle_shapes_and_types(i); if (handle_type != nullptr) { c->set_output(i, handle_type->at(0).shape); } else { c->set_output(i, c->UnknownShape()); } } else { c->set_output(i, c->input(i)); } } return absl::OkStatus(); }); REGISTER_OP("RemoteCall") .Input("target: string") .Input("args: Tin") .Output("output: Tout") .Attr("Tin: list(type)") .Attr("Tout: list(type)") .Attr("f: func") .SetIsStateful() .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("_If") .Input("cond: Tcond") .Input("input: Tin") .Output("output: Tout") .Attr("Tcond: type") .Attr("Tin: list(type)") .Attr("Tout: list(type)") .Attr("then_branch: func") .Attr("else_branch: func") .SetIsStateful() .SetShapeFn(shape_inference::UnknownShape) .Doc(R"doc( output = cond ? then_branch(input) : else_branch(input) cond: A Tensor. If the tensor is a scalar of non-boolean type, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. If the tensor is not a scalar, being empty means False and being non-empty means True. input: A list of input tensors. then_branch: A function that takes 'inputs' and returns a list of tensors, whose types are the same as what else_branch returns. else_branch: A function that takes 'inputs' and returns a list of tensors. whose types are the same as what then_branch returns. )doc"); Status IfShapeInferenceFn(shape_inference::InferenceContext* c) { std::vector<PartialTensorShape> output_shapes; TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes)); if (output_shapes.empty()) return shape_inference::UnknownShape(c); if (output_shapes.size() != c->num_outputs()) { return errors::InvalidArgument( "`output_shapes` must be the same length as num outputs (", output_shapes.size(), " vs. 
", c->num_outputs()); } for (size_t i = 0; i < output_shapes.size(); ++i) { shape_inference::ShapeHandle output_shape_handle; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape( output_shapes[i], &output_shape_handle)); c->set_output(static_cast<int>(i), output_shape_handle); } return absl::OkStatus(); } REGISTER_OP("StatelessIf") .Input("cond: Tcond") .Input("input: Tin") .Output("output: Tout") .Attr("Tcond: type") .Attr("Tin: list(type) >= 0") .Attr("Tout: list(type) >= 0") .Attr("then_branch: func") .Attr("else_branch: func") .Attr("output_shapes: list(shape) = []") .SetShapeFn(IfShapeInferenceFn); REGISTER_OP("If") .Input("cond: Tcond") .Input("input: Tin") .Output("output: Tout") .Attr("Tcond: type") .Attr("Tin: list(type) >= 0") .Attr("Tout: list(type) >= 0") .Attr("then_branch: func") .Attr("else_branch: func") .Attr("output_shapes: list(shape) = []") .SetIsStateful() .SetShapeFn(IfShapeInferenceFn); Status CaseShapeInferenceFn(shape_inference::InferenceContext* c) { std::vector<PartialTensorShape> output_shapes; TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes)); if (output_shapes.empty()) return shape_inference::UnknownShape(c); if (output_shapes.size() != c->num_outputs()) { return errors::InvalidArgument( "`output_shapes` must be the same length as num outputs (", output_shapes.size(), " vs. ", c->num_outputs()); } for (size_t i = 0; i < output_shapes.size(); ++i) { shape_inference::ShapeHandle output_shape_handle; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape( output_shapes[i], &output_shape_handle)); c->set_output(static_cast<int>(i), output_shape_handle); } return absl::OkStatus(); } REGISTER_OP("StatelessCase") .Input("branch_index: int32") .Input("input: Tin") .Output("output: Tout") .Attr("Tin: list(type) >= 0") .Attr("Tout: list(type) >= 0") .Attr("branches: list(func) >= 1") .Attr("output_shapes: list(shape) = []") .SetShapeFn(CaseShapeInferenceFn); REGISTER_OP("Case") .Input("branch_index: int32") .Input("input: Tin") .Output("output: Tout") .Attr("Tin: list(type) >= 0") .Attr("Tout: list(type) >= 0") .Attr("branches: list(func) >= 1") .Attr("output_shapes: list(shape) = []") .SetIsStateful() .SetShapeFn(CaseShapeInferenceFn); REGISTER_OP("_While") .Input("input: T") .Output("output: T") .Attr("T: list(type) >= 0") .Attr("cond: func") .Attr("body: func") .SetIsStateful() .SetShapeFn([](shape_inference::InferenceContext* c) { for (int i = 0; i < c->num_outputs(); ++i) { c->set_output(i, c->input(i)); } return absl::OkStatus(); }) .Doc(R"doc( output = input; While (Cond(output)) { output = Body(output) } input: A list of input tensors whose types are T. output: A list of output tensors whose types are T. cond: A function takes 'input' and returns a tensor. If the tensor is a scalar of non-boolean, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. If the tensor is not a scalar, non-emptiness means True and False otherwise. body: A function that takes a list of tensors and returns another list of tensors. Both lists have the same types as specified by T. 
)doc"); Status WhileShapeInferenceFn(shape_inference::InferenceContext* c) { std::vector<PartialTensorShape> output_shapes; TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes)); if (!output_shapes.empty()) { if (output_shapes.size() != c->num_outputs()) { return errors::InvalidArgument( "`output_shapes` must be the same length as num outputs (", output_shapes.size(), " vs. ", c->num_outputs()); } for (size_t i = 0; i < output_shapes.size(); ++i) { shape_inference::ShapeHandle output_shape_handle; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape( output_shapes[i], &output_shape_handle)); c->set_output(static_cast<int>(i), output_shape_handle); } } else { for (int i = 0; i < c->num_outputs(); ++i) { c->set_output(i, c->input(i)); } } return absl::OkStatus(); } REGISTER_OP("While") .Input("input: T") .Output("output: T") .Attr("T: list(type) >= 0") .Attr("cond: func") .Attr("body: func") .Attr("output_shapes: list(shape) = []") .Attr("parallel_iterations: int = 10") .SetIsStateful() .SetShapeFn(WhileShapeInferenceFn); REGISTER_OP("StatelessWhile") .Input("input: T") .Output("output: T") .Attr("T: list(type) >= 0") .Attr("cond: func") .Attr("body: func") .Attr("output_shapes: list(shape) = []") .Attr("parallel_iterations: int = 10") .SetShapeFn(WhileShapeInferenceFn); REGISTER_OP("ToBool") .Input("input: T") .Output("output: bool") .Attr("T: type") .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("For") .Input("start: int32") .Input("limit: int32") .Input("delta: int32") .Input("input: T") .Output("output: T") .Attr("T: list(type) >= 0") .Attr("body: func") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("PartitionedCall") .Input("args: Tin") .Output("output: Tout") .Attr("Tin: list(type) >= 0") .Attr("Tout: list(type) >= 0") .Attr("f: func") .Attr("config: string = ''") .Attr("config_proto: string = ''") .Attr("executor_type: string = ''") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("StatefulPartitionedCall") .Input("args: Tin") .Output("output: Tout") .Attr("Tin: list(type) >= 0") .Attr("Tout: list(type) >= 0") .Attr("f: func") .Attr("config: string = ''") .Attr("config_proto: string = ''") .Attr("executor_type: string = ''") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("FakeParam") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape") .SetShapeFn([](InferenceContext* c) { PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); shape_inference::ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("DeviceIndex") .Output("index: int32") .Attr("device_names: list(string)") .SetShapeFn(shape_inference::ScalarShape) .SetDoNotOptimize(); }
#include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(FunctionalOpsTest, Arg_ShapeFn) { ShapeInferenceTestOp op("_Arg"); std::vector<DataType> out_type_list; out_type_list.emplace_back(DT_RESOURCE); TF_ASSERT_OK(NodeDefBuilder("test", "_Arg") .Attr("T", DataType::DT_RESOURCE) .Attr("index", 0) .Attr("_output_shapes", {TensorShape({5, 4})}) .Attr("_handle_shapes", {TensorShape({3, 7})}) .Attr("_handle_dtypes", {DataType::DT_FLOAT}) .Finalize(&op.node_def)); const OpRegistrationData* op_reg_data; TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data)); shape_inference::InferenceContext c( op.graph_def_version, op.node_def, op_reg_data->op_def, std::vector<shape_inference::ShapeHandle>{}, op.input_tensors, {}, {}); TF_ASSERT_OK(c.Run(op_reg_data->shape_inference_fn)); auto output = c.output(0); ASSERT_EQ(c.Value(c.Rank(output)), 2); EXPECT_EQ(c.Value(c.Dim(output, 0)), 5); EXPECT_EQ(c.Value(c.Dim(output, 1)), 4); auto outputs = c.output_handle_shapes_and_types(0); ASSERT_EQ(outputs->size(), 1); EXPECT_EQ(outputs->front().dtype, DataType::DT_FLOAT); EXPECT_EQ(c.Value(c.Dim(outputs->front().shape, 0)), 3); EXPECT_EQ(c.Value(c.Dim(outputs->front().shape, 1)), 7); } TEST(FunctionalOpsTest, SymbolicGradient_ShapeFn) { ShapeInferenceTestOp op("SymbolicGradient"); int num_inputs = 4; int num_outputs = 3; std::vector<NodeDefBuilder::NodeOut> src_list; std::vector<DataType> in_type_list; std::vector<DataType> out_type_list; for (int i = 0; i < num_inputs; ++i) { in_type_list.emplace_back(DT_FLOAT); src_list.emplace_back("a", 0, DT_FLOAT); } out_type_list.reserve(num_outputs); for (int i = 0; i < num_outputs; ++i) { out_type_list.emplace_back(DT_FLOAT); } TF_ASSERT_OK(NodeDefBuilder("test", "SymbolicGradient") .Input(src_list) .Attr("Tin", in_type_list) .Attr("Tout", out_type_list) .Finalize(&op.node_def)); INFER_OK(op, "?;?;?;?", "in0;in1;in2"); INFER_OK(op, "[];[2];?;?", "in0;in1;in2"); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/functional_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/functional_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
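The functional-op registrations above share one shape-inference policy: an empty output_shapes attribute means nothing was declared, in which case While forwards its input shapes and If/Case report unknown shapes, while a non-empty attribute pins each output to the declared shape. A minimal standalone sketch of that policy, using std::optional in place of InferenceContext shape handles (an assumption made purely for illustration), follows:

// Standalone sketch of the branching shared by IfShapeInferenceFn,
// CaseShapeInferenceFn, and WhileShapeInferenceFn above; not library code.
#include <iostream>
#include <optional>
#include <vector>

// nullopt stands in for an unknown shape.
using Shape = std::optional<std::vector<int>>;

std::vector<Shape> InferOutputShapes(
    const std::vector<Shape>& output_shapes_attr,
    const std::vector<Shape>& input_shapes, bool forward_inputs_when_empty) {
  if (output_shapes_attr.empty()) {
    if (forward_inputs_when_empty) return input_shapes;            // While
    return std::vector<Shape>(input_shapes.size(), std::nullopt);  // If / Case
  }
  return output_shapes_attr;  // One declared shape per output.
}

int main() {
  const std::vector<Shape> inputs = {Shape(std::vector<int>{2, 3}),
                                     Shape(std::vector<int>{4})};
  const auto as_while = InferOutputShapes({}, inputs, true);
  const auto as_if = InferOutputShapes({}, inputs, false);
  std::cout << as_while[0].has_value() << " " << as_if[0].has_value()
            << "\n";  // prints "1 0"
  return 0;
}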
b01051db-62dc-4b93-9e88-82ac8ddbf30d
cpp
tensorflow/tensorflow
concat_op
tensorflow/compiler/tf2xla/kernels/concat_op.cc
tensorflow/core/kernels/concat_op_test.cc
#include <cstdint> #include <limits> #include <vector> #include "tensorflow/compiler/tf2xla/kernels/shape_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal_util.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { class ConcatBaseOp : public XlaOpKernel { public: ConcatBaseOp(OpKernelConstruction* c, int64_t axis_index) : XlaOpKernel(c), axis_index_(axis_index) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape concat_dim_tensor_shape = ctx->InputShape(axis_index_); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(concat_dim_tensor_shape), errors::InvalidArgument( "Concat dim tensor should be a scalar, but got shape ", concat_dim_tensor_shape.DebugString())); int64_t concat_dim; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(axis_index_, &concat_dim)); std::vector<xla::XlaOp> values; std::vector<TensorShape> shapes; OP_REQUIRES_OK(ctx, ctx->InputList("values", &values, &shapes)); const int N = values.size(); const int input_dims = shapes[0].dims(); const TensorShape& input_shape = shapes[0]; int64_t axis = concat_dim < 0 ? concat_dim + input_dims : concat_dim; OP_REQUIRES(ctx, 0 <= axis && axis < input_dims, errors::InvalidArgument( "ConcatOp : Expected concatenating dimensions in the range " "[", -input_dims, ", ", input_dims, "), but got ", concat_dim)); std::vector<xla::XlaOp> input_data; int output_concat_dim = 0; for (int i = 0; i < N; ++i) { xla::XlaOp handle = values[i]; const TensorShape& in_shape = shapes[i]; OP_REQUIRES( ctx, in_shape.dims() == input_dims, errors::InvalidArgument( "ConcatOp : Ranks of all input tensors should match: shape[0] = ", input_shape.DebugString(), " vs. shape[", i, "] = ", in_shape.DebugString())); if (in_shape.dims() == 0) { input_data.push_back(xla::Reshape(handle, {1})); } else { input_data.push_back(handle); } output_concat_dim += in_shape.dims() > 0 ? 
in_shape.dim_size(axis) : 1; } VLOG(1) << "Concat dim " << concat_dim << " equivalent to " << axis; ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), input_data, axis)); } private: int axis_index_; }; class ConcatOp : public ConcatBaseOp { public: explicit ConcatOp(OpKernelConstruction* c) : ConcatBaseOp(c, 0) {} }; class ConcatV2Op : public ConcatBaseOp { public: explicit ConcatV2Op(OpKernelConstruction* c) : ConcatBaseOp(c, c->num_inputs() - 1) {} }; REGISTER_XLA_OP(Name("Concat").CompileTimeConstantInput("concat_dim"), ConcatOp); REGISTER_XLA_OP(Name("ConcatV2") .TypeConstraint("Tidx", {DT_INT32, DT_INT64}) .CompileTimeConstantInput("axis"), ConcatV2Op); class ConcatOffsetOp : public XlaOpKernel { public: explicit ConcatOffsetOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("shape_type", &shape_type_)); } void Compile(XlaOpKernelContext* ctx) override { const TensorShape concat_dim_shape = ctx->InputShape(0); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(concat_dim_shape), errors::InvalidArgument( "Concat dim tensor should be a scalar, but got shape ", concat_dim_shape.DebugString())); for (int i = 1; i < ctx->num_inputs(); ++i) { OP_REQUIRES(ctx, TensorShapeUtils::IsVector(ctx->InputShape(i)), errors::InvalidArgument("input ", i, " should be a vector, but got shape ", ctx->InputShape(i).DebugString())); } const int32_t N = ctx->num_inputs() - 1; const TensorShape inp0_shape = ctx->InputShape(1); std::vector<int64_t> inp0_dims; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector( 1, &inp0_dims, xla::ValueInferenceMode::kUpperBound)); const int64_t inp0_rank = inp0_shape.num_elements(); int64_t cdim; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(0, &cdim)); VLOG(1) << "ConcatOffset " << cdim << "," << inp0_rank; int32_t axis = cdim < 0 ? cdim + inp0_rank : cdim; OP_REQUIRES(ctx, FastBoundsCheck(axis, inp0_rank), errors::InvalidArgument("Concat dim is out of range: ", axis, " vs. ", inp0_rank)); int64_t offset = 0; for (int i = 0; i < N; ++i) { const TensorShape inp_shape = ctx->InputShape(1 + i); OP_REQUIRES(ctx, inp0_rank == inp_shape.num_elements(), errors::InvalidArgument("input ", i, " should contain ", inp0_rank, " elements, but got ", inp_shape.num_elements())); std::vector<int64_t> inp_dims; OP_REQUIRES_OK( ctx, ctx->ConstantInputAsIntVector( 1 + i, &inp_dims, xla::ValueInferenceMode::kUpperBound)); std::vector<int64_t> output_dims(inp0_rank); for (int64_t j = 0; j < inp0_rank; ++j) { if (j == axis) { output_dims[j] = offset; offset += inp_dims[j]; } else { const int64_t inp0_element = inp0_dims[j]; const int64_t inp_element = inp_dims[j]; OP_REQUIRES(ctx, inp0_element == inp_element, errors::InvalidArgument( "All dimensions except ", axis, " must match. Input ", i, " has shape [", absl::StrJoin(inp_dims, " "), "] and doesn't match input 0 with shape [", absl::StrJoin(inp0_dims, " "), "].")); output_dims[j] = 0; } } TensorShape out_shape; OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(output_dims, &out_shape)); Tensor out_constant(shape_type_, TensorShape({inp0_rank})); OP_REQUIRES_OK(ctx, TensorShapeToConstant(out_shape, &out_constant)); ctx->SetConstantOutput(i, out_constant); } } private: DataType shape_type_; }; REGISTER_XLA_OP(Name("ConcatOffset") .TypeConstraint("shape_type", {DT_INT32, DT_INT64}) .CompileTimeConstantInput("concat_dim") .CompileTimeConstantInput("shape"), ConcatOffsetOp); } }
#include <functional> #include <memory> #include <vector> #include "absl/base/prefetch.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { template <typename T> void FillTensorWithRandomValues(Tensor* t, int string_length, int64_t* bytes) { t->flat<T>().setRandom(); *bytes = t->flat<T>().size() * sizeof(T); } template <> void FillTensorWithRandomValues<tstring>(Tensor* t, int string_length, int64_t* bytes) { auto ts = t->flat<tstring>(); *bytes = 0; for (int i = 0; i < ts.size(); i++) { ts(i) = tstring(string_length, 'x'); *bytes += sizeof(ts(i)) + ts(i).size(); } } template <typename T> static void ConcatHelper(::testing::benchmark::State& state, int concat_dimension, int dim2, int string_length = 0) { Graph* g = new Graph(OpRegistry::Global()); DataType dt = DataTypeToEnum<T>::v(); const int kDim1 = 100; Tensor concat_dim(DT_INT32, TensorShape({})); concat_dim.scalar<int32>()() = concat_dimension; Tensor in0(dt, TensorShape({kDim1, dim2})); Tensor in1(dt, TensorShape({kDim1, dim2})); int64_t in0_bytes, in1_bytes; FillTensorWithRandomValues<T>(&in0, string_length, &in0_bytes); FillTensorWithRandomValues<T>(&in1, string_length, &in1_bytes); Node* node; TF_CHECK_OK( NodeBuilder(g->NewName("n"), "Concat") .Input(test::graph::Constant(g, concat_dim)) .Input({test::graph::Constant(g, in0), test::graph::Constant(g, in1)}) .Attr("N", 2) .Attr("T", dt) .Finalize(g, &node)); test::Benchmark("cpu", g, false).Run(state); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * (in0_bytes + in1_bytes)); } void BM_ConcatDim0Float(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<float>(state, 0, dim2); } void BM_ConcatDim1Float(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<float>(state, 1, dim2); } BENCHMARK(BM_ConcatDim0Float) ->UseRealTime() ->Arg(1000) ->Arg(100000) ->Arg(1000000); BENCHMARK(BM_ConcatDim1Float) ->UseRealTime() ->Arg(1000) ->Arg(100000) ->Arg(1000000); void BM_ConcatDim0String(::testing::benchmark::State& state) { const int dim2 = state.range(0); const int string_length = state.range(1); ConcatHelper<tstring>(state, 0, dim2, string_length); } BENCHMARK(BM_ConcatDim0String) ->UseRealTime() ->ArgPair(1, 16) ->ArgPair(1, 10000) ->ArgPair(100, 16); void BM_ConcatDim1uint8(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<uint8>(state, 1, dim2); } void BM_ConcatDim1int16(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<int16>(state, 1, dim2); } void BM_ConcatDim1bfloat16(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<bfloat16>(state, 1, dim2); } BENCHMARK(BM_ConcatDim1uint8) ->UseRealTime() ->Arg(1000) ->Arg(100000) ->Arg(1000000); BENCHMARK(BM_ConcatDim1int16) ->UseRealTime() ->Arg(1000) ->Arg(100000) ->Arg(1000000); BENCHMARK(BM_ConcatDim1bfloat16) ->UseRealTime() ->Arg(1000) 
->Arg(100000) ->Arg(1000000); template <typename T> static void ConcatManyHelper(::testing::benchmark::State& state, int concat_dimension, int dim2) { Graph* g = new Graph(OpRegistry::Global()); DataType dt = DataTypeToEnum<T>::v(); const int kDim1 = 40000; const int kNumInputs = 64; Tensor concat_dim(DT_INT32, TensorShape({})); concat_dim.scalar<int32>()() = concat_dimension; std::vector<NodeBuilder::NodeOut> inputs; inputs.reserve(kNumInputs); for (int i = 0; i < kNumInputs; ++i) { Tensor in(dt, TensorShape({kDim1, dim2})); in.flat<T>().setRandom(); inputs.push_back(test::graph::Constant(g, in)); } Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Concat") .Input(test::graph::Constant(g, concat_dim)) .Input(inputs) .Attr("N", 64) .Attr("T", dt) .Finalize(g, &node)); test::Benchmark("cpu", g, false).Run(state); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim1 * dim2 * kNumInputs * sizeof(T)); } void BM_ConcatManyDim1bfloat16(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatManyHelper<bfloat16>(state, 1, dim2); } BENCHMARK(BM_ConcatManyDim1bfloat16)->UseRealTime()->Arg(18)->Arg(34)->Arg(60); void MemcpyAlternativeHelper(::testing::benchmark::State& state, int dim2) { const int kDim1 = 100; std::vector<float> data1(kDim1 * dim2, 1.0f); std::vector<float> data2(kDim1 * dim2, 2.0f); for (auto s : state) { const size_t n0 = data1.size(); const size_t n1 = data2.size(); float* result = new float[n0 + n1]; memcpy(&result[0], &data1[0], n0 * sizeof(float)); memcpy(&result[n0], &data2[0], n1 * sizeof(float)); delete[] result; } state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * ((kDim1 * dim2) + (kDim1 * dim2)) * sizeof(float)); } void BM_MemcpyAlternativeDim0(::testing::benchmark::State& state) { const int dim2 = state.range(0); MemcpyAlternativeHelper(state, dim2); } void BM_MemcpyAlternativeDim1(::testing::benchmark::State& state) { const int dim2 = state.range(0); MemcpyAlternativeHelper(state, dim2); } BENCHMARK(BM_MemcpyAlternativeDim0) ->UseRealTime() ->Arg(1000) ->Arg(100000) ->Arg(1000000); BENCHMARK(BM_MemcpyAlternativeDim1) ->UseRealTime() ->Arg(1000) ->Arg(100000) ->Arg(1000000); typedef Eigen::TensorMap<Eigen::Tensor<bfloat16, 1, Eigen::RowMajor>, Eigen::Unaligned> EigenMap; void MemcpyManyAlternative1(::testing::benchmark::State& state) { int dim2 = state.range(0); const int kDim1 = 40000; const int kNumCopies = 64; const int size = kDim1 * dim2 * kNumCopies; bfloat16* data = new bfloat16[size]; EigenMap map(data, size); map.setRandom(); for (auto s : state) { std::vector<bfloat16*> inputs(kNumCopies); for (int i = 0; i < kNumCopies; ++i) { inputs[i] = &data[i * kDim1 * dim2]; } bfloat16* result = new bfloat16[size]; for (int j = 0; j < kNumCopies; ++j) { bfloat16* output = &result[j * dim2]; for (int i = 0; i < kDim1; ++i) { if (i + 1 < kDim1) { absl::PrefetchToLocalCache(inputs[j] + dim2); } memcpy(output, inputs[j], dim2 * sizeof(bfloat16)); inputs[j] += dim2; output += dim2 * kNumCopies; } } delete[] result; } delete[] data; state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim1 * dim2 * kNumCopies * sizeof(bfloat16)); } void MemcpyManyAlternative2(::testing::benchmark::State& state) { int dim2 = state.range(0); const int kDim1 = 40000; const int kNumCopies = 64; const int size = kDim1 * dim2 * kNumCopies; bfloat16* data = new bfloat16[size]; EigenMap map(data, size); map.setRandom(); std::vector<bfloat16*> inputs(kNumCopies); for (auto s : state) { bfloat16* result = new bfloat16[size]; for 
(int i = 0; i < kNumCopies; ++i) { inputs[i] = &data[i * kDim1 * dim2]; } bfloat16* output = result; for (int i = 0; i < kDim1; ++i) { for (int j = 0; j < kNumCopies; ++j) { if (j + 1 < kNumCopies) { absl::PrefetchToLocalCache(inputs[j + 1]); } memcpy(output, inputs[j], dim2 * sizeof(bfloat16)); inputs[j] += dim2; output += dim2; } } delete[] result; } delete[] data; state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim1 * dim2 * kNumCopies * sizeof(bfloat16)); } BENCHMARK(MemcpyManyAlternative1) ->Arg(16) ->Arg(17) ->Arg(18) ->Arg(32) ->Arg(33) ->Arg(34) ->Arg(60) ->Arg(64) ->Arg(65); BENCHMARK(MemcpyManyAlternative2) ->Arg(16) ->Arg(17) ->Arg(18) ->Arg(32) ->Arg(33) ->Arg(34) ->Arg(60) ->Arg(64) ->Arg(65); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/concat_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/concat_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
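ConcatOffsetOp above emits, for every input shape, the start offset of that input within the concatenated result: zero in every dimension except the concat axis, where it accumulates the sizes of the preceding inputs. A plain-C++ sketch of that bookkeeping, not the XLA kernel itself, is shown below:

// For each input shape, produce an offset vector that is zero everywhere
// except the concat axis, which holds the running sum of the preceding
// inputs' sizes along that axis.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::vector<int64_t>> ConcatOffsets(
    int axis, const std::vector<std::vector<int64_t>>& shapes) {
  std::vector<std::vector<int64_t>> offsets;
  int64_t running = 0;
  for (const auto& shape : shapes) {
    std::vector<int64_t> offset(shape.size(), 0);
    offset[axis] = running;
    running += shape[axis];
    offsets.push_back(offset);
  }
  return offsets;
}

int main() {
  // Concatenating [2, 3] and [2, 5] along axis 1 yields offsets [0, 0]
  // and [0, 3].
  for (const auto& offset : ConcatOffsets(1, {{2, 3}, {2, 5}})) {
    std::cout << offset[0] << "," << offset[1] << "\n";
  }
  return 0;
}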
fd3e1b7e-aead-499d-bda0-cf8d606beb95
cpp
tensorflow/tensorflow
guarantee_const_op
tensorflow/core/kernels/guarantee_const_op.cc
tensorflow/core/kernels/guarantee_const_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { namespace { class GuaranteeConstOp : public OpKernel { public: explicit GuaranteeConstOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const DataType input_dtype = ctx->input_dtype(0); OP_REQUIRES(ctx, input_dtype != DT_RESOURCE, errors::InvalidArgument( "Input tensor cannot be a resource variable handle.")); const Tensor& input_tensor = ctx->input(0); Tensor* output = nullptr; if (!ctx->forward_input_to_output_with_shape(0, 0, input_tensor.shape(), &output)) { ctx->set_output(0, input_tensor); } } bool IsExpensive() override { return false; } }; REGISTER_KERNEL_BUILDER(Name("GuaranteeConst").Device(DEVICE_CPU), GuaranteeConstOp); } }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/variable_ops.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class GuaranteeConstOpTest : public OpsTestBase { protected: Status Init(DataType input_type) { TF_CHECK_OK(NodeDefBuilder("op", "GuaranteeConst") .Input(FakeInput(input_type)) .Finalize(node_def())); return InitOp(); } }; TEST_F(GuaranteeConstOpTest, Int32Success_6) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(GuaranteeConstOpTest, Int32Success_2_3) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2, 3})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(GuaranteeConstOpTest, StringSuccess) { TF_ASSERT_OK(Init(DT_STRING)); AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({6})); test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(GuaranteeConstOpTest, ResourceInputError) { TF_ASSERT_OK(Init(DT_RESOURCE)); AddResourceInput("", "resource", new Var(DT_INT32)); const auto status = RunOpKernel(); ASSERT_EQ(error::INVALID_ARGUMENT, status.code()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/guarantee_const_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/guarantee_const_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
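GuaranteeConstOp above tries forward_input_to_output_with_shape first and only calls set_output when forwarding fails, so the common case is a zero-copy pass-through. The snippet below is a framework-free sketch of that try-to-forward-then-alias pattern using a reference-counted toy buffer; it is an analogy of the idea, not TensorFlow's buffer-forwarding machinery:

// Toy "tensor" whose storage is reference counted, loosely standing in for a
// TensorFlow buffer. Forwarding succeeds only when the caller holds the sole
// reference, mirroring the idea that a buffer may be reused in place when no
// one else can observe it.
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Buffer {
  std::shared_ptr<std::vector<float>> data;
};

bool ForwardInputToOutput(Buffer& input, Buffer& output) {
  if (input.data && input.data.use_count() == 1) {
    output.data = std::move(input.data);  // Hand the storage over, no copy.
    return true;
  }
  return false;
}

int main() {
  Buffer in{std::make_shared<std::vector<float>>(3, 1.0f)};
  Buffer out;
  if (!ForwardInputToOutput(in, out)) {
    out.data = in.data;  // Fall back to aliasing the same storage.
  }
  std::cout << out.data->size() << "\n";  // 3
  return 0;
}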
a98d044e-9cf9-4514-bd1e-db7503f03570
cpp
tensorflow/tensorflow
range_sampler
tensorflow/core/kernels/range_sampler.cc
tensorflow/core/kernels/range_sampler_test.cc
#include "tensorflow/core/kernels/range_sampler.h" #include <cmath> #include <unordered_set> #include <vector> #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/lib/io/inputbuffer.h" #include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { using gtl::ArraySlice; using gtl::MutableArraySlice; RangeSampler::~RangeSampler() {} void RangeSampler::SampleBatch(random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch) const { SampleBatchGetExpectedCount(rnd, unique, batch, absl::Span<float>(), absl::Span<const int64_t>(), absl::Span<float>()); } void RangeSampler::SampleBatchGetExpectedCount( random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch, absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras, absl::Span<float> extras_expected_count) const { SampleBatchGetExpectedCountAvoid(rnd, unique, batch, batch_expected_count, extras, extras_expected_count, absl::Span<const int64_t>()); } namespace { static float ExpectedCountHelper(float p, int batch_size, int num_tries) { if (num_tries == batch_size) { return p * batch_size; } return -std::expm1(num_tries * std::log1p(-p)); } } void RangeSampler::SampleBatchGetExpectedCountAvoid( random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch, absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras, absl::Span<float> extras_expected_count, absl::Span<const int64_t> avoided_values) const { const int batch_size = batch.size(); int num_tries; if (unique) { CHECK_LE(static_cast<int64_t>(batch_size + avoided_values.size()), range_); std::unordered_set<int64_t> used(batch_size); used.insert(avoided_values.begin(), avoided_values.end()); int num_picked = 0; num_tries = 0; while (num_picked < batch_size) { num_tries++; CHECK_LT(num_tries, kint32max); int64_t value = Sample(rnd); if (gtl::InsertIfNotPresent(&used, value)) { batch[num_picked++] = value; } } } else { CHECK_EQ(avoided_values.size(), size_t{0}) << "avoided_values only supported with unique=true"; for (int i = 0; i < batch_size; i++) { batch[i] = Sample(rnd); } num_tries = batch_size; } if (!batch_expected_count.empty()) { CHECK_EQ(batch_size, batch_expected_count.size()); for (int i = 0; i < batch_size; i++) { batch_expected_count[i] = ExpectedCountHelper(Probability(batch[i]), batch_size, num_tries); } } CHECK_EQ(extras.size(), extras_expected_count.size()); for (size_t i = 0; i < extras.size(); i++) { extras_expected_count[i] = ExpectedCountHelper(Probability(extras[i]), batch_size, num_tries); } } AllSampler::AllSampler(int64_t range) : RangeSampler(range) {} void AllSampler::SampleBatchGetExpectedCountAvoid( random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch, absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras, absl::Span<float> extras_expected_count, absl::Span<const int64_t> avoided_values) const { const int batch_size = batch.size(); CHECK_EQ(range_, batch_size); for (int i = 0; i < batch_size; i++) { batch[i] = i; } if (!batch_expected_count.empty()) { CHECK_EQ(batch_size, batch_expected_count.size()); for (int i = 0; i < batch_size; i++) { batch_expected_count[i] = 1; } } CHECK_EQ(size_t{0}, avoided_values.size()); CHECK_EQ(extras.size(), extras_expected_count.size()); for (size_t i = 0; i < extras.size(); i++) { 
extras_expected_count[i] = 1; } } UniformSampler::UniformSampler(int64_t range) : RangeSampler(range), inv_range_(1.0 / range) {} int64_t UniformSampler::Sample(random::SimplePhilox* rnd) const { return rnd->Uniform64(range_); } float UniformSampler::Probability(int64_t value) const { return inv_range_; } LogUniformSampler::LogUniformSampler(int64_t range) : RangeSampler(range), log_range_(log1p(range)) {} int64_t LogUniformSampler::Sample(random::SimplePhilox* rnd) const { const int64_t value = static_cast<int64_t>(exp(rnd->RandDouble() * log_range_)) - 1; DCHECK_GE(value, 0); return value % range_; } float LogUniformSampler::Probability(int64_t value) const { return (log((value + 2.0) / (value + 1.0))) / log_range_; } ThreadUnsafeUnigramSampler::ThreadUnsafeUnigramSampler(int64_t range) : RangeSampler(range), picker_(range) { CHECK_LT(range, kint32max); } int64_t ThreadUnsafeUnigramSampler::Sample(random::SimplePhilox* rnd) const { return picker_.Pick(rnd); } float ThreadUnsafeUnigramSampler::Probability(int64_t value) const { return static_cast<float>(picker_.get_weight(value)) / picker_.total_weight(); } void ThreadUnsafeUnigramSampler::Update(absl::Span<const int64_t> values) { int num_updates = std::min(static_cast<int>(values.size()), kint32max - picker_.total_weight()); for (int i = 0; i < num_updates; i++) { const int64_t value = values[i]; picker_.set_weight(value, picker_.get_weight(value) + 1); } } UnigramSampler::UnigramSampler(int64_t range) : RangeSampler(range), unsafe_sampler_(range) { CHECK_LT(range, kint32max); } int64_t UnigramSampler::Sample(random::SimplePhilox* rnd) const { tf_shared_lock lock(mu_); return unsafe_sampler_.Sample(rnd); } float UnigramSampler::Probability(int64_t value) const { tf_shared_lock lock(mu_); return unsafe_sampler_.Probability(value); } void UnigramSampler::SampleBatchGetExpectedCountAvoid( random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch, absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras, absl::Span<float> extras_expected_count, absl::Span<const int64_t> avoided_values) const { tf_shared_lock lock(mu_); unsafe_sampler_.SampleBatchGetExpectedCountAvoid( rnd, unique, batch, batch_expected_count, extras, extras_expected_count, avoided_values); } void UnigramSampler::Update(absl::Span<const int64_t> values) { mutex_lock lock(mu_); unsafe_sampler_.Update(values); } FixedUnigramSampler::FixedUnigramSampler(int64_t range, float distortion, int32_t num_reserved_ids, int32_t num_shards, int32_t shard) : RangeSampler(range), total_weight_(0.0), num_shards_(num_shards), shard_(shard), distortion_(distortion) { FillReservedIds(num_reserved_ids); } Status FixedUnigramSampler::SetDistributionSampler(Env* env, const string& vocab_file) { TF_RETURN_IF_ERROR(LoadFromFile(env, vocab_file, distortion_)); if (!TF_PREDICT_TRUE(FixedUnigramSampler::range() == weights_.size())) return (errors::InvalidArgument("range is ", FixedUnigramSampler::range(), " must be equal to weights size ", weights_.size())); dist_sampler_.reset(new random::DistributionSampler(weights_)); return absl::OkStatus(); } Status FixedUnigramSampler::SetDistributionSampler( const std::vector<float>& unigrams) { LoadFromUnigrams(unigrams, distortion_); if (!TF_PREDICT_TRUE(FixedUnigramSampler::range() == weights_.size())) return (errors::InvalidArgument("range is ", FixedUnigramSampler::range(), " must be equal to weights size ", weights_.size())); dist_sampler_.reset(new random::DistributionSampler(weights_)); return absl::OkStatus(); } float 
FixedUnigramSampler::Probability(int64_t value) const { if (value < 0 || static_cast<size_t>(value) >= weights_.size()) { return 0.0; } return weights_.at(value) / total_weight_; } int64_t FixedUnigramSampler::Sample(random::SimplePhilox* rnd) const { return dist_sampler_->Sample(rnd); } void FixedUnigramSampler::FillReservedIds(int32_t num_reserved_ids) { for (int32_t word_id = 0; word_id < num_reserved_ids; ++word_id) { if (word_id % num_shards_ == shard_) weights_.push_back(0.0); } } Status FixedUnigramSampler::LoadFromFile(Env* env, const string& vocab_file, float distortion) { std::unique_ptr<RandomAccessFile> file; TF_RETURN_IF_ERROR(env->NewRandomAccessFile(vocab_file, &file)); io::InputBuffer in(file.get(), 262144 ); string line; int32_t word_id = weights_.size(); while (in.ReadLine(&line).ok()) { std::vector<string> cols = str_util::Split(line, ','); if (cols.empty()) continue; if (word_id % num_shards_ == shard_) { float w = 0.0; if (!strings::safe_strtof(cols.at(cols.size() - 1), &w)) { return errors::InvalidArgument("Wrong vocabulary format at line: ", line); } w = std::pow(w, distortion); total_weight_ += w; weights_.push_back(w); } ++word_id; } return absl::OkStatus(); } void FixedUnigramSampler::LoadFromUnigrams(const std::vector<float>& unigrams, float distortion) { int32_t word_id = weights_.size(); for (float w : unigrams) { if (word_id % num_shards_ == shard_) { w = std::pow(w, distortion); total_weight_ += w; weights_.push_back(w); } ++word_id; } } }
#include "tensorflow/core/kernels/range_sampler.h" #include <vector> #include "absl/status/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { using gtl::ArraySlice; using gtl::MutableArraySlice; class RangeSamplerTest : public ::testing::Test { protected: void CheckProbabilitiesSumToOne() { double sum = 0; for (int i = 0; i < sampler_->range(); i++) { sum += sampler_->Probability(i); } EXPECT_NEAR(sum, 1.0, 1e-4); } void CheckHistogram(int num_samples, float tolerance) { const int range = sampler_->range(); std::vector<int> h(range); std::vector<int64_t> a(num_samples); random::PhiloxRandom philox(123, 17); random::SimplePhilox rnd(&philox); sampler_->SampleBatch(&rnd, false, absl::MakeSpan(a)); for (int i = 0; i < num_samples; i++) { int64_t val = a[i]; ASSERT_GE(val, 0); ASSERT_LT(val, range); h[val]++; } for (int val = 0; val < range; val++) { EXPECT_NEAR((h[val] + 0.0) / num_samples, sampler_->Probability(val), tolerance); } } void Update1() { std::vector<int64_t> a(10); for (int i = 0; i < 10; i++) { a[i] = 3; } sampler_->Update(a); } void Update2() { int64_t a[10]; for (int i = 0; i < 10; i++) { a[i] = i; } for (int64_t i = 1; i < 10; i++) { sampler_->Update(absl::Span<const int64_t>(a + i, 10 - i)); } } std::unique_ptr<RangeSampler> sampler_; }; TEST_F(RangeSamplerTest, UniformProbabilities) { sampler_.reset(new UniformSampler(10)); for (int i = 0; i < 10; i++) { CHECK_EQ(sampler_->Probability(i), sampler_->Probability(0)); } } TEST_F(RangeSamplerTest, UniformChecksum) { sampler_.reset(new UniformSampler(10)); CheckProbabilitiesSumToOne(); } TEST_F(RangeSamplerTest, UniformHistogram) { sampler_.reset(new UniformSampler(10)); CheckHistogram(1000, 0.05); } TEST_F(RangeSamplerTest, LogUniformProbabilities) { int range = 1000000; sampler_.reset(new LogUniformSampler(range)); for (int i = 100; i < range; i *= 2) { float ratio = sampler_->Probability(i) / sampler_->Probability(i / 2); EXPECT_NEAR(ratio, 0.5, 0.1); } } TEST_F(RangeSamplerTest, LogUniformChecksum) { sampler_.reset(new LogUniformSampler(10)); CheckProbabilitiesSumToOne(); } TEST_F(RangeSamplerTest, LogUniformHistogram) { sampler_.reset(new LogUniformSampler(10)); CheckHistogram(1000, 0.05); } TEST_F(RangeSamplerTest, UnigramProbabilities1) { sampler_.reset(new UnigramSampler(10)); Update1(); EXPECT_NEAR(sampler_->Probability(3), 0.55, 1e-4); for (int i = 0; i < 10; i++) { if (i != 3) { ASSERT_NEAR(sampler_->Probability(i), 0.05, 1e-4); } } } TEST_F(RangeSamplerTest, UnigramProbabilities2) { sampler_.reset(new UnigramSampler(10)); Update2(); for (int i = 0; i < 10; i++) { ASSERT_NEAR(sampler_->Probability(i), (i + 1) / 55.0, 1e-4); } } TEST_F(RangeSamplerTest, UnigramChecksum) { sampler_.reset(new UnigramSampler(10)); Update1(); CheckProbabilitiesSumToOne(); } TEST_F(RangeSamplerTest, UnigramHistogram) { sampler_.reset(new UnigramSampler(10)); Update1(); CheckHistogram(1000, 0.05); } static const char kVocabContent[] = "w1,1\n" "w2,2\n" "w3,4\n" "w4,8\n" "w5,16\n" "w6,32\n" "w7,64\n" "w8,128\n" "w9,256"; TEST_F(RangeSamplerTest, FixedUnigramProbabilities) { Env* env = Env::Default(); string fname = io::JoinPath(testing::TmpDir(), "vocab_file"); TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent)); FixedUnigramSampler* test_sampler = new 
FixedUnigramSampler(9, 0.8, 0, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname)); sampler_.reset(test_sampler); for (int i = 0; i < 9; i++) { ASSERT_NEAR(sampler_->Probability(i), pow(2, i * 0.8) / 197.05, 1e-4); } } TEST_F(RangeSamplerTest, FixedUnigramNoExistingFilename) { Env* env = Env::Default(); string fname = "NoExistingFile"; FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0); Status s = test_sampler->SetDistributionSampler(env, fname); sampler_.reset(test_sampler); EXPECT_TRUE(absl::IsNotFound(s)) << s; } TEST_F(RangeSamplerTest, FixedUnigramNoMatchingRangeWeights) { Env* env = Env::Default(); string fname = io::JoinPath(testing::TmpDir(), "vocab_file"); TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent)); FixedUnigramSampler* test_sampler = new FixedUnigramSampler(8, 0.8, 0, 1, 0); Status s = test_sampler->SetDistributionSampler(env, fname); sampler_.reset(test_sampler); EXPECT_TRUE(absl::IsInvalidArgument(s)) << s; } TEST_F(RangeSamplerTest, FixedUnigramChecksum) { Env* env = Env::Default(); string fname = io::JoinPath(testing::TmpDir(), "vocab_file"); TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent)); FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname)); sampler_.reset(test_sampler); CheckProbabilitiesSumToOne(); } TEST_F(RangeSamplerTest, FixedUnigramHistogram) { Env* env = Env::Default(); string fname = io::JoinPath(testing::TmpDir(), "vocab_file"); TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent)); FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname)); sampler_.reset(test_sampler); CheckHistogram(1000, 0.05); } TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve1) { Env* env = Env::Default(); string fname = io::JoinPath(testing::TmpDir(), "vocab_file"); TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent)); FixedUnigramSampler* test_sampler = new FixedUnigramSampler(10, 0.8, 1, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname)); sampler_.reset(test_sampler); ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4); for (int i = 1; i < 10; i++) { ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 1) * 0.8) / 197.05, 1e-4); } } TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve2) { Env* env = Env::Default(); string fname = io::JoinPath(testing::TmpDir(), "vocab_file"); TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent)); FixedUnigramSampler* test_sampler = new FixedUnigramSampler(11, 0.8, 2, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname)); sampler_.reset(test_sampler); ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4); ASSERT_NEAR(sampler_->Probability(1), 0, 1e-4); for (int i = 2; i < 11; i++) { ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 2) * 0.8) / 197.05, 1e-4); } } TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesFromVector) { std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256}; FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(weights)); sampler_.reset(test_sampler); for (int i = 0; i < 9; i++) { ASSERT_NEAR(sampler_->Probability(i), pow(2, i * 0.8) / 197.05, 1e-4); } } TEST_F(RangeSamplerTest, FixedUnigramChecksumFromVector) { std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256}; FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0); 
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights)); sampler_.reset(test_sampler); CheckProbabilitiesSumToOne(); } TEST_F(RangeSamplerTest, FixedUnigramHistogramFromVector) { std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256}; FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(weights)); sampler_.reset(test_sampler); CheckHistogram(1000, 0.05); } TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve1FromVector) { std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256}; FixedUnigramSampler* test_sampler = new FixedUnigramSampler(10, 0.8, 1, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(weights)); sampler_.reset(test_sampler); ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4); for (int i = 1; i < 10; i++) { ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 1) * 0.8) / 197.05, 1e-4); } } TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve2FromVector) { std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256}; FixedUnigramSampler* test_sampler = new FixedUnigramSampler(11, 0.8, 2, 1, 0); TF_CHECK_OK(test_sampler->SetDistributionSampler(weights)); sampler_.reset(test_sampler); ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4); ASSERT_NEAR(sampler_->Probability(1), 0, 1e-4); for (int i = 2; i < 11; i++) { ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 2) * 0.8) / 197.05, 1e-4); } } TEST_F(RangeSamplerTest, All) { int batch_size = 10; sampler_.reset(new AllSampler(10)); std::vector<int64_t> batch(batch_size); std::vector<float> batch_expected(batch_size); std::vector<int64_t> extras(2); std::vector<float> extras_expected(2); extras[0] = 0; extras[1] = batch_size - 1; sampler_->SampleBatchGetExpectedCount(nullptr, false, absl::MakeSpan(batch), absl::MakeSpan(batch_expected), extras, absl::MakeSpan(extras_expected)); for (int i = 0; i < batch_size; i++) { EXPECT_EQ(i, batch[i]); EXPECT_EQ(1, batch_expected[i]); } EXPECT_EQ(1, extras_expected[0]); EXPECT_EQ(1, extras_expected[1]); } TEST_F(RangeSamplerTest, Unique) { random::PhiloxRandom philox(123, 17); random::SimplePhilox rnd(&philox); const int range = 100; const int batch_size = 50; const int num_batches = 100; sampler_.reset(new LogUniformSampler(range)); std::vector<int> histogram(range); std::vector<int64_t> batch(batch_size); std::vector<int64_t> all_values(range); for (int i = 0; i < range; i++) { all_values[i] = i; } std::vector<float> expected(range); sampler_->SampleBatchGetExpectedCount(&rnd, true, absl::MakeSpan(batch), absl::Span<float>(), all_values, absl::MakeSpan(expected)); std::set<int64_t> s(batch.begin(), batch.end()); CHECK_EQ(batch_size, s.size()); for (int trial = 0; trial < num_batches; trial++) { std::vector<float> trial_expected(range); sampler_->SampleBatchGetExpectedCount(&rnd, true, absl::MakeSpan(batch), absl::Span<float>(), all_values, absl::MakeSpan(trial_expected)); for (int i = 0; i < range; i++) { EXPECT_NEAR(expected[i], trial_expected[i], expected[i] * 0.5); } for (int i = 0; i < batch_size; i++) { histogram[batch[i]]++; } } for (int i = 0; i < range; i++) { const float average_count = static_cast<float>(histogram[i]) / num_batches; EXPECT_NEAR(expected[i], average_count, 0.2); } } TEST_F(RangeSamplerTest, Avoid) { random::PhiloxRandom philox(123, 17); random::SimplePhilox rnd(&philox); sampler_.reset(new LogUniformSampler(100)); std::vector<int64_t> avoided(2); avoided[0] = 17; avoided[1] = 23; std::vector<int64_t> batch(98); sampler_->SampleBatchGetExpectedCountAvoid( &rnd, true, 
absl::MakeSpan(batch), absl::Span<float>(), absl::Span<const int64_t>(), absl::Span<float>(), avoided); int sum = 0; for (auto val : batch) { sum += val; } const int expected_sum = 100 * 99 / 2 - avoided[0] - avoided[1]; EXPECT_EQ(expected_sum, sum); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/range_sampler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/range_sampler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
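Two pieces of arithmetic in range_sampler.cc above are worth spelling out: ExpectedCountHelper evaluates 1 - (1 - p)^num_tries through the numerically stable expression -expm1(num_tries * log1p(-p)), and LogUniformSampler assigns P(k) = log((k + 2) / (k + 1)) / log(range + 1), which sums to 1 over k = 0 .. range - 1 because the logarithms telescope. The short check below is a standalone sketch rather than library code:

// Numeric check of the expected-count formula and of the telescoping sum of
// the log-uniform probabilities.
#include <cmath>
#include <iostream>

double ExpectedCount(double p, int num_tries) {
  return -std::expm1(num_tries * std::log1p(-p));  // == 1 - (1 - p)^num_tries
}

double LogUniformProbability(long long value, long long range) {
  return std::log((value + 2.0) / (value + 1.0)) /
         std::log1p(static_cast<double>(range));
}

int main() {
  std::cout << ExpectedCount(0.01, 100) << "\n";  // ~0.634, not 1.0
  double sum = 0.0;
  const long long range = 1000;
  for (long long k = 0; k < range; ++k) sum += LogUniformProbability(k, range);
  std::cout << "sum of probabilities = " << sum << "\n";  // ~1.0
  return 0;
}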
b6e96f8c-48b7-4a54-83c3-09a699070b82
cpp
tensorflow/tensorflow
identity_op
tensorflow/compiler/tf2xla/kernels/identity_op.cc
tensorflow/core/kernels/identity_op_test.cc
#include "absl/log/check.h" #include "tensorflow/compiler/tf2xla/kernels/tensor_list_utils.h" #include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.pb.h" namespace tensorflow { namespace { class IdentityOp : public XlaOpKernel { public: explicit IdentityOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* ctx) override { for (int i = 0; i < ctx->num_inputs(); ++i) { if (IsTensorListInput(ctx, i)) { ctx->SetTensorListOutput(i, ctx->Input(i)); } else { DCHECK(ctx->input_type(i) != DT_VARIANT); ctx->op_kernel_context()->set_output( i, ctx->op_kernel_context()->input(i)); } } } private: IdentityOp(const IdentityOp&) = delete; void operator=(const IdentityOp&) = delete; }; REGISTER_XLA_OP( Name("Identity").AllowResourceTypes().AllowVariantTypes().CompilationOnly(), IdentityOp); REGISTER_XLA_OP(Name("IdentityN") .AllowResourceTypes() .AllowVariantTypes() .CompilationOnly(), IdentityOp); REGISTER_XLA_OP(Name("PlaceholderWithDefault"), IdentityOp); REGISTER_XLA_OP(Name("PreventGradient"), MlirXlaOpKernel); REGISTER_XLA_OP(Name("StopGradient").AllowVariantTypes(), IdentityOp); REGISTER_XLA_OP(Name("Snapshot"), IdentityOp); REGISTER_XLA_OP(Name("_EagerConst"), IdentityOp); } }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class IdentityOpTest : public OpsTestBase { protected: Status Init(DataType input_type) { TF_CHECK_OK(NodeDefBuilder("op", "Identity") .Input(FakeInput(input_type)) .Finalize(node_def())); return InitOp(); } }; TEST_F(IdentityOpTest, Int32Success_6) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(IdentityOpTest, Int32Success_2_3) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2, 3})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(IdentityOpTest, StringSuccess) { TF_ASSERT_OK(Init(DT_STRING)); AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({6})); test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(IdentityOpTest, RefInputError) { TF_ASSERT_OK(Init(DT_INT32_REF)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/identity_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/identity_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
317537c2-d79f-423f-8307-3068349c7227
cpp
tensorflow/tensorflow
restore_op
tensorflow/core/kernels/restore_op.cc
tensorflow/core/kernels/restore_op_test.cc
#include "tensorflow/core/kernels/save_restore_tensor.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/tensor_slice_reader.h" namespace tensorflow { class RestoreOp : public OpKernel { public: explicit RestoreOp(OpKernelConstruction* context) : OpKernel(context) { int preferred_shard; OP_REQUIRES_OK(context, context->GetAttr("preferred_shard", &preferred_shard)); if (preferred_shard == -1) { preferred_shard_ = checkpoint::TensorSliceReader::kLoadAllShards; } else { OP_REQUIRES(context, preferred_shard >= 0, errors::InvalidArgument("Attribute 'preferred_shard' must be " "greater or equal to -1")); preferred_shard_ = preferred_shard; } } void Compute(OpKernelContext* context) override { RestoreTensor(context, &checkpoint::OpenTableTensorSliceReader, preferred_shard_, false, 0); } private: int preferred_shard_; }; REGISTER_KERNEL_BUILDER(Name("Restore").Device(DEVICE_CPU), RestoreOp); class RestoreSliceOp : public OpKernel { public: explicit RestoreSliceOp(OpKernelConstruction* context) : OpKernel(context) { int preferred_shard; OP_REQUIRES_OK(context, context->GetAttr("preferred_shard", &preferred_shard)); if (preferred_shard == -1) { preferred_shard_ = checkpoint::TensorSliceReader::kLoadAllShards; } else { OP_REQUIRES(context, preferred_shard >= 0, errors::InvalidArgument("Attribute 'preferred_shard' must be " "greater or equal to -1")); preferred_shard_ = preferred_shard; } } void Compute(OpKernelContext* context) override { RestoreTensor(context, &checkpoint::OpenTableTensorSliceReader, preferred_shard_, true, 0); } private: int preferred_shard_; }; REGISTER_KERNEL_BUILDER(Name("RestoreSlice").Device(DEVICE_CPU), RestoreSliceOp); }
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/tensor_slice_reader_cache.h" namespace tensorflow { namespace { class RestoreOpTest : public OpsTestBase { protected: void MakeRestoreOp(DataType dt) { TF_ASSERT_OK(NodeDefBuilder("myop", "Restore") .Input(FakeInput()) .Input(FakeInput()) .Attr("dt", dt) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; template <typename T> Tensor MakeInput(const TensorShape& shape, std::function<T(int)> input_mapping) { Tensor input(DataTypeToEnum<T>::v(), shape); test::FillFn(&input, input_mapping); return input; } TEST_F(RestoreOpTest, RestoreSimple) { const string filename = io::JoinPath(testing::TmpDir(), "tensor_simple"); const std::vector<string> tensor_names = { "tensor_bool", "tensor_int", "tensor_float", "tensor_double", "tensor_qint8", "tensor_qint32", "tensor_uint8", "tensor_int8", "tensor_int16", "tensor_int64", "tensor_string", "tensor_complex64", "tensor_half", "tensor_float_empty"}; { NodeDef save; TF_ASSERT_OK( NodeDefBuilder("myop", "Save") .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8, DT_INT16, DT_STRING, DT_COMPLEX64, DT_HALF})) .Finalize(&save)); std::unique_ptr<Device> device( DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0")); absl::InlinedVector<TensorValue, 4> inputs; Status status; std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(), save, TF_GRAPH_DEF_VERSION, &status)); TF_EXPECT_OK(status); Tensor input_0(DT_STRING, TensorShape({})); input_0.scalar<tstring>()() = filename; inputs.push_back({nullptr, &input_0}); Tensor input_1 = MakeInput<tstring>( TensorShape({static_cast<int>(tensor_names.size())}), [&tensor_names](int x) -> string { return tensor_names[x]; }); inputs.push_back({nullptr, &input_1}); Tensor input_2 = MakeInput<bool>(TensorShape({2}), [](int x) -> bool { return x != 0; }); inputs.push_back({nullptr, &input_2}); Tensor input_3 = MakeInput<int32>(TensorShape({10}), [](int x) -> int32 { return x + 1; }); inputs.push_back({nullptr, &input_3}); Tensor input_4 = MakeInput<float>(TensorShape({2, 4}), [](int x) -> float { return static_cast<float>(x) / 10; }); inputs.push_back({nullptr, &input_4}); Tensor input_5 = MakeInput<double>( TensorShape({2, 4}), [](int x) -> double { return static_cast<double>(x) / 20; }); inputs.push_back({nullptr, &input_5}); Tensor input_6 = MakeInput<qint8>(TensorShape({3, 2}), [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); inputs.push_back({nullptr, &input_6}); Tensor input_7 = MakeInput<qint32>(TensorShape({2, 3}), [](int x) -> qint32 { return *reinterpret_cast<qint32*>(&x) * 
qint8(2); }); inputs.push_back({nullptr, &input_7}); Tensor input_8 = MakeInput<uint8>(TensorShape({11}), [](int x) -> uint8 { return x + 1; }); inputs.push_back({nullptr, &input_8}); Tensor input_9 = MakeInput<int8>(TensorShape({7}), [](int x) -> int8 { return x - 7; }); inputs.push_back({nullptr, &input_9}); Tensor input_10 = MakeInput<int16>(TensorShape({7}), [](int x) -> int16 { return x - 8; }); inputs.push_back({nullptr, &input_10}); Tensor input_11 = MakeInput<int64_t>(TensorShape({9}), [](int x) -> int64 { return x - 9; }); inputs.push_back({nullptr, &input_11}); Tensor input_12 = MakeInput<tstring>( TensorShape({2}), [](int x) -> string { return x ? "yes" : "no"; }); inputs.push_back({nullptr, &input_12}); Tensor input_13 = MakeInput<complex64>( TensorShape({2, 3}), [](int x) -> complex64 { return complex64(100 + x, 200 + x); }); inputs.push_back({nullptr, &input_13}); Tensor input_14 = MakeInput<Eigen::half>(TensorShape({2, 4}), [](int x) -> Eigen::half { return static_cast<Eigen::half>(x) / Eigen::half(5); }); inputs.push_back({nullptr, &input_14}); Tensor input_15 = MakeInput<float>(TensorShape({2, 0}), [](int x) -> float { return static_cast<float>(x) / 10; }); inputs.push_back({nullptr, &input_15}); OpKernelContext::Params params; params.device = device.get(); params.frame_iter = FrameAndIter(0, 0); params.inputs = inputs; params.op_kernel = op.get(); std::vector<AllocatorAttributes> attrs; test::SetOutputAttrs(&params, &attrs); checkpoint::TensorSliceReaderCacheWrapper slice_reader_cache_wrapper; params.slice_reader_cache = &slice_reader_cache_wrapper; OpKernelContext ctx(&params); op->Compute(&ctx); TF_EXPECT_OK(ctx.status()); } { MakeRestoreOp(DT_BOOL); AddInput<tstring>(TensorShape({}), [&filename](int x) -> tstring { return filename; }); AddInput<tstring>(TensorShape({}), [&](int x) -> tstring { return tensor_names[0]; }); TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 2; ++i) { EXPECT_EQ(i != 0, output->flat<bool>()(i)); } } { MakeRestoreOp(DT_INT32); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[1]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({10}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(i + 1, output->flat<int32>()(i)); } } { MakeRestoreOp(DT_FLOAT); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[2]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2, 4}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 8; ++i) { EXPECT_EQ(static_cast<float>(i) / 10, output->flat<float>()(i)); } } { MakeRestoreOp(DT_DOUBLE); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[3]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2, 4}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 8; ++i) { EXPECT_EQ(static_cast<double>(i) / 20, output->flat<double>()(i)); } } { MakeRestoreOp(DT_QINT8); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[4]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({3, 2}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i)); } } { MakeRestoreOp(DT_QINT32); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[5]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = 
GetOutput(0); TensorShape expected({2, 3}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), output->flat<qint32>()(i)); } } { MakeRestoreOp(DT_UINT8); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[6]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({11}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 11; ++i) { EXPECT_EQ(i + 1, output->flat<uint8>()(i)); } } { MakeRestoreOp(DT_INT8); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[7]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({7}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 7; ++i) { EXPECT_EQ(i - 7, output->flat<int8>()(i)); } } { MakeRestoreOp(DT_INT16); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[8]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({7}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 7; ++i) { EXPECT_EQ(i - 8, output->flat<int16>()(i)); } } { MakeRestoreOp(DT_INT64); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[9]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({9}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 9; ++i) { EXPECT_EQ(i - 9, output->flat<int64_t>()(i)); } } { MakeRestoreOp(DT_STRING); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[10]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2}); EXPECT_TRUE(output->shape().IsSameSize(expected)); EXPECT_EQ("no", output->flat<tstring>()(0)); EXPECT_EQ("yes", output->flat<tstring>()(1)); } { MakeRestoreOp(DT_COMPLEX64); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[11]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2, 3}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(complex64(100 + i, 200 + i), output->flat<complex64>()(i)); } } { MakeRestoreOp(DT_HALF); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[12]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2, 4}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int i = 0; i < 8; ++i) { EXPECT_EQ(static_cast<Eigen::half>(i) / Eigen::half(5), output->flat<Eigen::half>()(i)); } } { MakeRestoreOp(DT_FLOAT); (*mutable_input(1).tensor).scalar<tstring>()() = tensor_names[13]; TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2, 0}); EXPECT_TRUE(output->shape().IsSameSize(expected)); } } class RestoreSliceOpTest : public OpsTestBase { protected: void MakeRestoreSliceOp(DataType dt) { TF_ASSERT_OK(NodeDefBuilder("myop", "RestoreSlice") .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput()) .Attr("dt", dt) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(RestoreSliceOpTest, RestoreInt) { const string filename = io::JoinPath(testing::TmpDir(), "tensor_int"); const string tensor_name = "tensor_int"; { NodeDef save; TF_ASSERT_OK(NodeDefBuilder("save", "Save") .Input(FakeInput(DT_STRING)) .Input(FakeInput(DT_STRING)) .Input(FakeInput({DT_INT32})) .Finalize(&save)); std::unique_ptr<Device> device( DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0")); absl::InlinedVector<TensorValue, 4> inputs; Status status; std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(), 
save, TF_GRAPH_DEF_VERSION, &status)); TF_EXPECT_OK(status); Tensor input_0(DT_STRING, TensorShape({})); input_0.scalar<tstring>()() = filename; inputs.push_back({nullptr, &input_0}); Tensor input_1(DT_STRING, TensorShape({})); input_1.scalar<tstring>()() = tensor_name; inputs.push_back({nullptr, &input_1}); Tensor input_2(DT_INT32, TensorShape({4, 16})); for (int64_t i = 0; i < input_2.NumElements(); ++i) { input_2.flat<int32>()(i) = i + 1; } inputs.push_back({nullptr, &input_2}); OpKernelContext::Params params; params.device = device.get(); params.frame_iter = FrameAndIter(0, 0); params.inputs = inputs; params.op_kernel = op.get(); std::vector<AllocatorAttributes> attrs; test::SetOutputAttrs(&params, &attrs); checkpoint::TensorSliceReaderCacheWrapper slice_reader_cache_wrapper; params.slice_reader_cache = &slice_reader_cache_wrapper; OpKernelContext ctx(&params); op->Compute(&ctx); TF_EXPECT_OK(ctx.status()); } MakeRestoreSliceOp(DT_INT32); string shape_and_slice = "4 16 0,2:-"; AddInput<tstring>(TensorShape({}), [&filename](int x) -> tstring { return filename; }); AddInput<tstring>(TensorShape({}), [&tensor_name](int x) -> tstring { return tensor_name; }); AddInput<tstring>(TensorShape({}), [&shape_and_slice](int x) -> tstring { return shape_and_slice; }); TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); TensorShape expected({2, 16}); EXPECT_TRUE(output->shape().IsSameSize(expected)); for (int64_t i = 0; i < expected.num_elements(); ++i) { EXPECT_EQ(i + 1, output->flat<int32>()(i)); } } } }
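One detail worth spelling out from the RestoreSlice test above: the third input, "4 16 0,2:-", is a shape-and-slice string. The leading integers are the full checkpointed shape (4 x 16); the trailing spec gives one "start,length" pair per dimension, separated by ":", with "-" meaning the whole dimension. "0,2:-" therefore selects rows [0, 2) and every column, which is why the expected restored shape is {2, 16}. A tiny sketch with a hypothetical helper name:

#include <string>

#include "tensorflow/core/lib/strings/strcat.h"

namespace tensorflow {

// Hypothetical helper: builds the shape-and-slice string used above.
// E.g. FirstRowsSpec(4, 16, 2) == "4 16 0,2:-".
std::string FirstRowsSpec(int rows, int cols, int keep_rows) {
  return strings::StrCat(rows, " ", cols, " 0,", keep_rows, ":-");
}

}  // namespace tensorflow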
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/restore_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/restore_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
411d864e-9c80-468d-8190-fd508633bd99
cpp
tensorflow/tensorflow
quantization_utils
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantization_utils_test.cc
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h" #include <algorithm> #include <cmath> #include <cstdint> #include <cstdlib> #include <functional> #include <iterator> #include <limits> #include <memory> #include <numeric> #include <string> #include <vector> #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OpDefinition.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h" #include "tensorflow/compiler/mlir/lite/quantization/ir/QuantizeUtils.h" #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/portable_tensor_utils.h" #include "tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.h" #include "tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h" #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h" #include "tensorflow/compiler/mlir/tools/optimize/quantization_utils.h" namespace mlir { #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_interface.cc.inc" namespace quant { namespace { constexpr double kSmallestHalfRange = kNearZeroTolerance / 2; using QType = quant::QuantizedType; template <typename T> bool BroadcastVector(int target_size, SmallVectorImpl<T>& data) { const int size = data.size(); if (size != target_size) { if (target_size % size != 0) return true; data.reserve(target_size); for (int i = 1; i < target_size / size; ++i) { data.insert(data.end(), data.begin(), data.begin() + size); } } return false; } void ExpandVerySmallRange(const ArrayRef<double> mins, const ArrayRef<double> maxs, SmallVectorImpl<double>& effective_mins, SmallVectorImpl<double>& effective_maxs) { for (const auto [min, max] : llvm::zip(mins, maxs)) { if (max - min > kNearZeroTolerance) { effective_mins.push_back(min); effective_maxs.push_back(max); } else { effective_mins.push_back(std::min(min, -kSmallestHalfRange)); effective_maxs.push_back(std::max(max, kSmallestHalfRange)); } } } QuantizedType ResetMinMaxFromNumBits(const QuantizedType type, const int num_bits, const bool narrow_range, const bool is_signed) { if (num_bits >= 8) { return type; } int64_t qmin = QType::getDefaultMinimumForInteger(is_signed, num_bits); int64_t qmax = QType::getDefaultMaximumForInteger(is_signed, num_bits); if (narrow_range) { qmin += 1; } const int64_t storage_type_min = type.getStorageTypeMin(); const int64_t storage_type_max = type.getStorageTypeMax(); const double rate = static_cast<double>(storage_type_max - storage_type_min) / (qmax - qmin); const auto& recalculate_scale = [&](double scale) -> double { return scale * rate; }; const auto& recalculate_zero_point = [&](int64_t zero_point) -> int64_t { return qmax - std::round((storage_type_max - zero_point) / rate); }; if (auto q_type = dyn_cast<UniformQuantizedType>(type)) { const double scale = recalculate_scale(q_type.getScale()); const double zero_point = recalculate_zero_point(q_type.getZeroPoint()); 
return UniformQuantizedType::get(q_type.getFlags(), q_type.getStorageType(), q_type.getExpressedType(), scale, zero_point, qmin, qmax); } else if (auto q_type = dyn_cast<quant::UniformQuantizedPerAxisType>(type)) { const int size = q_type.getScales().size(); SmallVector<double, 4> scales(size); SmallVector<int64_t, 4> zero_points(size); for (int i = 0; i < size; ++i) { scales[i] = recalculate_scale(q_type.getScales()[i]); zero_points[i] = recalculate_zero_point(q_type.getZeroPoints()[i]); } return quant::UniformQuantizedPerAxisType::get( q_type.getFlags(), q_type.getStorageType(), q_type.getExpressedType(), scales, zero_points, q_type.getQuantizedDimension(), qmin, qmax); } else { llvm_unreachable("Unsupported QuantizedType in ResetMinMaxFromNumBits"); } return type; } quant::UniformQuantizedPerAxisType ResetAxisAndBroadcast( const ArrayRef<int64_t> shape, const quant::UniformQuantizedPerAxisType qtype, const Type target, const int quant_dim) { const auto shaped = dyn_cast<RankedTensorType>(target); if (!shaped) return {}; const ArrayRef<int64_t> new_shape = shaped.getShape(); SmallVector<double, 4> scales(qtype.getScales().begin(), qtype.getScales().end()); SmallVector<int64_t, 4> zero_points(qtype.getZeroPoints().begin(), qtype.getZeroPoints().end()); if (new_shape.size() == shape.size()) { if (BroadcastVector<double>(shaped.getDimSize(quant_dim), scales) || BroadcastVector<int64_t>(shaped.getDimSize(quant_dim), zero_points)) { return {}; } } else if ((new_shape.size() == shape.size() + 1) && new_shape.front() == 1) { if (!(std::equal(shape.begin(), shape.end(), new_shape.begin() + 1) && quant_dim == new_shape.size() - 1)) { return {}; } } else { return {}; } return quant::UniformQuantizedPerAxisType::get( qtype.getFlags(), qtype.getStorageType(), qtype.getExpressedType(), scales, zero_points, quant_dim, qtype.getStorageTypeMin(), qtype.getStorageTypeMax()); } } bool IsOpQuantizable(Operation* op) { if (isa<func::ConstantOp, arith::ConstantOp, quantfork::StatisticsOp>(op)) { return true; } else if (op->hasTrait<OpTrait::IsTerminator>() || isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(op)) { return false; } const bool attr_enforced_quantizable = op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) && op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() == QuantTraitValues[QuantizationTrait::FullyQuantizable]; const bool trait_enforced_quantizable = op->hasTrait<OpTrait::quant::QuantizableResult>(); return attr_enforced_quantizable || trait_enforced_quantizable; } Type GetQuantizedType(Builder builder, const Type input_type, const ArrayRef<double> min, const ArrayRef<double> max, const int quant_dim, const int storage_type_width, const bool narrow_range, const bool is_signed, const bool legacy_float_scale, const bool use_fake_quant_num_bits) { auto converter = quantfork::ExpressedToQuantizedConverter::forInputType(input_type); SmallVector<double, 4> effective_mins, effective_maxs; ExpandVerySmallRange(min, max, effective_mins, effective_maxs); quant::QuantizedType quantized_element_type; if (min.size() == 1 && max.size() == 1 && quant_dim == -1) { quantized_element_type = quantfork::fakeQuantAttrsToType( builder.getUnknownLoc(), storage_type_width, effective_mins[0], effective_maxs[0], narrow_range, converter.expressed_type, is_signed); if (legacy_float_scale) { quantized_element_type = DownCastScale(quantized_element_type, effective_mins[0], effective_maxs[0], builder.getUnknownLoc()); } } else if (min.size() == max.size()) { auto shape = 
dyn_cast<ShapedType>(input_type); if (!shape || shape.getRank() <= quant_dim || static_cast<int64_t>(min.size()) != shape.getDimSize(quant_dim)) { return {}; } quantized_element_type = quantfork::fakeQuantAttrsToType( builder.getUnknownLoc(), storage_type_width, quant_dim, effective_mins, effective_maxs, narrow_range, converter.expressed_type, is_signed); if (legacy_float_scale) { quantized_element_type = DownCastScale(quantized_element_type, effective_mins, effective_maxs, builder.getUnknownLoc()); } } if (!quantized_element_type) return {}; if (use_fake_quant_num_bits && storage_type_width > 1 && storage_type_width < 8 && quantized_element_type.getStorageTypeMax() > QType::getDefaultMinimumForInteger(is_signed, storage_type_width)) { const auto resetEleType = ResetMinMaxFromNumBits( quantized_element_type, storage_type_width, narrow_range, is_signed); return converter.convert(resetEleType); } return converter.convert(quantized_element_type); } TypeAttr RescaleQuantizedType(const Type input, const Attribute factor) { const auto factor_values = dyn_cast_or_null<DenseFPElementsAttr>(factor); if (!factor_values) return {}; const auto element_type = quant::QuantizedType::getQuantizedElementType(input); if (!element_type) return {}; if (auto qtype = dyn_cast<quant::UniformQuantizedPerAxisType>(element_type)) { const ArrayRef<double> scales = qtype.getScales(); if (static_cast<int64_t>(scales.size()) != factor_values.getNumElements()) return {}; SmallVector<double, 4> new_scales; new_scales.reserve(scales.size()); auto scales_iter = scales.begin(); for (const auto& f : factor_values) { new_scales.push_back(*scales_iter * std::fabs(FloatAttr::getValueAsDouble(f))); ++scales_iter; } auto new_ele_type = quant::UniformQuantizedPerAxisType::get( qtype.getFlags(), qtype.getStorageType(), qtype.getExpressedType(), new_scales, qtype.getZeroPoints(), qtype.getQuantizedDimension(), qtype.getStorageTypeMin(), qtype.getStorageTypeMax()); if (const auto new_type = new_ele_type.castFromExpressedType( quant::QuantizedType::castToExpressedType(input))) { return TypeAttr::get(new_type); } } return {}; } TypeAttr GetQuantizedTypeAttr(const Builder builder, const Type input_type, const Attribute min, const Attribute max, const int quant_dim, const IntegerAttr num_bits, const BoolAttr narrow_range, const bool is_signed, const bool legacy_float_scale, const bool use_fake_quant_num_bits) { SmallVector<double, 4> min_value, max_value; const auto mins = dyn_cast<DenseFPElementsAttr>(min); const auto maxs = dyn_cast<DenseFPElementsAttr>(max); if (mins && maxs) { min_value.reserve(mins.getNumElements()); max_value.reserve(maxs.getNumElements()); for (auto it = mins.begin(); it != mins.end(); ++it) { min_value.push_back(FloatAttr::getValueAsDouble(*it)); } for (auto it = maxs.begin(); it != maxs.end(); ++it) { max_value.push_back(FloatAttr::getValueAsDouble(*it)); } } else { const auto fmin = dyn_cast<FloatAttr>(min); const auto fmax = dyn_cast<FloatAttr>(max); if (fmin && fmax) { min_value.push_back(fmin.getValueAsDouble()); max_value.push_back(fmax.getValueAsDouble()); } else { return {}; } } const Type final_type = GetQuantizedType(builder, input_type, min_value, max_value, quant_dim, num_bits.getInt(), narrow_range.getValue(), is_signed, legacy_float_scale, use_fake_quant_num_bits); if (!final_type) return {}; return TypeAttr::get(final_type); } TypeAttr CastQuantizedTypeAttrFromExpressedType(const Builder builder, const TypeAttr source, const Type target, const int axis) { const auto source_type = 
dyn_cast_or_null<ShapedType>(source.getValue()); if (!source_type) return {}; const auto src_ele_type = source_type.getElementType(); auto qtype = dyn_cast<quant::QuantizedType>(src_ele_type); if (const auto per_axis = dyn_cast_or_null<quant::UniformQuantizedPerAxisType>(qtype)) { if (axis == -1) return {}; qtype = ResetAxisAndBroadcast(source_type.getShape(), per_axis, target, axis); } if (!qtype) return {}; const Type final_type = qtype.castFromExpressedType(target); if (!final_type) return {}; return TypeAttr::get(final_type); } void ExtractMinMaxFromAttr(const DenseFPElementsAttr values, const int dim_size, const int slice_size, bool symmetric, SmallVectorImpl<double>& mins, SmallVectorImpl<double>& maxs) { if (values.isSplat()) { const double single_value = FloatAttr::getValueAsDouble(values.getSplatValue<llvm::APFloat>()); if (single_value < 0.0) { mins[0] = single_value; maxs[0] = symmetric ? -single_value : 0.0; } else if (single_value > 0.0) { mins[0] = symmetric ? -single_value : 0.0; maxs[0] = single_value; } else { mins[0] = maxs[0] = single_value; } for (int i = 1; i < dim_size; ++i) { mins[i] = mins[0]; maxs[i] = maxs[0]; } } else { int64_t flatten_index = 0; auto begin = values.begin(); auto end = values.end(); for (auto it = begin; it != end; ++it, ++flatten_index) { const double ele_value = FloatAttr::getValueAsDouble(*it); const int slice_index = flatten_index / slice_size; const int channel_index = slice_index % dim_size; mins[channel_index] = std::min(mins[channel_index], ele_value); maxs[channel_index] = std::max(maxs[channel_index], ele_value); } for (int i = 0; i < dim_size; ++i) { maxs[i] = std::max(maxs[i], 0.0); mins[i] = std::min(mins[i], 0.0); } if (symmetric) { for (int i = 0; i < dim_size; ++i) { maxs[i] = std::max(std::abs(mins[i]), std::abs(maxs[i])); mins[i] = -maxs[i]; } } } } Type GetUniformQuantizedTypeForWeight( const ElementsAttr attr, const bool symmetric, const unsigned num_bits, const bool is_signed, const bool narrow_range, const bool legacy_float_scale, const bool use_fake_quant_num_bits) { const Builder builder(attr.getContext()); if (symmetric && (!is_signed || !narrow_range)) return {}; SmallVector<double, 4> mins(1, std::numeric_limits<double>::max()); SmallVector<double, 4> maxs(1, std::numeric_limits<double>::min()); const auto fp = dyn_cast<DenseFPElementsAttr>(attr); if (!fp) return {}; ExtractMinMaxFromAttr(fp, 1, 1, symmetric, mins, maxs); const auto type = GetQuantizedType(builder, attr.getType(), mins[0], maxs[0], -1, num_bits, narrow_range, is_signed, legacy_float_scale, use_fake_quant_num_bits); if (const auto ele_type = dyn_cast_or_null<TensorType>(type)) return ele_type.getElementType(); return {}; } Type GetUniformQuantizedPerAxisTypeForWeight( const ElementsAttr attr, const int quant_dim, const bool symmetric, const unsigned num_bits, const bool is_signed, const bool narrow_range, const bool legacy_float_scale, const bool use_fake_quant_num_bits) { const Builder builder(attr.getContext()); const auto shape = cast<ShapedType>(attr.getType()).getShape(); if (static_cast<int>(shape.size()) <= quant_dim) return {}; if (symmetric && (!is_signed || !narrow_range)) return {}; const int dim_size = shape[quant_dim]; const int slice_size = std::accumulate(std::next(shape.begin(), quant_dim + 1), shape.end(), 1, std::multiplies<int64_t>()); SmallVector<double, 4> mins(dim_size, std::numeric_limits<double>::max()); SmallVector<double, 4> maxs(dim_size, std::numeric_limits<double>::min()); const auto fp = dyn_cast<DenseFPElementsAttr>(attr); 
if (!fp) return {}; ExtractMinMaxFromAttr(fp, dim_size, slice_size, symmetric, mins, maxs); const auto type = GetQuantizedType( builder, attr.getType(), mins, maxs, quant_dim, num_bits, narrow_range, is_signed, legacy_float_scale, use_fake_quant_num_bits); if (auto ele_type = dyn_cast_or_null<TensorType>(type)) return ele_type.getElementType(); return {}; } quant::QuantizedType GetUniformQuantizedTypeForBias( const std::vector<quant::QuantizedType>& op_types, const int adjusted_quant_dim, const bool legacy_float_scale) { if (op_types.empty()) return {}; size_t axis_size = 1; int32_t quant_dim = -1; Type expressed_type; for (const auto op_type : op_types) { if (!op_type) return {}; if (expressed_type && expressed_type != op_type.getExpressedType()) { return {}; } expressed_type = op_type.getExpressedType(); if (const auto type = dyn_cast<quant::UniformQuantizedPerAxisType>(op_type)) { if (axis_size != 1 && axis_size != type.getScales().size()) return {}; if (quant_dim != -1 && quant_dim != type.getQuantizedDimension()) return {}; axis_size = type.getScales().size(); quant_dim = type.getQuantizedDimension(); } else if (!isa<quant::UniformQuantizedType>(op_type)) { return {}; } } SmallVector<double, 4> scales(axis_size, 1.0); for (const auto op_type : op_types) { if (const auto type = dyn_cast<quant::UniformQuantizedPerAxisType>(op_type)) { for (const auto& index_scale : llvm::enumerate(type.getScales())) { scales[index_scale.index()] *= index_scale.value(); } } else if (const auto type = dyn_cast<quant::UniformQuantizedType>(op_type)) { for (int index = 0; index < axis_size; ++index) { scales[index] *= type.getScale(); } } } if (legacy_float_scale) { for (int i = 0; i < scales.size(); ++i) { scales[i] = static_cast<float>(scales[i]); } } Builder builder(expressed_type.getContext()); const IntegerType storage_type = builder.getIntegerType(32); const int64_t storage_type_min = quant::QuantizedType::getDefaultMinimumForInteger(true, 32); const int64_t storage_type_max = quant::QuantizedType::getDefaultMaximumForInteger(true, 32); if (axis_size == 1) { return quant::UniformQuantizedType::getChecked( builder.getUnknownLoc(), true, storage_type, expressed_type, scales[0], 0, storage_type_min, storage_type_max); } else { SmallVector<int64_t, 4> zero_points(axis_size, 0); return quant::UniformQuantizedPerAxisType::getChecked( builder.getUnknownLoc(), true, storage_type, expressed_type, scales, zero_points, std::max(adjusted_quant_dim, 0), storage_type_min, storage_type_max); } } ElementsAttr QuantizeLegacy(const Attribute real_value, const Type tensor_type) { if (!isa<DenseFPElementsAttr>(real_value) || !quant::QuantizedType::getQuantizedElementType(tensor_type)) { return {}; } const auto real_values_attr = cast<DenseFPElementsAttr>(real_value); auto q_type = quant::QuantizedType::getQuantizedElementType(tensor_type); std::vector<float> real_values; SmallVector<APInt, 8> quantized_attr; real_values.reserve(real_values_attr.getNumElements()); quantized_attr.reserve(real_values_attr.getNumElements()); std::transform(real_values_attr.begin(), real_values_attr.end(), std::back_inserter(real_values), [&](APFloat value) -> float { return value.convertToFloat(); }); const ShapedType new_dense_type = dyn_cast_or_null<ShapedType>( q_type.castExpressedToStorageType(real_values_attr.getType())); const int width = dyn_cast<IntegerType>(q_type.getStorageType()).getWidth(); if (width == 8 && q_type.getStorageTypeMax() == 127 && q_type.getStorageTypeMin() == -127) { std::vector<int8_t> 
quantized_values(real_values_attr.getNumElements()); if (auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) { float min, max, scale; mlir::lite::toco_legacy::PortableSymmetricQuantizeFloats( real_values.data(), real_values.size(), quantized_values.data(), &min, &max, &scale); if (std::abs(scale - uniform_type.getScale()) > 1e-3) { return Quantize(real_value, tensor_type); } } else if (auto uniform_type = dyn_cast<quant::UniformQuantizedPerAxisType>(q_type)) { std::vector<float> scales_inv; std::vector<int32_t> dimension; dimension.insert(dimension.end(), new_dense_type.getShape().begin(), new_dense_type.getShape().end()); std::transform(uniform_type.getScales().begin(), uniform_type.getScales().end(), std::back_inserter(scales_inv), [](float scale) { return 1.0 / scale; }); tflite_migration::optimize::utils::SymmetricPerChannelQuantizeValues( real_values.data(), scales_inv, dimension, uniform_type.getQuantizedDimension(), &quantized_values); } else { return {}; } std::transform(quantized_values.begin(), quantized_values.end(), std::back_inserter(quantized_attr), [&](int8_t value) -> APInt { return APInt(8, value, true); }); return DenseElementsAttr::get(new_dense_type, quantized_attr); } else if (width == 8) { return Quantize(real_value, tensor_type); } else if (width == 16) { if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) { const auto quantized_values = tflite_migration::optimize::utils::SymmetricQuantizeFloatsToInt16( real_values.data(), real_values.size(), uniform_type.getScale()); std::transform(quantized_values.begin(), quantized_values.end(), std::back_inserter(quantized_attr), [&](int16_t value) -> APInt { return APInt(16, value, true); }); return DenseElementsAttr::get(new_dense_type, quantized_attr); } } else if (width == 32) { std::vector<float> scales; if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) { scales.push_back(uniform_type.getScale()); } else if (const auto uniform_type = dyn_cast<quant::UniformQuantizedPerAxisType>(q_type)) { scales.insert(scales.end(), uniform_type.getScales().begin(), uniform_type.getScales().end()); } else { return {}; } const auto quantized_bias = tflite_migration::optimize::utils::SymmetricBiasQuantize<std::int32_t>( real_values.data(), real_values.size(), scales); std::transform(quantized_bias.begin(), quantized_bias.end(), std::back_inserter(quantized_attr), [&](int32_t value) -> APInt { return APInt(32, value, true); }); return DenseElementsAttr::get(new_dense_type, quantized_attr); } return {}; } ElementsAttr Quantize(const Attribute real_value, const Type tensor_type) { if (const auto q_type = quant::QuantizedType::getQuantizedElementType(tensor_type)) { Type converted_type; return dyn_cast_or_null<ElementsAttr>( quantfork::quantizeAttr(real_value, q_type, converted_type)); } return {}; } quant::QuantizedType DownCastScale(QuantizedType type, double min, double max, Location loc) { const SmallVector<double, 1> mins = {min}; const SmallVector<double, 1> maxs = {max}; return DownCastScale(type, mins, maxs, loc); } quant::QuantizedType DownCastScale(QuantizedType type, const SmallVectorImpl<double>& mins, const SmallVectorImpl<double>& maxs, Location loc) { if (!type) return type; SmallVector<double, 4> scales(mins.size()); SmallVector<int64_t, 4> zero_points(mins.size()); if (auto q_type = dyn_cast<UniformQuantizedType>(type)) { zero_points.push_back(q_type.getZeroPoint()); } else if (auto q_type = dyn_cast<quant::UniformQuantizedPerAxisType>(type)) { zero_points = 
{q_type.getZeroPoints().begin(), q_type.getZeroPoints().end()}; } for (int i = 0; i < mins.size(); ++i) { scales[i] = (static_cast<float>(maxs[i]) - static_cast<float>(mins[i])) / (type.getStorageTypeMax() - type.getStorageTypeMin()); if (type.getStorageTypeMax() != -type.getStorageTypeMin()) { const float zero_point_from_min = type.getStorageTypeMin() - mins[i] / scales[i]; if (zero_point_from_min < type.getStorageTypeMin()) { zero_points[i] = static_cast<int64_t>(type.getStorageTypeMin()); } else if (zero_point_from_min > type.getStorageTypeMax()) { zero_points[i] = static_cast<int64_t>(type.getStorageTypeMax()); } else { zero_points[i] = static_cast<int64_t>(std::round(zero_point_from_min)); } } } if (auto q_type = dyn_cast<UniformQuantizedType>(type)) { return UniformQuantizedType::get(q_type.getFlags(), q_type.getStorageType(), q_type.getExpressedType(), scales[0], zero_points[0], q_type.getStorageTypeMin(), q_type.getStorageTypeMax()); } else if (auto q_type = dyn_cast<quant::UniformQuantizedPerAxisType>(type)) { return quant::UniformQuantizedPerAxisType::get( q_type.getFlags(), q_type.getStorageType(), q_type.getExpressedType(), scales, zero_points, q_type.getQuantizedDimension(), q_type.getStorageTypeMin(), q_type.getStorageTypeMax()); } return type; } static bool PreferResultScale(Operation* op) { int float_operands = 0; for (auto operand : op->getOperands()) { if (auto operand_type = dyn_cast<ShapedType>(operand.getType())) { if (isa<FloatType>(operand_type.getElementType())) { if (++float_operands > 1) return true; } } } return false; } std::unique_ptr<OpQuantScaleSpec> GetDefaultQuantScaleSpec(Operation* op) { auto spec = std::make_unique<OpQuantScaleSpec>(); if (isa<SameScalesOpInterface>(op)) { spec->has_same_scale_requirement = true; spec->required_same_scale_func = [op](const bool sign, const int bit_width) { return cast<SameScalesOpInterface>(op) .RequiredSameOperandsAndResultsScale(sign, bit_width); }; spec->required_same_quantized_axes_func = [op]() { return cast<SameScalesOpInterface>(op).RequiredSameQuantizedAxes(); }; } if (isa<FixedOutputRangeInterface>(op)) { spec->has_fixed_output_range = true; spec->fixed_output_range_func = [op](bool sign, int bit_width) { return cast<FixedOutputRangeInterface>(op).GetFixedOutputRange(sign, bit_width); }; } return spec; } static bool IsStatsRedundant( Operation* op, const OpQuantSpecGetter op_quant_spec_getter, const OpQuantScaleSpecGetter op_quant_scale_spec_getter) { return isa<FixedOutputRangeInterface>(op) || op_quant_scale_spec_getter(op)->has_fixed_output_range; } static bool IsSameScaleOp( Operation* op, const OpQuantScaleSpecGetter op_quant_scale_spec_getter) { return dyn_cast<SameScalesOpInterface>(op) || op_quant_scale_spec_getter(op)->has_same_scale_requirement; } bool RemoveRedundantStatsOps( func::FuncOp func, const OpQuantSpecGetter op_quant_spec_getter, const OpQuantScaleSpecGetter op_quant_scale_spec_getter) { SmallVector<quantfork::StatisticsOp, 16> all_stats_ops; llvm::DenseSet<Operation*> redundant_stats_ops; func.walk([&](quantfork::QuantizeCastOp q) { auto input_op = q.getArg().getDefiningOp(); if (auto stats = dyn_cast_or_null<quantfork::StatisticsOp>(input_op)) { q.setOperand(stats.getArg()); if (stats.use_empty()) stats.erase(); } }); func.walk([&](quantfork::StatisticsOp stats_op) { all_stats_ops.push_back(stats_op); }); while (!all_stats_ops.empty()) { quantfork::StatisticsOp stats_op = all_stats_ops.back(); all_stats_ops.pop_back(); if (auto def = stats_op.getArg().getDefiningOp()) { if 
(IsStatsRedundant(def, op_quant_spec_getter, op_quant_scale_spec_getter)) { redundant_stats_ops.insert(stats_op); } } for (Operation* user : stats_op.getResult().getUsers()) { if (!IsSameScaleOp(user, op_quant_scale_spec_getter) || PreferResultScale(user)) { continue; } for (Value res : user->getResults()) { if (!res.hasOneUse()) { continue; } if (auto next_stats = dyn_cast<quantfork::StatisticsOp>(*res.getUsers().begin())) { redundant_stats_ops.insert(next_stats); all_stats_ops.push_back(next_stats); } } } } func.walk([&](quantfork::StatisticsOp stats_op) { if (redundant_stats_ops.find(stats_op) == redundant_stats_ops.end()) { all_stats_ops.push_back(stats_op); } }); while (!all_stats_ops.empty()) { quantfork::StatisticsOp stats_op = all_stats_ops.back(); all_stats_ops.pop_back(); if (Operation* def = stats_op.getArg().getDefiningOp()) { if (!IsSameScaleOp(def, op_quant_scale_spec_getter)) { continue; } for (Value input : def->getOperands()) { if (auto next_stats = dyn_cast_or_null<quantfork::StatisticsOp>( input.getDefiningOp())) { redundant_stats_ops.insert(next_stats); all_stats_ops.push_back(next_stats); } } } } for (Operation* it : redundant_stats_ops) { if (!isa<quantfork::StatisticsOp>(it)) return true; auto stats_op = cast<quantfork::StatisticsOp>(it); stats_op.getResult().replaceAllUsesWith(stats_op.getArg()); stats_op.erase(); } return false; } LogicalResult VerifySameScales(Operation* op) { auto same_scale_op = cast<SameScalesOpInterface>(op); SmallVector<QuantizedType, 4> collected_quant_params; for (Value input : op->getOperands()) { QuantizedType quant_params = QuantizedType::getQuantizedElementType(input.getType()); if (quant_params) { collected_quant_params.push_back(quant_params); } } for (Value output : op->getResults()) { const QuantizedType quant_params = QuantizedType::getQuantizedElementType(output.getType()); if (quant_params) { collected_quant_params.push_back(quant_params); } } if (collected_quant_params.size() <= 1) return success(); const auto& expected_params = collected_quant_params[0]; for (int i = 1; i < collected_quant_params.size(); ++i) { const auto& compared_params = collected_quant_params[i]; if (!same_scale_op.RequiredSameQuantizedAxes()) { const auto expected_per_axis_qtype = dyn_cast<quant::UniformQuantizedPerAxisType>(expected_params); const auto compared_per_axis_qtype = dyn_cast<quant::UniformQuantizedPerAxisType>(compared_params); if (expected_per_axis_qtype && compared_per_axis_qtype && llvm::equal(expected_per_axis_qtype.getScales(), compared_per_axis_qtype.getScales()) && llvm::equal(expected_per_axis_qtype.getZeroPoints(), compared_per_axis_qtype.getZeroPoints()) && expected_params.getStorageType() == compared_params.getStorageType() && expected_params.getExpressedType() == compared_params.getExpressedType()) { continue; } } if (expected_params == compared_params) continue; if (expected_params.isSigned() == compared_params.isSigned() && expected_params.getStorageTypeIntegralWidth() == compared_params.getStorageTypeIntegralWidth() && !same_scale_op.RequiredSameOperandsAndResultsScale( expected_params.isSigned(), expected_params.getStorageTypeIntegralWidth())) continue; std::string err_msg = "quantization parameters violate the same scale constraint: "; llvm::raw_string_ostream os(err_msg); expected_params.print(os); os << " vs. 
"; compared_params.print(os); os.flush(); return op->emitOpError(err_msg); } return success(); } quant::UniformQuantizedType GetFixedOutputRange( const bool is_signed, const int bit_width, const Type tensor_type, const double scale, int64_t zero_point, int64_t storage_min, int64_t storage_max) { const auto result_type = cast<ShapedType>(tensor_type); if (!isa<FloatType>(result_type.getElementType())) return {}; Builder builder(result_type.getContext()); if (bit_width != 8 && bit_width != 16) return {}; const IntegerType storage_type = builder.getIntegerType(bit_width); if (!is_signed && bit_width == 8) { zero_point += 128; storage_min += 128; storage_max += 128; } return quant::UniformQuantizedType::getChecked( builder.getUnknownLoc(), is_signed, storage_type, result_type.getElementType(), scale, zero_point, storage_min, storage_max); } quant::UniformQuantizedType GetFixedOutputRange(const bool is_signed, const int bit_width, const Type tensor_type, const double scale, const int64_t zero_point) { return GetFixedOutputRange(is_signed, bit_width, tensor_type, scale, zero_point, -(1 << (bit_width - 1)), (1 << (bit_width - 1)) - 1); } Type ConvertSignedQuantizedToUnsigned(const Type signed_tensor_type, const Location loc) { const auto qtype = QType::getQuantizedElementType(signed_tensor_type); if (!qtype || !qtype.isSigned()) return {}; const int num_bits = qtype.getStorageTypeIntegralWidth(); const int64_t offset = QType::getDefaultMinimumForInteger(true, num_bits) - QType::getDefaultMinimumForInteger(false, num_bits); const auto flags = !quant::QuantizationFlags::Signed; QType new_qtype; if (auto uqtype = dyn_cast<quant::UniformQuantizedType>(qtype)) { new_qtype = quant::UniformQuantizedType::getChecked( loc, flags, qtype.getStorageType(), qtype.getExpressedType(), uqtype.getScale(), uqtype.getZeroPoint() - offset, uqtype.getStorageTypeMin() - offset, uqtype.getStorageTypeMax() - offset); } else if (auto aqtype = dyn_cast<quant::UniformQuantizedPerAxisType>(qtype)) { const auto zero_points = aqtype.getZeroPoints(); SmallVector<int64_t, 4> new_zero_points(zero_points.begin(), zero_points.end()); for (int i = 0; i < new_zero_points.size(); ++i) { new_zero_points[i] -= offset; } new_qtype = quant::UniformQuantizedPerAxisType::getChecked( loc, flags, qtype.getStorageType(), qtype.getExpressedType(), aqtype.getScales(), new_zero_points, aqtype.getQuantizedDimension(), aqtype.getStorageTypeMin() - offset, aqtype.getStorageTypeMax() - offset); } return new_qtype.castFromExpressedType( QType::castToExpressedType(signed_tensor_type)); } LogicalResult RemoveDebugAttrPattern::matchAndRewrite( Operation* op, PatternRewriter& rewriter) const { return success( op->removeAttr(kDebugModeOpQuantAttrName) || op->removeAttr(kDebugModeOpFloatAttrName)); } } }
#include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantization_utils.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <iostream> #include <memory> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h" #include "tensorflow/compiler/mlir/lite/quantization/lite/test_util.h" #include "tensorflow/compiler/mlir/lite/schema/schema_generated.h" #include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/tsl/util/command_line_flags.h" #include "tsl/platform/init_main.h" #include "tsl/platform/path.h" namespace { std::string* g_test_model_dir = nullptr; } namespace mlir { namespace lite { namespace toco_legacy { namespace { using mlir::TFL::FlatBufferModelAbslError; using tflite::BuiltinOperator_CONV_2D; using tflite::QuantizationParametersT; using tflite::SubGraphT; using tflite::TensorT; using tflite::TensorType_FLOAT16; using tflite::TensorType_FLOAT32; using tflite::TensorType_INT8; std::unique_ptr<FlatBufferModelAbslError> ReadModel(const char* model) { auto model_path = tsl::io::JoinPath(*g_test_model_dir, model); return FlatBufferModelAbslError::BuildFromFile(model_path.c_str()); } std::unique_ptr<FlatBufferModelAbslError> ReadConvModel() { return ReadModel(mlir::lite::internal::kConvModelWith0Plus10Weights); } using ::testing::ElementsAreArray; class QuantizationUtilsTest : public testing::Test {}; TEST_F(QuantizationUtilsTest, NumElements) { TensorT tensor; tensor.shape = {1, 2, 3, 4}; uint64_t num_elements; TF_EXPECT_OK(NumElements(tensor, &num_elements)); EXPECT_EQ(num_elements, 1 * 2 * 3 * 4); tensor.shape = {5}; TF_EXPECT_OK(NumElements(tensor, &num_elements)); EXPECT_EQ(num_elements, 5); tensor.shape = {}; TF_EXPECT_OK(NumElements(tensor, &num_elements)); EXPECT_EQ(num_elements, 1); tensor.shape = {1, 2, 3, -1}; EXPECT_EQ(NumElements(tensor, &num_elements).code(), absl::StatusCode::kInternal); } TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantizationWithNullQParams) { const std::vector<float> input = { 3.0, 2.0, 5.0, -2.0, 3.0, 2.0, 5.0, -2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, }; const int channel_index = 0; std::vector<float> output_scales(3); std::vector<int8_t> output_data(3 * 2 * 2 * 2); TensorT tensor = TensorT(); tensor.quantization = nullptr; tensor.shape = {3, 2, 2, 2}; TF_EXPECT_OK(mlir::lite::toco_legacy::SymmetricPerChannelQuantization( &tensor, input.data(), channel_index, &output_scales, &output_data)); const std::vector<float> expected_output_scales = {0.0393700786, 0.0629921257, 0.0472440943}; const std::vector<int8_t> expected_output_data = { 76, 51, 127, -51, 76, 51, 127, -51, 16, 32, 48, 64, 79, 95, 111, 127, 21, 0, -21, -42, -64, -85, -106, -127, }; EXPECT_THAT(output_scales, ElementsAreArray(expected_output_scales)); EXPECT_THAT(output_data, ElementsAreArray(expected_output_data)); } TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantization) { const std::vector<float> input = { 3.0, 2.0, 5.0, -2.0, 3.0, 2.0, 5.0, -2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, }; const int32_t channel_index = 0; std::vector<float> output_scales(3); std::vector<int8_t> output_data(3 * 2 * 2 * 2); TensorT tensor = TensorT(); tensor.quantization = std::make_unique<QuantizationParametersT>(); tensor.shape = {3, 2, 2, 2}; 
TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax( input.data(), tensor.shape, channel_index, tensor.quantization.get())); const std::vector<float> expected_mins = {-2.0, 1.0, -6.0}; const std::vector<float> expected_maxs = {5.0, 8.0, 1.0}; EXPECT_THAT(tensor.quantization->min, ElementsAreArray(expected_mins)); EXPECT_THAT(tensor.quantization->max, ElementsAreArray(expected_maxs)); TF_EXPECT_OK(mlir::lite::toco_legacy::SymmetricPerChannelQuantization( &tensor, input.data(), channel_index, &output_scales, &output_data)); const std::vector<float> expected_output_scales = {0.0393700786, 0.0629921257, 0.0472440943}; const std::vector<int8_t> expected_output_data = { 76, 51, 127, -51, 76, 51, 127, -51, 16, 32, 48, 64, 79, 95, 111, 127, 21, 0, -21, -42, -64, -85, -106, -127, }; EXPECT_THAT(output_scales, ElementsAreArray(expected_output_scales)); EXPECT_THAT(output_data, ElementsAreArray(expected_output_data)); } TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantization2DTensor) { const std::vector<float> input = { 3.0, 2.0, 5.0, -2.0, 3.0, 2.0, 5.0, -2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, }; const int32_t channel_index = 1; std::vector<float> output_scales(8); std::vector<int8_t> output_data(3 * 8); TensorT tensor = TensorT(); tensor.quantization = std::make_unique<QuantizationParametersT>(); tensor.shape = {3, 8}; TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax( input.data(), tensor.shape, channel_index, tensor.quantization.get())); const std::vector<float> expected_mins = {1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0}; const std::vector<float> expected_maxs = {3.0, 2.0, 5.0, 4.0, 5.0, 6.0, 7.0, 8.0}; EXPECT_THAT(tensor.quantization->min, ElementsAreArray(expected_mins)); EXPECT_THAT(tensor.quantization->max, ElementsAreArray(expected_maxs)); TF_EXPECT_OK(mlir::lite::toco_legacy::SymmetricPerChannelQuantization( &tensor, input.data(), channel_index, &output_scales, &output_data)); const std::vector<float> expected_output_scales = { 0.02362204724, 0.01574803149, 0.03937007874, 0.03149606299, 0.03937007874, 0.04724409448, 0.05511811023, 0.06299212598}; const std::vector<int8_t> expected_output_data = { 127, 127, 127, -64, 76, 42, 91, -32, 42, 127, 76, 127, 127, 127, 127, 127, 42, 0, -25, -64, -76, -85, -91, -95, }; EXPECT_THAT(output_scales, ElementsAreArray(expected_output_scales)); EXPECT_THAT(output_data, ElementsAreArray(expected_output_data)); } TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantizeValues) { const std::vector<float> input = { 13.0, 21.0, 21.0, 22.0, 31.0, 40.0, }; const std::vector<float> scales_inv = {2, 0.5, 3}; const std::vector<int32_t> dimension = {3, 1, 1, 2}; const int channel_index = 0; std::vector<int8_t> output_data(3 * 1 * 1 * 2); SymmetricPerChannelQuantizeValues(input.data(), scales_inv, dimension, channel_index, &output_data); const std::vector<int8_t> expected_output_data = { 26, 42, 11, 11, 93, 120, }; EXPECT_THAT(output_data, ElementsAreArray(expected_output_data)); } TEST_F(QuantizationUtilsTest, FillPerChannelMinMax) { const std::vector<float> input = { 13.0, 21.0, 21.0, 22.0, 31.0, 40.0, }; QuantizationParametersT quantization_params = QuantizationParametersT(); std::vector<int> dimension = {3, 1, 1, 2}; int32_t channel_dim_idx = 0; const std::vector<float> expected_mins = {13.0, 21.0, 31.0}; const std::vector<float> expected_maxs = {21.0, 22.0, 40.0}; TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax( input.data(), dimension, channel_dim_idx, &quantization_params)); 
EXPECT_EQ(quantization_params.min, expected_mins); EXPECT_EQ(quantization_params.max, expected_maxs); EXPECT_EQ(quantization_params.quantized_dimension, channel_dim_idx); } TEST_F(QuantizationUtilsTest, FillPerChannelMinMaxFillDim3) { const std::vector<float> input = { 13.0, 21.0, 21.0, 22.0, 31.0, 40.0, }; QuantizationParametersT quantization_params = QuantizationParametersT(); std::vector<int> dimension = {3, 1, 1, 2}; int32_t channel_dim_idx = 3; const std::vector<float> expected_mins = {13.0, 21.0}; const std::vector<float> expected_maxs = {31.0, 40.0}; TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax( input.data(), dimension, channel_dim_idx, &quantization_params)); EXPECT_EQ(quantization_params.min, expected_mins); EXPECT_EQ(quantization_params.max, expected_maxs); EXPECT_EQ(quantization_params.quantized_dimension, channel_dim_idx); } TEST_F(QuantizationUtilsTest, FillPerChannelMinMax2DTensor) { const std::vector<float> input = { 13.0, 21.0, 21.0, 22.0, 31.0, 40.0, }; QuantizationParametersT quantization_params = QuantizationParametersT(); std::vector<int> dimension = {3, 2}; int32_t channel_dim_idx = 1; const std::vector<float> expected_mins = {13.0, 21.0}; const std::vector<float> expected_maxs = {31.0, 40.0}; TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax( input.data(), dimension, channel_dim_idx, &quantization_params)); EXPECT_EQ(quantization_params.min, expected_mins); EXPECT_EQ(quantization_params.max, expected_maxs); EXPECT_EQ(quantization_params.quantized_dimension, channel_dim_idx); } TEST_F(QuantizationUtilsTest, SymmetricQuantizeTensorNullInputs) { EXPECT_EQ(SymmetricQuantizeTensor(nullptr, nullptr).code(), absl::StatusCode::kInvalidArgument); } TEST_F(QuantizationUtilsTest, SymmetricQuantizeTensorNullQuantParams) { ASSERT_TRUE(g_test_model_dir); ASSERT_FALSE(g_test_model_dir->empty()); auto test_model = ReadConvModel(); ASSERT_TRUE(test_model); auto readonly_model = test_model->GetModel(); ASSERT_TRUE(readonly_model); ASSERT_TRUE(readonly_model->subgraphs()); ASSERT_GE(readonly_model->subgraphs()->size(), 1); tflite::ModelT model; readonly_model->UnPackTo(&model); auto subgraph = model.subgraphs[0].get(); auto conv_op = subgraph->operators.at(0).get(); ASSERT_EQ( GetBuiltinCode(model.operator_codes.at(conv_op->opcode_index).get()), BuiltinOperator_CONV_2D); int32_t weights_tensor_idx = conv_op->inputs[1]; TensorT* weights_tensor = subgraph->tensors.at(weights_tensor_idx).get(); weights_tensor->quantization = std::make_unique<QuantizationParametersT>(); EXPECT_EQ(weights_tensor->type, TensorType_FLOAT32); size_t float_buffer_size = model.buffers.at(weights_tensor->buffer)->data.size(); TF_EXPECT_OK(SymmetricQuantizeTensor(&model, weights_tensor)); size_t quant_buffer_size = model.buffers.at(weights_tensor->buffer)->data.size(); EXPECT_EQ(weights_tensor->type, TensorType_INT8); EXPECT_EQ(quant_buffer_size * 4, float_buffer_size); } TEST_F(QuantizationUtilsTest, SymmetricQuantizeTensor) { ASSERT_TRUE(g_test_model_dir); ASSERT_FALSE(g_test_model_dir->empty()); auto test_model = ReadConvModel(); ASSERT_TRUE(test_model); auto readonly_model = test_model->GetModel(); ASSERT_TRUE(readonly_model); ASSERT_TRUE(readonly_model->subgraphs()); ASSERT_GE(readonly_model->subgraphs()->size(), 1); tflite::ModelT model; readonly_model->UnPackTo(&model); auto subgraph = model.subgraphs[0].get(); auto conv_op = subgraph->operators.at(0).get(); ASSERT_EQ( GetBuiltinCode(model.operator_codes.at(conv_op->opcode_index).get()), BuiltinOperator_CONV_2D); int32_t 
weights_tensor_idx = conv_op->inputs[1]; TensorT* weights_tensor = subgraph->tensors.at(weights_tensor_idx).get(); EXPECT_EQ(weights_tensor->type, TensorType_FLOAT32); size_t float_buffer_size = model.buffers.at(weights_tensor->buffer)->data.size(); TF_EXPECT_OK(SymmetricQuantizeTensor(&model, weights_tensor)); size_t quant_buffer_size = model.buffers.at(weights_tensor->buffer)->data.size(); EXPECT_EQ(weights_tensor->type, TensorType_INT8); EXPECT_EQ(quant_buffer_size * 4, float_buffer_size); } TEST_F(QuantizationUtilsTest, QuantizeFloat16Clamp) { auto model = std::make_unique<ModelT>(); auto subgraph = std::make_unique<tflite::SubGraphT>(); auto tensor = std::make_unique<TensorT>(); auto buffer = std::make_unique<tflite::BufferT>(); constexpr int kNumElements = 6; const std::vector<float> weights = {2.0, 1.0, 65504., 65505, -65504., -99999}; auto weights_reinterpreted_data = reinterpret_cast<const unsigned char*>(weights.data()); buffer->data.assign(weights_reinterpreted_data, weights_reinterpreted_data + weights.size() * 4); tensor->buffer = 0; tensor->shape = {1, kNumElements}; model->subgraphs.push_back(std::move(subgraph)); model->subgraphs[0]->tensors.push_back(std::move(tensor)); model->buffers.push_back(std::move(buffer)); TF_EXPECT_OK(QuantizeTensorFloat16(model.get(), model->subgraphs[0]->tensors[0].get())); auto weightsf16 = reinterpret_cast<Eigen::half*>( model->buffers[model->subgraphs[0]->tensors[0]->buffer]->data.data()); std::vector<float> wf32(kNumElements); std::transform(weightsf16, weightsf16 + 6, wf32.begin(), [](Eigen::half a) { return static_cast<float>(a); }); EXPECT_THAT(wf32, ElementsAreArray({2.0, 1.0, 65504., 65504., -65504., -65504.})); EXPECT_EQ(model->subgraphs[0]->tensors[0]->type, TensorType_FLOAT16); } TEST_F(QuantizationUtilsTest, QuantizeFloat16) { ASSERT_TRUE(g_test_model_dir != nullptr); ASSERT_FALSE(g_test_model_dir->empty()); auto test_model = ReadConvModel(); ASSERT_TRUE(test_model); auto readonly_model = test_model->GetModel(); ASSERT_TRUE(readonly_model); ASSERT_TRUE(readonly_model->subgraphs()); ASSERT_GE(readonly_model->subgraphs()->size(), 1); tflite::ModelT model; readonly_model->UnPackTo(&model); auto subgraph = model.subgraphs[0].get(); auto conv_op = subgraph->operators.at(0).get(); ASSERT_EQ( GetBuiltinCode(model.operator_codes.at(conv_op->opcode_index).get()), BuiltinOperator_CONV_2D); int32_t weights_tensor_idx = conv_op->inputs[1]; TensorT* weights_tensor = subgraph->tensors.at(weights_tensor_idx).get(); EXPECT_EQ(weights_tensor->type, TensorType_FLOAT32); size_t float_buffer_size = model.buffers.at(weights_tensor->buffer)->data.size(); TF_EXPECT_OK(QuantizeTensorFloat16(&model, weights_tensor)); size_t quant_buffer_size = model.buffers.at(weights_tensor->buffer)->data.size(); EXPECT_EQ(weights_tensor->type, TensorType_FLOAT16); EXPECT_EQ(quant_buffer_size * 2, float_buffer_size); } TEST_F(QuantizationUtilsTest, AddQuantizationParams) { auto model = std::make_unique<ModelT>(); auto subgraph = std::make_unique<tflite::SubGraphT>(); auto tensor = std::make_unique<TensorT>(); auto buffer = std::make_unique<tflite::BufferT>(); const std::vector<float> scales = {0.5, 1.0, 1.5}; const std::vector<int64_t> zero_points = {5, 10, 15}; const int32_t quantizated_dimension = 3; const std::vector<uint8_t> buffer_data = {1, 2, 3, 4}; const int32_t buffer_size = 4; tensor->buffer = 0; model->subgraphs.push_back(std::move(subgraph)); model->subgraphs[0]->tensors.push_back(std::move(tensor)); model->buffers.push_back(std::move(buffer)); 
TF_EXPECT_OK(AddQuantizationParams(scales, zero_points, quantizated_dimension, buffer_data.data(), buffer_size, TensorType_INT8, model.get(), model->subgraphs[0]->tensors[0].get())); EXPECT_THAT(model->subgraphs[0]->tensors[0]->quantization->scale, ElementsAreArray(scales)); EXPECT_THAT(model->subgraphs[0]->tensors[0]->quantization->zero_point, ElementsAreArray(zero_points)); EXPECT_THAT(model->buffers[model->subgraphs[0]->tensors[0]->buffer]->data, ElementsAreArray(buffer_data)); EXPECT_EQ(model->subgraphs[0]->tensors[0]->type, TensorType_INT8); } } } } } int main(int argc, char** argv) { std::string model_file; const std::vector<tsl::Flag> flag_list = { tsl::Flag("test_model_file", &model_file, "Path to test tflite model file."), }; const bool parse_result = tsl::Flags::Parse(&argc, argv, flag_list); if (!parse_result) { std::cerr << "Required test_model_file\n"; std::abort(); } g_test_model_dir = new std::string(tsl::io::Dirname(model_file)); ::tsl::port::InitMain(argv[0], &argc, &argv); return RUN_ALL_TESTS(); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantization_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
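As a cross-check on the FillPerChannelMinMax expectations in the record above, here is a minimal standalone sketch (not the TensorFlow implementation) of a per-channel min/max reduction over a row-major tensor; the helper name and layout assumptions are mine. For the {3, 1, 1, 2} input with channel dimension 3 it reproduces mins {13, 21} and maxs {31, 40}.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

struct PerChannelMinMax {
  std::vector<float> min, max;
};

// Row-major reduction: element i belongs to channel (i / inner) % channels,
// where inner is the product of the dimensions after the channel dimension.
PerChannelMinMax ComputePerChannelMinMax(const std::vector<float>& data,
                                         const std::vector<int>& dims,
                                         int channel_dim) {
  const int channels = dims[channel_dim];
  int64_t inner = 1;
  for (size_t d = channel_dim + 1; d < dims.size(); ++d) inner *= dims[d];
  PerChannelMinMax out{
      std::vector<float>(channels, std::numeric_limits<float>::max()),
      std::vector<float>(channels, std::numeric_limits<float>::lowest())};
  for (size_t i = 0; i < data.size(); ++i) {
    const int c =
        static_cast<int>((static_cast<int64_t>(i) / inner) % channels);
    out.min[c] = std::min(out.min[c], data[i]);
    out.max[c] = std::max(out.max[c], data[i]);
  }
  return out;
}

int main() {
  const std::vector<float> input = {13, 21, 21, 22, 31, 40};
  const auto mm = ComputePerChannelMinMax(input, {3, 1, 1, 2}, 3);
  for (size_t c = 0; c < mm.min.size(); ++c)
    std::printf("channel %zu: min=%g max=%g\n", c, mm.min[c], mm.max[c]);
  return 0;
}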
1f41e247-d429-4883-ac69-1d498174b3ed
cpp
tensorflow/tensorflow
quantized_instance_norm
tensorflow/core/kernels/quantized_instance_norm.cc
tensorflow/core/kernels/quantized_instance_norm_test.cc
#define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #include <arm_neon.h> #endif #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/quantization_utils.h" #ifdef USE_NEON namespace { void ColMeanAndVariance(const uint8_t* input, const uint32_t rows, const uint32_t cols, float* mean, float* variance) { for (uint32_t col_offset = 0; col_offset < cols; col_offset += 16) { uint32x4_t sum[4] = {0}; float nA = 0.0f; float32x4_t xA[4] = {0.0f}; float32x4_t M2A[4] = {0.0f}; const uint8_t* inp_ptr = input + col_offset; for (uint32_t row = 0; row < rows; row += 256) { uint32x4_t sub_sum[4] = {0}; uint32x4_t sub_sq_sum[4] = {0}; const uint32_t limit = std::min(rows, row + 256); const float nB = limit - row; for (uint32_t subrow = row; subrow < limit; ++subrow) { const uint8x16_t v = vld1q_u8(inp_ptr); inp_ptr += cols; const uint8x8_t v_high = vget_high_u8(v); const uint8x8_t v_low = vget_low_u8(v); const uint16x8_t v_high_u16 = vmovl_u8(v_high); const uint16x8_t v_low_u16 = vmovl_u8(v_low); const uint16x4_t v_high_high = vget_high_u16(v_high_u16); const uint16x4_t v_high_low = vget_low_u16(v_high_u16); const uint16x4_t v_low_high = vget_high_u16(v_low_u16); const uint16x4_t v_low_low = vget_low_u16(v_low_u16); sub_sum[0] = vaddw_u16(sub_sum[0], v_high_high); sub_sum[1] = vaddw_u16(sub_sum[1], v_high_low); sub_sum[2] = vaddw_u16(sub_sum[2], v_low_high); sub_sum[3] = vaddw_u16(sub_sum[3], v_low_low); sub_sq_sum[0] = vmlal_u16(sub_sq_sum[0], v_high_high, v_high_high); sub_sq_sum[1] = vmlal_u16(sub_sq_sum[1], v_high_low, v_high_low); sub_sq_sum[2] = vmlal_u16(sub_sq_sum[2], v_low_high, v_low_high); sub_sq_sum[3] = vmlal_u16(sub_sq_sum[3], v_low_low, v_low_low); } for (int i = 0; i < 4; ++i) { sum[i] = vaddq_u32(sum[i], sub_sum[i]); const float nX = nA + nB; const float32x4_t xB = vmulq_n_f32(vcvtq_f32_u32(sub_sum[i]), 1.0f / nB); const float32x4_t delta = vsubq_f32(xB, xA[i]); xA[i] = vmulq_n_f32( vaddq_f32(vmulq_n_f32(xA[i], nA), vmulq_n_f32(xB, nB)), 1.0f / nX); const float32x4_t sub_sum_f32 = vcvtq_f32_u32(sub_sum[i]); const float32x4_t sub_sum_sq = vmulq_f32(sub_sum_f32, sub_sum_f32); const float32x4_t M2B = vsubq_f32(vcvtq_f32_u32(sub_sq_sum[i]), vmulq_n_f32(sub_sum_sq, 1.0f / nB)); const float32x4_t last_term = vmulq_n_f32(vmulq_f32(delta, delta), nA * nB / nX); M2A[i] = vaddq_f32(vaddq_f32(M2A[i], M2B), last_term); } nA += limit; } const float inv_rows = 1.0f / static_cast<float>(rows); vst1q_f32(mean + col_offset, vmulq_n_f32(vcvtq_f32_u32(sum[3]), inv_rows)); vst1q_f32(mean + col_offset + 4, vmulq_n_f32(vcvtq_f32_u32(sum[2]), inv_rows)); vst1q_f32(mean + col_offset + 8, vmulq_n_f32(vcvtq_f32_u32(sum[1]), inv_rows)); vst1q_f32(mean + col_offset + 12, vmulq_n_f32(vcvtq_f32_u32(sum[0]), inv_rows)); vst1q_f32(variance + col_offset, vmulq_n_f32(M2A[3], inv_rows)); vst1q_f32(variance + col_offset + 4, vmulq_n_f32(M2A[2], inv_rows)); vst1q_f32(variance + col_offset + 8, vmulq_n_f32(M2A[1], inv_rows)); vst1q_f32(variance + col_offset + 12, vmulq_n_f32(M2A[0], inv_rows)); } } void MinAndMax(const uint8_t* input, const uint32_t rows, const uint32_t cols, const float* mean_ptr, const float* variance_ptr, float variance_epsilon, float* minimum, float* maximum) { float v_maximum = 
std::numeric_limits<float>::min(); float v_minimum = std::numeric_limits<float>::max(); const float32x4_t eps = vdupq_n_f32(variance_epsilon); for (uint32_t col_offset = 0; col_offset < cols; col_offset += 16) { const float32x4_t mean[4] = {vld1q_f32(mean_ptr + col_offset), vld1q_f32(mean_ptr + col_offset + 4), vld1q_f32(mean_ptr + col_offset + 8), vld1q_f32(mean_ptr + col_offset + 12)}; const float32x4_t variance[4] = {vld1q_f32(variance_ptr + col_offset), vld1q_f32(variance_ptr + col_offset + 4), vld1q_f32(variance_ptr + col_offset + 8), vld1q_f32(variance_ptr + col_offset + 12)}; const float32x4_t inv_stddev[4] = { vrsqrteq_f32(vaddq_f32(variance[0], eps)), vrsqrteq_f32(vaddq_f32(variance[1], eps)), vrsqrteq_f32(vaddq_f32(variance[2], eps)), vrsqrteq_f32(vaddq_f32(variance[3], eps))}; const uint8_t* inp_ptr = input + col_offset; for (uint32_t row = 0; row < rows; ++row) { const uint8x16_t v = vld1q_u8(inp_ptr); inp_ptr += cols; const uint16x8_t v_high = vmovl_u8(vget_high_u8(v)); const uint16x8_t v_low = vmovl_u8(vget_low_u8(v)); const float32x4_t v_float[4] = { vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_high))), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_low))), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))}; for (int i = 0; i < 4; ++i) { const float32x4_t normed = vmulq_f32(vsubq_f32(v_float[i], mean[i]), inv_stddev[i]); const float32x2_t high = vget_high_f32(normed); const float32x2_t low = vget_low_f32(normed); float32x2_t tmp_max = vpmax_f32(low, high); tmp_max = vpmax_f32(tmp_max, tmp_max); v_maximum = std::max(v_maximum, vget_lane_f32(tmp_max, 0)); float32x2_t tmp_min = vpmin_f32(low, high); tmp_min = vpmin_f32(tmp_min, tmp_min); v_minimum = std::min(v_minimum, vget_lane_f32(tmp_min, 0)); } } } *minimum = v_minimum; *maximum = v_maximum; } void InstanceNorm(const uint8_t* input, const uint32_t rows, const uint32_t cols, const float* mean_ptr, const float* variance_ptr, float variance_epsilon, float minimum, float maximum, uint8_t* output) { const float32x4_t eps = vdupq_n_f32(variance_epsilon); const float32x4_t out_min = vdupq_n_f32(minimum); const float out_scale = 255.0f / (maximum - minimum); for (uint32_t col_offset = 0; col_offset < cols; col_offset += 16) { const float32x4_t mean[4] = {vld1q_f32(mean_ptr + col_offset + 12), vld1q_f32(mean_ptr + col_offset + 8), vld1q_f32(mean_ptr + col_offset + 4), vld1q_f32(mean_ptr + col_offset)}; const float32x4_t variance[4] = {vld1q_f32(variance_ptr + col_offset + 12), vld1q_f32(variance_ptr + col_offset + 8), vld1q_f32(variance_ptr + col_offset + 4), vld1q_f32(variance_ptr + col_offset)}; const float32x4_t inv_stddev[4] = { vrsqrteq_f32(vaddq_f32(variance[0], eps)), vrsqrteq_f32(vaddq_f32(variance[1], eps)), vrsqrteq_f32(vaddq_f32(variance[2], eps)), vrsqrteq_f32(vaddq_f32(variance[3], eps))}; const uint8_t* inp_ptr = input + col_offset; uint8_t* out_ptr = output + col_offset; for (uint32_t row = 0; row < rows; ++row) { const uint8x16_t v = vld1q_u8(inp_ptr); inp_ptr += cols; const uint16x8_t v_high = vmovl_u8(vget_high_u8(v)); const uint16x8_t v_low = vmovl_u8(vget_low_u8(v)); const float32x4_t v_float[4] = { vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_high))), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))), vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_low))), vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))}; uint16x4_t normed_uint16[4]; for (int i = 0; i < 4; ++i) { const float32x4_t normed = vmulq_f32(vsubq_f32(v_float[i], mean[i]), inv_stddev[i]); const int32x4_t normed_int32 = 
vcvtq_s32_f32(vmulq_n_f32(vsubq_f32(normed, out_min), out_scale)); normed_uint16[i] = vqmovun_s32(normed_int32); } vst1_u8(out_ptr, vqmovn_u16(vcombine_u16(normed_uint16[3], normed_uint16[2]))); vst1_u8(out_ptr + 8, vqmovn_u16(vcombine_u16(normed_uint16[1], normed_uint16[0]))); out_ptr += cols; } } } } #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; class QuantizedInstanceNorm : public OpKernel { public: explicit QuantizedInstanceNorm(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("variance_epsilon", &variance_epsilon_)); OP_REQUIRES_OK(context, context->GetAttr("min_separation", &min_separation_)); OP_REQUIRES_OK( context, context->GetAttr("output_range_given", &output_range_given_)); if (output_range_given_) { OP_REQUIRES_OK(context, context->GetAttr("given_y_min", &given_y_min_)); OP_REQUIRES_OK(context, context->GetAttr("given_y_max", &given_y_max_)); OP_REQUIRES(context, given_y_min_ < given_y_max_, errors::InvalidArgument( "given_y_min must be less than given_y_max : ", given_y_min_, " >= ", given_y_max_)); } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& x_min = context->input(1); const Tensor& x_max = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(x_min.shape()), errors::InvalidArgument("`x_min` must be rank 0 but is rank ", x_min.dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(x_max.shape()), errors::InvalidArgument("`x_max` must be rank 0 but is rank ", x_max.dims())); float input_min = x_min.scalar<float>()(); float input_max = x_max.scalar<float>()(); float input_scale = (input_max - input_min) / 255.0f; OP_REQUIRES(context, input_min < input_max, errors::InvalidArgument( "input_min must be less than input_max : ", input_min, " >= ", input_max)); auto input_tensor = input.tensor<quint8, 4>(); auto N = input_tensor.dimension(0); auto H = input_tensor.dimension(1); auto W = input_tensor.dimension(2); auto C = input_tensor.dimension(3); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); typedef TTypes<float>::Tensor::Index Index; const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>> reduction_indices; Eigen::IndexList<Eigen::type2index<1>, Index, Index, Eigen::type2index<1>> broadcast_spec; broadcast_spec.set(1, H); broadcast_spec.set(2, W); Eigen::IndexList<Index, Eigen::type2index<1>, Eigen::type2index<1>, Index> expand_spec; expand_spec.set(0, N); expand_spec.set(3, C); Eigen::Tensor<float, 2, Eigen::RowMajor> float_mean(N, C); Eigen::Tensor<float, 2, Eigen::RowMajor> float_variance(N, C); #ifdef USE_NEON if (N == 1 && (C % 16 == 0)) { VLOG(2) << "Calling optimized"; ColMeanAndVariance(reinterpret_cast<const uint8_t*>(input_tensor.data()), H * W, C, float_mean.data(), float_variance.data()); float minimum = given_y_min_, maximum = given_y_max_; if (!output_range_given_) { MinAndMax(reinterpret_cast<const uint8_t*>(input_tensor.data()), H * W, C, float_mean.data(), float_variance.data(), variance_epsilon_, &minimum, &maximum); } if (maximum - minimum < min_separation_) { maximum = minimum + min_separation_; } InstanceNorm(reinterpret_cast<const uint8_t*>(input_tensor.data()), H * W, C, float_mean.data(), float_variance.data(), variance_epsilon_, minimum, 
maximum, reinterpret_cast<uint8_t*>(output->flat<quint8>().data())); output_min->scalar<float>()() = minimum; output_max->scalar<float>()() = maximum; } else #endif { VLOG(2) << "Calling unoptimized"; float_mean = input_tensor.cast<float>().reduce( reduction_indices, Eigen::internal::MeanReducer<float>()); float_variance = (input_scale * ((input_tensor.cast<float>() - float_mean.reshape(expand_spec).broadcast(broadcast_spec)))) .square() .reduce(reduction_indices, Eigen::internal::MeanReducer<float>()); Eigen::Tensor<float, 4, Eigen::RowMajor> instance_normed = input_scale * (input_tensor.cast<float>() - float_mean.reshape(expand_spec).broadcast(broadcast_spec)) * (float_variance + variance_epsilon_) .rsqrt() .reshape(expand_spec) .broadcast(broadcast_spec); Eigen::Tensor<float, 0, Eigen::RowMajor> normed_min; Eigen::Tensor<float, 0, Eigen::RowMajor> normed_max; if (!output_range_given_) { normed_min = instance_normed.minimum(); normed_max = instance_normed.maximum(); } else { normed_min() = given_y_min_; normed_max() = given_y_max_; } if (normed_max() - normed_min() < min_separation_) { normed_max() = normed_min() + min_separation_; } FloatToQuantizedStruct<quint8> output_f2q(normed_min(), normed_max()); auto instance_normed_quantized = QUANTIZE_WITH_EIGEN(instance_normed, output_f2q, quint8); output->tensor<quint8, 4>().device( context->template eigen_device<CPUDevice>()) = instance_normed_quantized; output_min->flat<float>()(0) = normed_min(); output_max->flat<float>()(0) = normed_max(); } } private: float variance_epsilon_; float min_separation_; bool output_range_given_; float given_y_min_; float given_y_max_; }; REGISTER_KERNEL_BUILDER(Name("QuantizedInstanceNorm") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T"), QuantizedInstanceNorm); }
#define EIGEN_USE_THREADS #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" namespace tensorflow { namespace ops { namespace { void ReferenceImpl(const quint8* inp, float inp_min, float inp_max, const TensorShape& shape, float var_eps, float* out) { int N = shape.dim_size(0); int H = shape.dim_size(1); int W = shape.dim_size(2); int C = shape.dim_size(3); int total = N * H * W * C; float inp_scale = (inp_max - inp_min) / 255.0f; std::unique_ptr<float[]> dequantized(new float[total]); for (int i = 0; i < total; ++i) { dequantized[i] = inp_min + inp_scale * static_cast<float>(inp[i]); } std::unique_ptr<float[]> inp_mean(new float[N * C]); std::unique_ptr<float[]> inp_var(new float[N * C]); float img_size = static_cast<float>(H) * static_cast<float>(W); for (int n = 0; n < N; ++n) { for (int c = 0; c < C; ++c) { float sum = 0.0; for (int i = 0; i < H * W; ++i) { sum += dequantized[n * H * W * C + i * C + c]; } inp_mean[n * C + c] = sum / img_size; } } for (int n = 0; n < N; ++n) { for (int c = 0; c < C; ++c) { float sum = 0.0; for (int i = 0; i < H * W; ++i) { float tmp = dequantized[n * H * W * C + i * C + c] - inp_mean[n * C + c]; sum += tmp * tmp; } inp_var[n * C + c] = sum / img_size; } } for (int n = 0; n < N; ++n) { for (int c = 0; c < C; ++c) { for (int i = 0; i < H * W; ++i) { out[n * H * W * C + i * C + c] = (dequantized[n * H * W * C + i * C + c] - inp_mean[n * C + c]) / std::sqrt(inp_var[n * C + c] + var_eps); } } } } void Expect(const Tensor& input, float x_min, float x_max, bool output_range_given, float give_y_min, float given_y_max) { Scope root = Scope::NewRootScope(); auto input_ph = Placeholder(root, DT_QUINT8); const float variance_eps = 1e-5; auto instance_norm = QuantizedInstanceNorm( root, input_ph, x_min, x_max, QuantizedInstanceNorm::Attrs().VarianceEpsilon(variance_eps)); Status s = root.status(); EXPECT_TRUE(s.ok()); ClientSession session(root); std::vector<Tensor> outputs; s = session.Run({{input_ph, input}}, {instance_norm.y, instance_norm.y_min, instance_norm.y_max}, &outputs); EXPECT_TRUE(s.ok()); Tensor expected(DT_FLOAT, input.shape()); ReferenceImpl(input.flat<quint8>().data(), x_min, x_max, input.shape(), variance_eps, expected.flat<float>().data()); auto out = outputs[0].flat<quint8>(); float out_min = outputs[1].flat<float>()(0); float out_max = outputs[2].flat<float>()(0); float out_scale = (out_max - out_min) / 255.0f; Eigen::Tensor<float, 0, Eigen::RowMajor> max_diff = (expected.flat<float>() - (out_min + out_scale * out.cast<float>())) .abs() .maximum(); EXPECT_LE(max_diff(), 0.1); LOG(INFO) << "max diff " << max_diff(); } void TestBasic() { Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); auto input = input_tensor.flat<quint8>(); input = input.random(Eigen::internal::UniformRandomGenerator<quint8>()); Expect(input_tensor, 0.0f, 1.0f, false, 0.0f, 0.0f); } void TestZeroInput() { Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); auto input = input_tensor.flat<quint8>(); input = input.setConstant(0); Expect(input_tensor, 2.0f, 3.0f, false, 0.0f, 0.0f); } void TestMaxInput() { Tensor input_tensor(DT_QUINT8, {1, 1, 2, 16}); auto input = input_tensor.flat<quint8>(); input = input.setConstant(255); Expect(input_tensor, 0.0f, std::numeric_limits<float>::max() / static_cast<float>(2 * 16), false, 0.0f, 0.0f); } void TestOutputRangeGiven() { Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); auto input = input_tensor.flat<quint8>(); input = 
input.random(Eigen::internal::UniformRandomGenerator<quint8>()); Expect(input_tensor, -10.0f, 10.0f, true, -1.0f, 1.0f); } void TestClamp() { GTEST_SKIP() << "TODO(b/339058131): Fix test failure."; Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32}); auto input = input_tensor.flat<quint8>(); input = input.random(Eigen::internal::UniformRandomGenerator<quint8>()); Expect(input_tensor, -10.0f, 10.0f, true, 0.0f, 1.0f); } } } } #define RUN_TEST(t) \ TEST(QuantizedInstanceNormTest, t) { tensorflow::ops::t(); } RUN_TEST(TestBasic); RUN_TEST(TestZeroInput); RUN_TEST(TestMaxInput); RUN_TEST(TestOutputRangeGiven); RUN_TEST(TestClamp); int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_instance_norm.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_instance_norm_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
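The NEON ColMeanAndVariance in the record above accumulates column statistics in 256-row blocks and folds each block into a running estimate with a pairwise update. The scalar sketch below (illustrative only; none of these names come from the kernel) shows the same merge rule, combining a running (n_A, mean_A, M2_A) with a block's raw sums so that M2 / n gives the population variance.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<double> xs = {1, 2, 3, 4, 5, 6, 7, 8};
  double nA = 0.0, meanA = 0.0, M2A = 0.0;
  const size_t kBlock = 3;  // merge in small blocks, as the kernel does with 256 rows
  for (size_t start = 0; start < xs.size(); start += kBlock) {
    const size_t limit = std::min(xs.size(), start + kBlock);
    double sumB = 0.0, sqSumB = 0.0;
    for (size_t i = start; i < limit; ++i) {
      sumB += xs[i];
      sqSumB += xs[i] * xs[i];
    }
    const double nB = static_cast<double>(limit - start);
    const double meanB = sumB / nB;
    const double M2B = sqSumB - sumB * sumB / nB;  // block's sum of squared deviations
    const double nX = nA + nB;
    const double delta = meanB - meanA;
    meanA = (meanA * nA + meanB * nB) / nX;
    M2A = M2A + M2B + delta * delta * nA * nB / nX;
    nA = nX;
  }
  // Population variance = M2 / n; for 1..8 this prints mean 4.5, variance 5.25.
  std::printf("mean=%f variance=%f\n", meanA, M2A / nA);
  return 0;
}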
cf40d8f1-fe90-470c-b9b1-a415ae72b520
cpp
tensorflow/tensorflow
random_binomial_op
tensorflow/core/kernels/random_binomial_op.cc
tensorflow/core/kernels/random_binomial_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/random_binomial_op.h" #include <algorithm> #include <cmath> #include <memory> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/rng_alg.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/random_ops_util.h" #include "tensorflow/core/kernels/stateful_random_ops_cpu_gpu.h" #include "tensorflow/core/kernels/stateless_random_ops.h" #include "tensorflow/core/kernels/training_op_helpers.h" #include "tensorflow/core/lib/core/refcount.h" #include "tensorflow/core/lib/random/random_distributions.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/bcast.h" #include "tensorflow/core/util/guarded_philox_random.h" #include "tensorflow/core/util/work_sharder.h" #define UNIFORM(X) \ if (uniform_remaining == 0) { \ uniform_remaining = Uniform::kResultElementCount; \ uniform_result = uniform(gen); \ } \ uniform_remaining--; \ double X = uniform_result[uniform_remaining] namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace { typedef random::UniformDistribution<random::PhiloxRandom, double> Uniform; double binomial_inversion(double count, double prob, random::PhiloxRandom* gen) { using Eigen::numext::ceil; using Eigen::numext::log; using Eigen::numext::log1p; double geom_sum = 0; int num_geom = 0; Uniform uniform; typename Uniform::ResultType uniform_result; int16_t uniform_remaining = 0; while (true) { UNIFORM(u); double geom = ceil(log(u) / log1p(-prob)); geom_sum += geom; if (geom_sum > count) { break; } ++num_geom; } return num_geom; } inline double stirling_approx_tail(double k) { static double kTailValues[] = {0.0810614667953272, 0.0413406959554092, 0.0276779256849983, 0.02079067210376509, 0.0166446911898211, 0.0138761288230707, 0.0118967099458917, 0.0104112652619720, 0.00925546218271273, 0.00833056343336287}; if (k <= 9) { return kTailValues[static_cast<int>(k)]; } double kp1sq = (k + 1) * (k + 1); return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1); } inline double btrs(double count, double prob, random::PhiloxRandom* gen) { using Eigen::numext::abs; using Eigen::numext::floor; using Eigen::numext::log; using Eigen::numext::log1p; using Eigen::numext::sqrt; const double stddev = sqrt(count * prob * (1 - prob)); const double b = 1.15 + 2.53 * stddev; const double a = -0.0873 + 0.0248 * b + 0.01 * prob; const double c = count * prob + 0.5; const double v_r = 0.92 - 4.2 / b; const double r = prob / (1 - prob); const double alpha = (2.83 + 5.1 / b) * stddev; const double m = floor((count + 1) * prob); Uniform uniform; typename Uniform::ResultType uniform_result; int16_t uniform_remaining = 0; while (true) { UNIFORM(u); UNIFORM(v); u = u - 0.5; double us = 0.5 - abs(u); double k = floor((2 * a / us + b) * u + c); if (us >= 0.07 && v <= v_r) { return k; } if (k < 0 || k > count) { continue; } v = log(v * alpha / (a / (us * us) + b)); double upperbound = ((m + 0.5) * log((m + 1) / (r * (count - m + 1))) + (count + 1) * log((count - m + 1) / (count - k + 1)) + (k + 0.5) * log(r * (count - k + 1) / (k + 1)) + stirling_approx_tail(m) + stirling_approx_tail(count - m) - stirling_approx_tail(k) - stirling_approx_tail(count - k)); if (v <= upperbound) { return k; } } } } namespace functor { template <typename T, typename U> struct RandomBinomialFunctor<CPUDevice, T, U> { void 
operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches, int64_t samples_per_batch, int64_t num_elements, const BCast& bcast, typename TTypes<T>::ConstFlat counts, typename TTypes<T>::ConstFlat probs, const random::PhiloxRandom& gen, typename TTypes<U>::Flat output) { auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); auto DoWork = [num_batches, samples_per_batch, &bcast, &counts, &probs, &gen, &output](int64_t start_output, int64_t limit_output) { const bool should_bcast = bcast.IsBroadcastingRequired(); const auto& counts_batch_indices = bcast.x_batch_indices(); const auto& probs_batch_indices = bcast.y_batch_indices(); auto output_flat = output.data(); for (int64_t output_idx = start_output; output_idx < limit_output; ) { int64_t batch_idx = output_idx / samples_per_batch; U* const output_batch_offset = output_flat + batch_idx; T count, prob; if (should_bcast) { count = counts(counts_batch_indices[batch_idx]); prob = probs(probs_batch_indices[batch_idx]); } else { count = counts(batch_idx); prob = probs(batch_idx); } double dcount = static_cast<double>(count); if (dcount <= 0.0 || prob <= T(0.0)) { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { output_batch_offset[sample_idx * num_batches] = static_cast<U>(0.0); } } else if (prob >= T(1.0)) { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { output_batch_offset[sample_idx * num_batches] = static_cast<U>(dcount); } } else if (prob <= T(0.5)) { double dp = static_cast<double>(prob); if (count * prob >= T(10)) { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(256 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>(btrs(dcount, dp, &gen_copy)); } } else { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(42 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>(binomial_inversion(dcount, dp, &gen_copy)); } } } else if (prob > T(0.5)) { T q = T(1) - prob; double dq = static_cast<double>(q); if (count * q >= T(10)) { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(256 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>(dcount - btrs(dcount, dq, &gen_copy)); } } else { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(42 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>( dcount - binomial_inversion(dcount, dq, &gen_copy)); } } } else { for (int64_t sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { output_batch_offset[sample_idx * num_batches] = static_cast<U>(NAN); } } } }; static const int kElementCost = 329 + 6 * Uniform::kElementCost + 6 * random::PhiloxRandom::kElementCost; Shard(worker_threads.num_threads, 
worker_threads.workers, num_elements, kElementCost, DoWork); } }; } namespace { template <typename Device, typename T, typename U> class RandomBinomialOp : public OpKernel { static constexpr int32_t kDesiredBatchSize = 100; public: explicit RandomBinomialOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& alg_tensor = ctx->input(1); const Tensor& shape_tensor = ctx->input(2); const Tensor& counts_tensor = ctx->input(3); const Tensor& probs_tensor = ctx->input(4); tensorflow::BCast bcast(counts_tensor.shape().dim_sizes(), probs_tensor.shape().dim_sizes(), false, true); OP_REQUIRES(ctx, bcast.IsValid(), errors::InvalidArgument( "counts and probs must have compatible batch dimensions: ", counts_tensor.shape().DebugString(), " vs. ", probs_tensor.shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(shape_tensor.shape()), errors::InvalidArgument("Input shape should be a vector, got shape: ", shape_tensor.shape().DebugString())); OP_REQUIRES(ctx, (shape_tensor.dtype() == DataType::DT_INT32 || shape_tensor.dtype() == DataType::DT_INT64), errors::InvalidArgument( "Input shape should have dtype {int32, int64}.")); TensorShape bcast_shape = BCast::ToShape(bcast.output_shape()); TensorShape output_shape; if (shape_tensor.dtype() == DataType::DT_INT32) { OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(), &output_shape)); } else { OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( shape_tensor.vec<int64_t>(), &output_shape)); } OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(output_shape, bcast_shape), errors::InvalidArgument( "Shape passed in must end with broadcasted shape.")); OP_REQUIRES(ctx, alg_tensor.dims() == 0, errors::InvalidArgument("algorithm must be of shape [], not ", alg_tensor.shape().DebugString())); Algorithm alg = Algorithm(alg_tensor.flat<int64_t>()(0)); int64_t samples_per_batch = 1; const int64_t num_sample_dims = (shape_tensor.dim_size(0) - bcast.output_shape().size()); for (int64_t i = 0; i < num_sample_dims; ++i) { samples_per_batch *= shape_tensor.flat<int32>()(i); } int64_t num_batches = 1; for (int64_t i = num_sample_dims; i < shape_tensor.dim_size(0); ++i) { num_batches *= shape_tensor.flat<int32>()(i); } const int64_t num_elements = num_batches * samples_per_batch; Tensor* samples_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &samples_tensor)); core::RefCountPtr<Var> var; OP_REQUIRES_OK(ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &var)); Tensor* var_tensor = var->tensor(); OP_REQUIRES( ctx, var_tensor->dtype() == STATE_ELEMENT_DTYPE, errors::InvalidArgument("dtype of RNG state variable must be ", DataTypeString(STATE_ELEMENT_DTYPE), ", not ", DataTypeString(var_tensor->dtype()))); OP_REQUIRES(ctx, var_tensor->dims() == 1, errors::InvalidArgument( "RNG state must have one and only one dimension, not ", var_tensor->dims())); auto var_tensor_flat = var_tensor->flat<StateElementType>(); OP_REQUIRES(ctx, alg == RNG_ALG_PHILOX, errors::InvalidArgument("Unsupported algorithm id: ", alg)); static_assert(std::is_same<StateElementType, int64_t>::value, "StateElementType must be int64"); static_assert(std::is_same<PhiloxRandom::ResultElementType, uint32>::value, "PhiloxRandom::ResultElementType must be uint32"); OP_REQUIRES(ctx, var_tensor_flat.size() >= PHILOX_MIN_STATE_SIZE, errors::InvalidArgument( "For Philox algorithm, the size of state must be at least ", PHILOX_MIN_STATE_SIZE, "; got ", var_tensor_flat.size())); OP_REQUIRES_OK(ctx, 
PrepareToUpdateVariable<Device, StateElementType>( ctx, var_tensor, var->copy_on_read_mode.load())); auto var_data = var_tensor_flat.data(); auto philox = GetPhiloxRandomFromMem(var_data); UpdateMemWithPhiloxRandom( philox, num_batches * 2 * 100 * (samples_per_batch + 3) / 4, var_data); auto binomial_functor = functor::RandomBinomialFunctor<Device, T, U>(); binomial_functor(ctx, ctx->eigen_device<Device>(), num_batches, samples_per_batch, num_elements, bcast, counts_tensor.flat<T>(), probs_tensor.flat<T>(), philox, samples_tensor->flat<U>()); } private: RandomBinomialOp(const RandomBinomialOp&) = delete; void operator=(const RandomBinomialOp&) = delete; }; template <typename Device, typename T, typename U> class StatelessRandomBinomialOp : public OpKernel { static constexpr int32_t kDesiredBatchSize = 100; public: explicit StatelessRandomBinomialOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { const Tensor& shape_tensor = ctx->input(0); const Tensor& seed_tensor = ctx->input(1); const Tensor& counts_tensor = ctx->input(2); const Tensor& probs_tensor = ctx->input(3); OP_REQUIRES(ctx, seed_tensor.dims() == 1 && seed_tensor.dim_size(0) == 2, errors::InvalidArgument("seed must have shape [2], not ", seed_tensor.shape().DebugString())); tensorflow::BCast bcast(counts_tensor.shape().dim_sizes(), probs_tensor.shape().dim_sizes(), false, true); OP_REQUIRES(ctx, bcast.IsValid(), errors::InvalidArgument( "counts and probs must have compatible batch dimensions: ", counts_tensor.shape().DebugString(), " vs. ", probs_tensor.shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(shape_tensor.shape()), errors::InvalidArgument("Input shape should be a vector, got shape: ", shape_tensor.shape().DebugString())); OP_REQUIRES(ctx, (shape_tensor.dtype() == DataType::DT_INT32 || shape_tensor.dtype() == DataType::DT_INT64), errors::InvalidArgument( "Input shape should have dtype {int32, int64}.")); TensorShape bcast_shape = BCast::ToShape(bcast.output_shape()); TensorShape output_shape; if (shape_tensor.dtype() == DataType::DT_INT32) { OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(), &output_shape)); } else { OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( shape_tensor.vec<int64_t>(), &output_shape)); } OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(output_shape, bcast_shape), errors::InvalidArgument( "Shape passed in must end with broadcasted shape.")); int64_t samples_per_batch = 1; const int64_t num_sample_dims = (shape_tensor.dim_size(0) - bcast.output_shape().size()); for (int64_t i = 0; i < num_sample_dims; ++i) { samples_per_batch *= shape_tensor.dtype() == DataType::DT_INT32 ? shape_tensor.flat<int32>()(i) : shape_tensor.flat<int64>()(i); } int64_t num_batches = 1; for (int64_t i = num_sample_dims; i < shape_tensor.dim_size(0); ++i) { num_batches *= shape_tensor.dtype() == DataType::DT_INT32 ? 
shape_tensor.flat<int32>()(i) : shape_tensor.flat<int64>()(i); } const int64_t num_elements = num_batches * samples_per_batch; Tensor* samples_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &samples_tensor)); if (output_shape.num_elements() == 0) return; random::PhiloxRandom::Key key; random::PhiloxRandom::ResultType counter; OP_REQUIRES_OK(ctx, GenerateKey(seed_tensor, &key, &counter)); auto philox = random::PhiloxRandom(counter, key); auto binomial_functor = functor::RandomBinomialFunctor<Device, T, U>(); binomial_functor(ctx, ctx->eigen_device<Device>(), num_batches, samples_per_batch, num_elements, bcast, counts_tensor.flat<T>(), probs_tensor.flat<T>(), philox, samples_tensor->flat<U>()); } private: StatelessRandomBinomialOp(const StatelessRandomBinomialOp&) = delete; void operator=(const StatelessRandomBinomialOp&) = delete; }; } #define REGISTER(RTYPE, TYPE) \ REGISTER_KERNEL_BUILDER(Name("StatefulRandomBinomial") \ .Device(DEVICE_CPU) \ .HostMemory("resource") \ .HostMemory("algorithm") \ .HostMemory("shape") \ .HostMemory("counts") \ .HostMemory("probs") \ .TypeConstraint<RTYPE>("dtype") \ .TypeConstraint<TYPE>("T"), \ RandomBinomialOp<CPUDevice, TYPE, RTYPE>); \ REGISTER_KERNEL_BUILDER(Name("StatelessRandomBinomial") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .HostMemory("seed") \ .HostMemory("counts") \ .HostMemory("probs") \ .TypeConstraint<RTYPE>("dtype") \ .TypeConstraint<TYPE>("T"), \ StatelessRandomBinomialOp<CPUDevice, TYPE, RTYPE>) #define REGISTER_ALL(RTYPE) \ REGISTER(RTYPE, Eigen::half); \ REGISTER(RTYPE, float); \ REGISTER(RTYPE, double); REGISTER_ALL(Eigen::half); REGISTER_ALL(float); REGISTER_ALL(double); REGISTER_ALL(int32); REGISTER_ALL(int64_t); #undef REGISTER #undef REGISTER_ALL }
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* RandomBinomialGraph(double count, double prob, int num_batches, int samples_per_batch) { Graph* g = new Graph(OpRegistry::Global()); Tensor shape_t(DT_INT32, TensorShape({2})); shape_t.flat<int32>().setValues({num_batches, samples_per_batch}); Tensor counts_t(DT_FLOAT, TensorShape({num_batches})); counts_t.flat<float>().setConstant(count); Tensor probs_t(DT_FLOAT, TensorShape({num_batches})); probs_t.flat<float>().setConstant(prob); Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("randombinomial"), "RandomBinomial") .Input(test::graph::Constant(g, shape_t)) .Input(test::graph::Constant(g, counts_t)) .Input(test::graph::Constant(g, probs_t)) .Attr("dtype", DT_FLOAT) .Finalize(g, &ret)); return g; } static Graph* RandomBinomialInv(int num_batches, int samples_per_batch) { return RandomBinomialGraph(10., 0.3, num_batches, samples_per_batch); } static Graph* RandomBinomialRej(int num_batches, int samples_per_batch) { return RandomBinomialGraph(100., 0.3, num_batches, samples_per_batch); } static Graph* RandomBinomialInvComplement(int num_batches, int samples_per_batch) { return RandomBinomialGraph(10., 0.8, num_batches, samples_per_batch); } static Graph* RandomBinomialRejComplement(int num_batches, int samples_per_batch) { return RandomBinomialGraph(100., 0.2, num_batches, samples_per_batch); } #define BM_RandomBinomialInv(DEVICE, B, S) \ static void BM_RandomBinomialInv_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, RandomBinomialInv(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ BENCHMARK(BM_RandomBinomialInv_##DEVICE##_##B##_##S); #define BM_RandomBinomialRej(DEVICE, B, S) \ static void BM_RandomBinomialRej_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, RandomBinomialRej(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ BENCHMARK(BM_RandomBinomialRej_##DEVICE##_##B##_##S); #define BM_RandomBinomialInvComplement(DEVICE, B, S) \ static void BM_RandomBinomialInvComplement_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, RandomBinomialInvComplement(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ BENCHMARK(BM_RandomBinomialInvComplement_##DEVICE##_##B##_##S); #define BM_RandomBinomialRejComplement(DEVICE, B, S) \ static void BM_RandomBinomialRejComplement_##DEVICE##_##B##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, RandomBinomialRejComplement(B, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \ } \ BENCHMARK(BM_RandomBinomialRejComplement_##DEVICE##_##B##_##S); BM_RandomBinomialInv(cpu, 1000, 1000); BM_RandomBinomialRej(cpu, 1000, 1000); BM_RandomBinomialInvComplement(cpu, 1000, 1000); BM_RandomBinomialRejComplement(cpu, 1000, 1000); BM_RandomBinomialInv(gpu, 1000, 1000); BM_RandomBinomialRej(gpu, 1000, 1000); BM_RandomBinomialInvComplement(gpu, 1000, 1000); BM_RandomBinomialRejComplement(gpu, 1000, 1000); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_binomial_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_binomial_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
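binomial_inversion in the record above draws Binomial(count, prob) samples by counting how many Geometric(prob) gaps fit within count trials; the kernel only uses it when count * prob is small and switches to BTRS otherwise. Below is an illustrative standalone version built on <random> rather than PhiloxRandom; the function name, seed, and empirical-mean check are mine, not part of the kernel.

#include <cmath>
#include <cstdio>
#include <random>

double BinomialInversion(double count, double prob, std::mt19937& gen) {
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  double geom_sum = 0.0;
  int num_geom = 0;
  while (true) {
    const double u = uniform(gen);
    // Geometric(prob) waiting time via inversion of the CDF.
    const double geom = std::ceil(std::log(u) / std::log1p(-prob));
    geom_sum += geom;
    if (geom_sum > count) break;
    ++num_geom;
  }
  return num_geom;
}

int main() {
  std::mt19937 gen(42);
  const int kSamples = 100000;
  double total = 0.0;
  for (int i = 0; i < kSamples; ++i) total += BinomialInversion(10.0, 0.3, gen);
  // The empirical mean should be close to count * prob = 3.0.
  std::printf("empirical mean = %f\n", total / kSamples);
  return 0;
}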
9906f79c-113e-4101-9d0a-a3ebc20508da
cpp
tensorflow/tensorflow
sparse_to_dense_op
tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc
tensorflow/core/kernels/sparse_to_dense_op_test.cc
#include <vector> #include "tensorflow/compiler/tf2xla/lib/scatter.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/value_inference.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { class SparseToDenseOp : public XlaOpKernel { public: explicit SparseToDenseOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { const TensorShape indices_shape = context->InputShape(0); OP_REQUIRES(context, indices_shape.dims() <= 2, errors::InvalidArgument( "sparse_indices should be a scalar, vector, or matrix, " "got shape ", indices_shape.DebugString())); const int64_t num_elems = indices_shape.dims() > 0 ? indices_shape.dim_size(0) : 1; const int64_t num_dims = indices_shape.dims() > 1 ? indices_shape.dim_size(1) : 1; TensorShape output_shape; OP_REQUIRES_OK(context, context->ConstantInputAsShape( 1, &output_shape, xla::ValueInferenceMode::kUpperBound)); OP_REQUIRES(context, output_shape.dims() == num_dims, errors::InvalidArgument( "output_shape has incorrect number of elements: ", output_shape.num_elements(), " should be: ", num_dims)); const TensorShape sparse_values_shape = context->InputShape(2); const int64_t num_values = sparse_values_shape.num_elements(); OP_REQUIRES( context, sparse_values_shape.dims() == 0 || (sparse_values_shape.dims() == 1 && num_values == num_elems), errors::InvalidArgument("sparse_values has incorrect shape ", sparse_values_shape.DebugString(), ", should be [] or [", num_elems, "]")); const TensorShape default_value_shape = context->InputShape(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(default_value_shape), errors::InvalidArgument("default_value should be a scalar.")); xla::XlaOp indices = context->Input(0); xla::XlaOp sparse_values = context->Input(2); xla::XlaOp default_value = context->Input(3); if (sparse_values_shape.dims() == 0 && num_elems != 1) { sparse_values = Broadcast(sparse_values, {num_elems}); } xla::XlaBuilder* builder = context->builder(); auto buffer = Broadcast(default_value, output_shape.dim_sizes()); std::vector<bool> dynamic_dims; OP_REQUIRES_OK( context, context->ResolveInputDynamismIntoPredVector(1, &dynamic_dims)); for (int64_t i = 0; i < dynamic_dims.size(); ++i) { if (dynamic_dims[i]) { auto dynamic_dim_size = xla::Slice(context->Input(1), {i}, {i + 1}, {1}); dynamic_dim_size = xla::Reshape(dynamic_dim_size, {}); dynamic_dim_size = xla::ConvertElementType(dynamic_dim_size, xla::S32); buffer = xla::SetDimensionSize(buffer, dynamic_dim_size, i); } } auto result = XlaScatter(buffer, sparse_values, indices, indices_shape.dims() > 1, false, {}, builder); context->SetOutput(0, builder->ReportErrorOrReturn(result)); } }; REGISTER_XLA_OP(Name("SparseToDense").CompileTimeConstantInput("output_shape"), SparseToDenseOp); } }
#include <functional> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { class SparseToDenseTest : public OpsTestBase { protected: void MakeOp(int dim, DataType index_type, DataType value_type) { TF_ASSERT_OK(NodeDefBuilder("sparsetodense", "SparseToDense") .Input(FakeInput(index_type)) .Input(FakeInput(index_type)) .Input(FakeInput(value_type)) .Input(FakeInput(value_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SparseToDenseTest, OneD_OneValue) { MakeOp(1, DT_INT32, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {1, 3, 4}); AddInputFromArray<int32>(TensorShape({1}), {5}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {-2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, {5}); test::FillValues<float>(&expected, {-2, 2, -2, 2, 2}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseToDenseTest, OneD_OneValue_int64_double) { MakeOp(1, DT_INT64, DT_DOUBLE); AddInputFromArray<int64_t>(TensorShape({3}), {1, 3, 4}); AddInputFromArray<int64_t>(TensorShape({1}), {5}); AddInputFromArray<double>(TensorShape({}), {2}); AddInputFromArray<double>(TensorShape({}), {-2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, {5}); test::FillValues<double>(&expected, {-2, 2, -2, 2, 2}); test::ExpectTensorEqual<double>(expected, *GetOutput(0)); } TEST_F(SparseToDenseTest, OneD_MultValues) { MakeOp(1, DT_INT32, DT_FLOAT); AddInputFromArray<int32>({3}, {1, 3, 4}); AddInputFromArray<int32>({1}, {5}); AddInputFromArray<float>({3}, {3, 4, 5}); AddInputFromArray<float>({}, {-2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, {5}); test::FillValues<float>(&expected, {-2, 3, -2, 4, 5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseToDenseTest, TwoD_OneValue) { MakeOp(2, DT_INT32, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3, 2}), {0, 1, 0, 2, 2, 3}); AddInputFromArray<int32>(TensorShape({2}), {3, 4}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {-2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, {3, 4}); expected.flat<float>().setConstant(-2); expected.tensor<float, 2>()(0, 1) = 2; expected.tensor<float, 2>()(0, 2) = 2; expected.tensor<float, 2>()(2, 3) = 2; test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseToDenseTest, TwoD_MultValues) { MakeOp(2, DT_INT32, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3, 2}), {0, 1, 0, 2, 2, 3}); AddInputFromArray<int32>(TensorShape({2}), {3, 4}); AddInputFromArray<float>(TensorShape({3}), {3, 4, 5}); AddInputFromArray<float>(TensorShape({}), {-2}); 
TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, {3, 4}); expected.flat<float>().setConstant(-2); expected.tensor<float, 2>()(0, 1) = 3; expected.tensor<float, 2>()(0, 2) = 4; expected.tensor<float, 2>()(2, 3) = 5; test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseToDenseTest, ThreeD_OneValue) { MakeOp(3, DT_INT32, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3, 3}), {0, 1, 1, 0, 2, 0, 2, 3, 1}); AddInputFromArray<int32>(TensorShape({3}), {3, 4, 2}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {-2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, {3, 4, 2}); expected.flat<float>().setConstant(-2); expected.tensor<float, 3>()(0, 1, 1) = 2; expected.tensor<float, 3>()(0, 2, 0) = 2; expected.tensor<float, 3>()(2, 3, 1) = 2; test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseToDenseTest, ThreeD_MultValues) { MakeOp(3, DT_INT32, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3, 3}), {0, 1, 1, 0, 2, 0, 2, 3, 1}); AddInputFromArray<int32>(TensorShape({3}), {3, 4, 2}); AddInputFromArray<float>(TensorShape({3}), {3, 4, 5}); AddInputFromArray<float>(TensorShape({}), {-2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, {3, 4, 2}); expected.flat<float>().setConstant(-2); expected.tensor<float, 3>()(0, 1, 1) = 3; expected.tensor<float, 3>()(0, 2, 0) = 4; expected.tensor<float, 3>()(2, 3, 1) = 5; test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } } static void BM_SparseToDense(::testing::benchmark::State& state) { const int NDIM = state.range(0); const int N = state.range(1); const int IndexDim = (NDIM == 1) ? 0 : 1; std::unique_ptr<Device> device( DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0")); absl::InlinedVector<TensorValue, 4UL> inputs; Tensor output_shape(DT_INT32, TensorShape({NDIM})); Tensor sparse_indices(DT_INT64, TensorShape({N, NDIM})); Tensor sparse_values(DT_FLOAT, TensorShape({N})); Tensor default_value(DT_FLOAT, TensorShape({})); auto output_shape_t = output_shape.vec<int32>(); for (int d = 0; d < NDIM; ++d) { output_shape_t(d) = (d == IndexDim) ? N : 3; } auto sparse_indices_t = sparse_indices.matrix<int64_t>(); for (int n = 0; n < N; ++n) { for (int d = 0; d < NDIM; ++d) sparse_indices_t(n, d) = (d == IndexDim) ? 
n : 0; } for (auto* ptr : {&sparse_indices, &output_shape, &sparse_values, &default_value}) { inputs.push_back({nullptr, ptr}); } NodeDef sparse_node_def; TF_CHECK_OK(NodeDefBuilder("sparsetodense", "SparseToDense") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(&sparse_node_def)); Status status; std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(), cpu_allocator(), sparse_node_def, TF_GRAPH_DEF_VERSION, &status)); OpKernelContext::Params params; params.device = device.get(); params.frame_iter = FrameAndIter(0, 0); params.inputs = inputs; params.op_kernel = op.get(); std::vector<AllocatorAttributes> attrs; test::SetOutputAttrs(&params, &attrs); std::unique_ptr<OpKernelContext> sparse_context(new OpKernelContext(&params)); op->Compute(sparse_context.get()); for (auto s : state) { delete sparse_context->release_output(0).tensor; op->Compute(sparse_context.get()); TF_ASSERT_OK(sparse_context->status()); } int64_t bytes_per_iter = static_cast<int64_t>((N + N * NDIM) * sizeof(float)); state.SetBytesProcessed(bytes_per_iter * state.iterations()); } BENCHMARK(BM_SparseToDense) ->ArgPair(1, 10) ->ArgPair(1, 100) ->ArgPair(1, 1000) ->ArgPair(1, 10000) ->ArgPair(2, 10) ->ArgPair(2, 100) ->ArgPair(2, 1000) ->ArgPair(2, 10000) ->ArgPair(3, 10) ->ArgPair(3, 100) ->ArgPair(3, 1000) ->ArgPair(3, 10000) ->ArgPair(5, 10) ->ArgPair(5, 100) ->ArgPair(5, 1000) ->ArgPair(5, 10000); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_to_dense_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
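For reference, the OneD_MultValues test above expects SparseToDense to scatter values {3, 4, 5} at indices {1, 3, 4} into a length-5 vector of default -2, giving {-2, 3, -2, 4, 5}. The sketch below illustrates that contract for the 1-D case only; it is not the XLA lowering implemented by the kernel in this record.

#include <cstdio>
#include <vector>

// Scatter `values` at `indices` into a dense vector pre-filled with
// `default_value` (1-D case, no duplicate-index handling).
std::vector<float> SparseToDense1D(const std::vector<int>& indices,
                                   const std::vector<float>& values,
                                   int output_size, float default_value) {
  std::vector<float> dense(output_size, default_value);
  for (size_t i = 0; i < indices.size(); ++i) dense[indices[i]] = values[i];
  return dense;
}

int main() {
  const auto dense = SparseToDense1D({1, 3, 4}, {3.f, 4.f, 5.f}, 5, -2.f);
  for (float v : dense) std::printf("%g ", v);  // prints: -2 3 -2 4 5
  std::printf("\n");
  return 0;
}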
effcb6f2-119c-4733-ac16-417d8c58d04c
cpp
tensorflow/tensorflow
tensor_map
tensorflow/core/kernels/tensor_map.cc
tensorflow/core/kernels/tensor_map_test.cc
#include "tensorflow/core/kernels/tensor_map.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/variant_op_registry.h" #include "tensorflow/core/lib/core/coding.h" namespace tensorflow { TensorMap::~TensorMap() { if (tensors_) tensors_->Unref(); } void TensorMap::Encode(VariantTensorData* data) const { data->set_type_name(TypeName()); absl::flat_hash_map<TensorKey, Tensor>::const_iterator map_it = tensors().begin(); while (map_it != tensors().end()) { Tensor k = map_it->first; Tensor v = map_it->second; CHECK_NE(k.dtype(), DT_INVALID); CHECK_NE(v.dtype(), DT_INVALID); *data->add_tensors() = k; *data->add_tensors() = v; map_it++; } } static Status TensorMapDeviceCopy( const TensorMap& from, TensorMap* to, const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) { for (const std::pair<TensorKey, Tensor>& p : from.tensors()) { TensorKey to_key(p.first.dtype()); Tensor to_val(p.second.dtype()); TF_RETURN_IF_ERROR(copy(p.first, &to_key)); TF_RETURN_IF_ERROR(copy(p.second, &to_val)); to->tensors().emplace(to_key, to_val); } return absl::OkStatus(); } #define REGISTER_LIST_COPY(DIRECTION) \ INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(TensorMap, DIRECTION, \ TensorMapDeviceCopy) REGISTER_LIST_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE); REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST); REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE); REGISTER_UNARY_VARIANT_DECODE_FUNCTION(TensorMap, TensorMap::kTypeName); bool TensorMap::Decode(const VariantTensorData& data) { std::vector<Tensor>::const_iterator tensors_it = data.tensors().begin(); while (tensors_it != data.tensors().end()) { if (std::next(tensors_it) == data.tensors().end()) { return false; } tensors().emplace(tensors_it[0], tensors_it[1]); tensors_it += 2; } return true; } const char TensorMap::kTypeName[] = "tensorflow::TensorMap"; }
#include "tensorflow/core/kernels/tensor_map.h" #include "absl/container/flat_hash_map.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { TEST(TensorMapTest, Empty) { TensorMap tm; EXPECT_EQ(tm.tensors().size(), 0); EXPECT_EQ(tm.tensors().begin(), tm.tensors().end()); } TEST(TensorKeyTest, Equal) { TensorKey k1 = Tensor(15); TensorKey k2 = Tensor(15); EXPECT_EQ(k1, k2); EXPECT_EQ(k1.shape(), k2.shape()); EXPECT_EQ(k1.dtype(), k2.dtype()); TensorKey k3 = Tensor(37.0); EXPECT_NE(k1, k3); EXPECT_NE(k1.dtype(), k3.dtype()); } TEST(TensorMapTest, Insert) { TensorMap tm; TensorKey k = Tensor(11); Tensor v = Tensor(22); tm.insert(k, v); absl::flat_hash_map<TensorKey, Tensor> am; am.try_emplace(k, v); absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.tensors().begin(); EXPECT_EQ(map_it->first, k); test::ExpectTensorEqual<int32>(map_it->second, v); map_it++; EXPECT_EQ(map_it, tm.tensors().end()); } TEST(TensorMapTest, Lookup) { TensorMap tm; TensorKey k = Tensor(11); Tensor v = Tensor(22); tm.insert(k, v); absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k); Tensor f = map_it->second; EXPECT_EQ(map_it->first, k); test::ExpectTensorEqual<int32>(f, v); } TEST(TensorMapTest, Erase) { TensorMap tm; TensorKey k = Tensor(11); Tensor v = Tensor(22); tm.insert(k, v); tm.erase(k); EXPECT_EQ(tm.find(k), tm.tensors().end()); } TEST(TensorMapTest, SameKeyInsert) { TensorMap tm; TensorKey k = Tensor(11); Tensor v1 = Tensor(22); Tensor v2 = Tensor(23); bool b1 = tm.insert(k, v1); bool b2 = tm.insert(k, v2); EXPECT_EQ(b1, true); EXPECT_EQ(b2, false); absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k); EXPECT_EQ(map_it->first, k); test::ExpectTensorEqual<int32>(map_it->second, v1); } TEST(TensorMapTest, Replace) { TensorMap tm; TensorKey k = Tensor(11); Tensor v1 = Tensor(22); Tensor v2 = Tensor(23); tm[k] = v2; absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k); EXPECT_EQ(map_it->first, k); test::ExpectTensorEqual<int32>(map_it->second, v2); } TEST(TensorMapTest, ListKeys) { TensorMap tm; TensorKey k = Tensor(11.0); TensorKey k2 = Tensor(12.0); Tensor v = Tensor(22); Tensor v2 = Tensor(23); tm.insert(k, v); tm.insert(k2, v2); std::vector<Tensor> keys = tm.keys(); std::vector<std::pair<double, int>> key_doubles; for (int i = 0; i < keys.size(); i++) { double x = keys[i].scalar<double>()(); std::pair<double, int> p = std::pair<double, int>(x, i); key_doubles.push_back(p); } sort(key_doubles.begin(), key_doubles.end()); EXPECT_EQ(keys.size(), 2); EXPECT_EQ(key_doubles[0].first, 11.0); EXPECT_EQ(key_doubles[1].first, 12.0); int ind1 = key_doubles[0].second; int ind2 = key_doubles[1].second; EXPECT_EQ(keys[ind1].shape(), k.shape()); EXPECT_EQ(keys[ind2].shape(), k2.shape()); } TEST(TensorMapTest, Size) { TensorMap tm; EXPECT_EQ(tm.size(), 0); TensorKey k = Tensor(11); Tensor v = Tensor(22); tm.insert(k, v); EXPECT_EQ(tm.size(), 1); } TEST(TensorMapTest, Copy) { TensorMap tm; TensorKey k = Tensor(11); Tensor v = Tensor(22); tm.insert(k, v); TensorMap tmc = tm.Copy(); EXPECT_EQ(tm.size(), tmc.size()); EXPECT_NE(tm.find(k), tm.tensors().end()); EXPECT_NE(tmc.find(k), tmc.tensors().end()); EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first); test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second); } TEST(TensorMapTest, 
EncodeDecode) { TensorMap tm; TensorKey k = Tensor(11); Tensor v = Tensor(22); tm.insert(k, v); VariantTensorData data; tm.Encode(&data); TensorMap tmc; tmc.Decode(data); EXPECT_EQ(tm.size(), tmc.size()); EXPECT_NE(tm.find(k), tm.tensors().end()); EXPECT_NE(tmc.find(k), tmc.tensors().end()); EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first); test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_map.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_map_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
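TensorMap::Encode in the record above appends each key and value as consecutive tensors, and Decode walks the list two at a time, failing if a key is left without a value. The toy sketch below mirrors that interleaved wire format with plain ints standing in for tensors; it illustrates the format only and is not the TensorFlow code.

#include <cstdio>
#include <map>
#include <vector>

// Serialize as [k0, v0, k1, v1, ...], matching the pairwise layout Encode uses.
std::vector<int> Encode(const std::map<int, int>& m) {
  std::vector<int> out;
  for (const auto& kv : m) {
    out.push_back(kv.first);
    out.push_back(kv.second);
  }
  return out;
}

// Rebuild the map; an odd-length buffer means a dangling key, so report failure.
bool Decode(const std::vector<int>& data, std::map<int, int>* m) {
  if (data.size() % 2 != 0) return false;
  for (size_t i = 0; i + 1 < data.size(); i += 2) (*m)[data[i]] = data[i + 1];
  return true;
}

int main() {
  std::map<int, int> original = {{11, 22}, {12, 23}}, roundtrip;
  const bool ok = Decode(Encode(original), &roundtrip);
  std::printf("ok=%d size=%zu\n", ok, roundtrip.size());  // ok=1 size=2
  return 0;
}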
1f1b2c1e-3bb9-4fc4-ae64-7e71e3f290d5
cpp
tensorflow/tensorflow
encode_wav_op
tensorflow/core/kernels/encode_wav_op.cc
tensorflow/core/kernels/encode_wav_op_test.cc
#include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/wav/wav_io.h" namespace tensorflow { class EncodeWavOp : public OpKernel { public: explicit EncodeWavOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& audio = context->input(0); OP_REQUIRES(context, audio.dims() == 2, errors::InvalidArgument("audio must be 2-dimensional", audio.shape().DebugString())); const Tensor& sample_rate_tensor = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsScalar(sample_rate_tensor.shape()), errors::InvalidArgument( "Input sample_rate should be a scalar tensor, got ", sample_rate_tensor.shape().DebugString(), " instead.")); const int32_t sample_rate = sample_rate_tensor.scalar<int32>()(); OP_REQUIRES( context, FastBoundsCheck(audio.NumElements(), std::numeric_limits<int32>::max()), errors::InvalidArgument( "Cannot encode audio with >= max int32 elements")); const int32_t channel_count = static_cast<int32>(audio.dim_size(1)); const int32_t sample_count = static_cast<int32>(audio.dim_size(0)); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({}), &output)); OP_REQUIRES_OK(context, wav::EncodeAudioAsS16LEWav( audio.flat<float>().data(), sample_rate, channel_count, sample_count, &output->scalar<tstring>()())); } }; REGISTER_KERNEL_BUILDER(Name("EncodeWav").Device(DEVICE_CPU), EncodeWavOp); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/audio_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace ops { namespace { TEST(EncodeWavOpTest, EncodeWavTest) { Scope root = Scope::DisabledShapeInferenceScope(); Tensor audio_tensor(DT_FLOAT, {4, 2}); test::FillValues<float>( &audio_tensor, {0.0f, 0.5f, 1.0f, -1.0f, 0.25f, 0.75f, 1.25f, -0.5f}); Output audio_op = Const(root.WithOpName("audio_op"), Input::Initializer(audio_tensor)); Output sample_rate_op = Const(root.WithOpName("sample_rate_op"), 44100); EncodeWav encode_wav_op = EncodeWav(root.WithOpName("encode_wav_op"), audio_op, sample_rate_op); DecodeWav decode_wav_op = DecodeWav(root.WithOpName("decode_wav_op"), encode_wav_op); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {decode_wav_op.audio, decode_wav_op.sample_rate}, &outputs)); const Tensor& audio = outputs[0]; const int sample_rate = outputs[1].flat<int32>()(0); EXPECT_EQ(2, audio.dims()); EXPECT_EQ(2, audio.dim_size(1)); EXPECT_EQ(4, audio.dim_size(0)); EXPECT_NEAR(0.0f, audio.flat<float>()(0), 1e-4f); EXPECT_NEAR(0.5f, audio.flat<float>()(1), 1e-4f); EXPECT_NEAR(1.0f, audio.flat<float>()(2), 1e-4f); EXPECT_NEAR(-1.0f, audio.flat<float>()(3), 1e-4f); EXPECT_NEAR(0.25f, audio.flat<float>()(4), 1e-4f); EXPECT_NEAR(0.75f, audio.flat<float>()(5), 1e-4f); EXPECT_NEAR(1.0f, audio.flat<float>()(6), 1e-4f); EXPECT_NEAR(-0.5f, audio.flat<float>()(7), 1e-4f); EXPECT_EQ(44100, sample_rate); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/encode_wav_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/encode_wav_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
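As a reference point, the sketch below calls the WAV encoder directly with the same call shape EncodeWavOp uses above. The input layout is row-major [frames, channels] float data; the wrapper function name is invented for illustration, and the encoder signature is assumed to match the call made by the kernel.

#include <vector>

#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/wav/wav_io.h"
#include "tensorflow/core/platform/tstring.h"

namespace tensorflow {

// Illustrative wrapper (not in the repository): encodes a 4-frame stereo
// buffer to 16-bit little-endian WAV bytes, using the test input above.
Status EncodeStereoSketch(tstring* wav_bytes) {
  const int sample_rate = 44100;
  const int channel_count = 2;
  const int sample_count = 4;  // frames per channel
  // Row-major [frames, channels]; values outside [-1, 1] saturate on encode,
  // which is why the 1.25f sample decodes as roughly 1.0f in the test above.
  const std::vector<float> audio = {0.0f,  0.5f,  1.0f,  -1.0f,
                                    0.25f, 0.75f, 1.25f, -0.5f};
  return wav::EncodeAudioAsS16LEWav(audio.data(), sample_rate, channel_count,
                                    sample_count, wav_bytes);
}

}  // namespace tensorflow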
fba84131-3d0e-4f17-933c-2161333e37ff
cpp
tensorflow/tensorflow
dynamic_stitch_op
tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc
tensorflow/core/kernels/dynamic_stitch_op_test.cc
#include <algorithm> #include <vector> #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal_util.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" namespace tensorflow { namespace { class DynamicStitchOp : public XlaOpKernel { public: explicit DynamicStitchOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES( ctx, ctx->num_inputs() > 0, errors::InvalidArgument("DynamicStitchOp: Must have some inputs")); OP_REQUIRES(ctx, ctx->num_inputs() % 2 == 0, errors::InvalidArgument( "DynamicStitchOp: Must have even number of arguments")); const int n = ctx->num_inputs() / 2; const DataType dt = ctx->input_type(n); DataTypeVector expected; for (int i = 0; i < n; i++) { expected.push_back(DT_INT32); } for (int i = 0; i < n; i++) { expected.push_back(dt); } OP_REQUIRES_OK(ctx, ctx->MatchSignature(expected, {dt})); } void Compile(XlaOpKernelContext* ctx) override { std::vector<xla::Literal> indices_input; OP_REQUIRES_OK(ctx, ctx->ConstantInputList("indices", &indices_input)); std::vector<xla::XlaOp> data; std::vector<TensorShape> data_shapes; OP_REQUIRES_OK(ctx, ctx->InputList("data", &data, &data_shapes)); std::vector<xla::Literal> indices(indices_input.size()); const TensorShape& data0_shape = data_shapes[0]; TensorShape indices0_shape; OP_REQUIRES_OK( ctx, XLAShapeToTensorShape(indices_input[0].shape(), &indices0_shape)); for (int input_num = 0; input_num < indices_input.size(); input_num++) { TensorShape indices_shape; OP_REQUIRES_OK(ctx, XLAShapeToTensorShape(indices_input[input_num].shape(), &indices_shape)); TensorShape& data_shape = data_shapes[input_num]; if (!TensorShapeUtils::StartsWith(data_shape, indices_shape)) { for (int64_t i = 0; i < indices_shape.dims(); ++i) { data_shape.set_dim(i, indices_shape.dim_size(i)); data[input_num] = xla::SliceInDim(data[input_num], 0, indices_shape.dim_size(i), 1, i); } } OP_REQUIRES( ctx, TensorShapeUtils::StartsWith(data_shape, indices_shape), errors::InvalidArgument("data[", input_num, "].shape = ", data_shape.DebugString(), " does not start with indices[", input_num, "].shape = ", indices_shape.DebugString())); OP_REQUIRES( ctx, input_num == 0 || SameExtraShape(data0_shape, indices0_shape, data_shape, indices_shape), errors::InvalidArgument( "Need data[0].shape[", indices0_shape.dims(), ":] = data[", input_num, "].shape[", indices_shape.dims(), ":], got data[0].shape = ", data0_shape.DebugString(), ", data[", input_num, "].shape = ", data_shape.DebugString(), ", indices[0].shape = ", indices0_shape.DebugString(), ", indices[", input_num, "].shape = ", indices_shape.DebugString())); OP_REQUIRES_OK(ctx, XlaHelpers::ReshapeLiteral(indices_input[input_num], {indices_shape.num_elements()}, &indices[input_num])); } int max_index = -1; for (int input_num = 0; input_num < indices.size(); input_num++) { for (int i = 0; i < indices[input_num].shape().dimensions(0); ++i) { max_index = std::max(max_index, indices[input_num].Get<int>({i})); } } int number_of_indices = max_index + 1; int64_t result_rank = 1 + data0_shape.dims() - indices0_shape.dims(); if (number_of_indices == 0) { std::vector<int64_t> result_shape(result_rank); for (int d 
= indices0_shape.dims(); d < data0_shape.dims(); d++) { result_shape[d - indices0_shape.dims() + 1] = data0_shape.dim_size(d); } xla::PrimitiveType element_type = ctx->input_xla_type(ctx->num_inputs() - 1); xla::Literal empty_literal = xla::Literal::CreateFromShape( xla::ShapeUtil::MakeShape(element_type, result_shape)); ctx->SetOutput(0, xla::ConstantLiteral(ctx->builder(), empty_literal)); return; } std::vector<int32> src_input_vector(number_of_indices); std::vector<int32> src_slice_vector(number_of_indices); std::vector<bool> src_index_used(number_of_indices); int index_used_count = 0; for (int input_num = 0; input_num < indices.size(); input_num++) { for (int i = 0; i < indices[input_num].shape().dimensions(0); ++i) { int index = indices[input_num].Get<int>({i}); OP_REQUIRES( ctx, index >= 0, errors::InvalidArgument("indices[", index, "] is out of range")); src_input_vector[index] = input_num; src_slice_vector[index] = i; if (!src_index_used[index]) { src_index_used[index] = true; ++index_used_count; } } } OP_REQUIRES(ctx, index_used_count == number_of_indices, errors::InvalidArgument("not all indices are used")); std::vector<xla::XlaOp> input(indices.size()); for (int input_num = 0; input_num < indices.size(); input_num++) { TensorShape new_shape; new_shape.AddDim(indices[input_num].shape().dimensions(0)); for (int d = indices0_shape.dims(); d < data0_shape.dims(); d++) { new_shape.AddDim(data0_shape.dim_size(d)); } auto handle = data[input_num]; if (new_shape == data_shapes[input_num]) { input[input_num] = handle; } else { input[input_num] = xla::Reshape(handle, new_shape.dim_sizes()); } } std::vector<int64_t> slice_start(result_rank); std::vector<int64_t> slice_limit(result_rank); std::vector<int64_t> stride(result_rank, 1); for (int d = indices0_shape.dims(); d < data0_shape.dims(); d++) { slice_limit[1 + d - indices0_shape.dims()] = data0_shape.dim_size(d); } std::vector<xla::XlaOp> to_concat(number_of_indices); for (int index_num = 0; index_num < number_of_indices; index_num++) { const auto& expression = input[src_input_vector[index_num]]; slice_start[0] = src_slice_vector[index_num]; slice_limit[0] = src_slice_vector[index_num] + 1; to_concat[index_num] = xla::Slice(expression, slice_start, slice_limit, stride); } ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), to_concat, 0)); } private: static bool SameExtraShape(const TensorShape& data0_shape, const TensorShape& indices0, const TensorShape& data1_shape, const TensorShape& indices1) { const int extra0 = data0_shape.dims() - indices0.dims(); const int extra1 = data1_shape.dims() - indices1.dims(); if (extra0 != extra1) return false; for (int i = 0; i < extra0; i++) { if (data0_shape.dim_size(indices0.dims() + i) != data1_shape.dim_size(indices1.dims() + i)) { return false; } } return true; } }; REGISTER_XLA_OP(Name("DynamicStitch").CompileTimeConstantInput("indices"), DynamicStitchOp); REGISTER_XLA_OP( Name("ParallelDynamicStitch").CompileTimeConstantInput("indices"), DynamicStitchOp); } }
#include <functional> #include <memory> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class DynamicStitchOpTest : public OpsTestBase { protected: void MakeOp(int n, DataType dt) { TF_ASSERT_OK(NodeDefBuilder("myop", "DynamicStitch") .Input(FakeInput(n, DT_INT32)) .Input(FakeInput(n, dt)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(DynamicStitchOpTest, Simple_OneD) { MakeOp(2, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7}); AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5}); AddInputFromArray<float>(TensorShape({3}), {0, 40, 70}); AddInputFromArray<float>(TensorShape({5}), {10, 60, 20, 30, 50}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({8})); test::FillValues<float>(&expected, {0, 10, 20, 30, 40, 50, 60, 70}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(DynamicStitchOpTest, Simple_TwoD) { MakeOp(3, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7}); AddInputFromArray<int32>(TensorShape({2}), {1, 6}); AddInputFromArray<int32>(TensorShape({3}), {2, 3, 5}); AddInputFromArray<float>(TensorShape({3, 2}), {0, 1, 40, 41, 70, 71}); AddInputFromArray<float>(TensorShape({2, 2}), {10, 11, 60, 61}); AddInputFromArray<float>(TensorShape({3, 2}), {20, 21, 30, 31, 50, 51}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({8, 2})); test::FillValues<float>(&expected, {0, 1, 10, 11, 20, 21, 30, 31, 40, 41, 50, 51, 60, 61, 70, 71}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(DynamicStitchOpTest, IndicesNotCoverAllPortions) { MakeOp(1, DT_FLOAT); AddInputFromArray<int32>(TensorShape({1}), {2}); AddInputFromArray<float>(TensorShape({1}), {1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {0, 0, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(DynamicStitchOpTest, Error_IndicesMultiDimensional) { MakeOp(2, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7}); AddInputFromArray<int32>(TensorShape({1, 5}), {1, 6, 2, 3, 5}); AddInputFromArray<float>(TensorShape({3}), {0, 40, 70}); AddInputFromArray<float>(TensorShape({5}), {10, 60, 20, 30, 50}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "data[1].shape = [5] does not start with indices[1].shape = [1,5]")) << s; } TEST_F(DynamicStitchOpTest, Error_DataNumDimsMismatch) { MakeOp(2, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7}); AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5}); AddInputFromArray<float>(TensorShape({3}), {0, 40, 70}); AddInputFromArray<float>(TensorShape({1, 5}), {10, 60, 20, 30, 50}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "data[1].shape = [1,5] does not start with indices[1].shape = [5]")) << s; } TEST_F(DynamicStitchOpTest, 
Error_DataDimSizeMismatch) { MakeOp(2, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 5}); AddInputFromArray<int32>(TensorShape({4}), {1, 6, 2, 3}); AddInputFromArray<float>(TensorShape({3, 1}), {0, 40, 70}); AddInputFromArray<float>(TensorShape({4, 2}), {10, 11, 60, 61, 20, 21, 30, 31}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "Need data[0].shape[1:] = data[1].shape[1:], got " "data[0].shape = [3,1], data[1].shape = [4,2]")) << s; } TEST_F(DynamicStitchOpTest, Error_DataAndIndicesSizeMismatch) { MakeOp(2, DT_FLOAT); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7}); AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5}); AddInputFromArray<float>(TensorShape({3}), {0, 40, 70}); AddInputFromArray<float>(TensorShape({4}), {10, 60, 20, 30}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "data[1].shape = [4] does not start with indices[1].shape = [5]")) << s; } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/dynamic_stitch_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
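To make the stitching rule concrete, here is a plain-C++ sketch (host-side only, not the XLA lowering above) of the semantics the tests exercise: merged[indices[m][i]] = data[m][i]. The inputs are copied from the Simple_OneD test; the function name is illustrative.

#include <cstddef>
#include <vector>

// Illustrative computation of DynamicStitch's merge rule using the
// Simple_OneD inputs above; later inputs overwrite earlier ones on duplicates.
std::vector<float> StitchOneDSketch() {
  const std::vector<std::vector<int>> indices = {{0, 4, 7}, {1, 6, 2, 3, 5}};
  const std::vector<std::vector<float>> data = {{0, 40, 70},
                                                {10, 60, 20, 30, 50}};
  std::vector<float> merged(8, 0.0f);  // output size = max index + 1
  for (std::size_t m = 0; m < indices.size(); ++m) {
    for (std::size_t i = 0; i < indices[m].size(); ++i) {
      merged[indices[m][i]] = data[m][i];
    }
  }
  return merged;  // {0, 10, 20, 30, 40, 50, 60, 70}, as expected by the test
}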
83a42cf8-3caa-495f-ade3-622f32a471a3
cpp
tensorflow/tensorflow
ragged_tensor_from_variant_op
tensorflow/core/kernels/ragged_tensor_from_variant_op.cc
tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc
#include <utility> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/kernels/ragged_tensor_variant.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { namespace { Status RaggedComponentsFromVariant( const Tensor& encoded_variant, int input_ragged_rank, int output_ragged_rank, DataType value_dtype, DataType split_dtype, std::vector<RaggedTensorVariant>* decoded_ragged) { const auto& flat_variants = encoded_variant.flat<Variant>(); decoded_ragged->reserve(flat_variants.size()); for (int i = 0; i < flat_variants.size(); i++) { const auto& flat_variant = flat_variants(i); const RaggedTensorVariant* decoded = flat_variant.get<RaggedTensorVariant>(); if (decoded == nullptr) { return errors::InvalidArgument( "Input Variant element at index ", i, " doesn't hold a RaggedTensorVariant: ", flat_variant.DebugString()); } decoded_ragged->push_back(*decoded); decoded = &decoded_ragged->back(); if (decoded->ragged_rank() != input_ragged_rank) { return errors::InvalidArgument( "Encoded input RaggedTensorVariant has ragged_rank=", decoded->ragged_rank(), ". Expected ragged_rank=", input_ragged_rank, "."); } if (decoded->values().dtype() != value_dtype) { return errors::InvalidArgument( "Expected values Tensor dtype: ", DataTypeString(value_dtype), ", found: ", DataTypeString(decoded->values().dtype())); } if (decoded->values().dims() < 1 && output_ragged_rank != 0) { return errors::InvalidArgument( "Ragged values must have rank >= 1; encoded scalar element at index ", i, " has values Tensor: ", decoded->values().DebugString()); } for (const auto& splits : decoded->nested_splits()) { if (splits.dtype() != split_dtype) { return errors::InvalidArgument( "Expected row_splits Tensor dtype: ", DataTypeString(split_dtype), ", found: ", DataTypeString(splits.dtype())); } if (splits.dims() != 1) { return errors::InvalidArgument( "Ragged splits must have rank 1; encoded scalar element at index ", i, " has splits Tensor ", splits.DebugString()); } } } return absl::OkStatus(); } template <typename VALUE_TYPE> Status StackNonRaggedTensors( const std::vector<RaggedTensorVariant>& ragged_components, RaggedTensorVariant* output_ragged) { if (ragged_components.empty()) { output_ragged->set_values(Tensor(DataTypeToEnum<VALUE_TYPE>::value, {0})); return absl::OkStatus(); } TensorShape component_values_shape = ragged_components[0].values().shape(); TensorShape result_shape = component_values_shape; result_shape.InsertDim(0, ragged_components.size()); output_ragged->set_values( Tensor(DataTypeToEnum<VALUE_TYPE>::value, result_shape)); auto output_values_flat = output_ragged->mutable_values()->flat<VALUE_TYPE>(); int values_index = 0; for (int i = 0; i < ragged_components.size(); i++) { auto& component_values = ragged_components[i].values(); if (component_values.shape() != component_values_shape) { return errors::InvalidArgument( "All flat_values must have compatible shapes. Shape at index 0: ", component_values_shape, ". 
Shape at index ", i, ": ", component_values.shape()); } auto component_values_flat = component_values.flat<VALUE_TYPE>(); for (int j = 0; j < component_values_flat.size(); j++) { output_values_flat(values_index++) = component_values_flat(j); } } return absl::OkStatus(); } template <typename VALUE_TYPE, typename SPLIT_TYPE> Status NestedStackRaggedTensors( const std::vector<RaggedTensorVariant>& ragged_components, const std::vector<int>& nested_dim_sizes, const int input_ragged_rank, const int output_ragged_rank, RaggedTensorVariant* output_ragged) { output_ragged->mutable_nested_splits()->reserve(output_ragged_rank); const int dims = nested_dim_sizes.size(); if (output_ragged_rank == 0) { if (input_ragged_rank > 0) { return errors::InvalidArgument( "Expected input_ragged_rank=0 if output_ragged_rank==0. " "Got input_ragged_rank=", input_ragged_rank); } return StackNonRaggedTensors<VALUE_TYPE>(ragged_components, output_ragged); } for (int i = 0; i < dims - 1; i++) { int dims_splits_size = nested_dim_sizes[i] + 1; output_ragged->append_splits(Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({dims_splits_size}))); auto splits_vec = output_ragged->mutable_splits(i)->vec<SPLIT_TYPE>(); int split_diff = nested_dim_sizes[i + 1]; for (int j = 0; j < dims_splits_size; j++) { splits_vec(j) = j * split_diff; } } int splits_size = ragged_components.size() + 1; output_ragged->append_splits( Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({splits_size}))); auto dims_splits_vec = output_ragged->mutable_splits(dims - 1)->vec<SPLIT_TYPE>(); dims_splits_vec(0) = 0; for (int i = 0; i < ragged_components.size(); i++) { int split_val = ragged_components[i].values().shape().dim_size(0); if (input_ragged_rank != 0 && ragged_components[i].ragged_rank() > 0) { split_val = ragged_components[i].splits(0).NumElements() - 1; } dims_splits_vec(i + 1) = dims_splits_vec(i) + split_val; } for (int i = 0; i < input_ragged_rank; i++) { int split_index = dims + i; int split_size = 1; for (int j = 0; j < ragged_components.size(); j++) { if (!ragged_components[j].nested_splits().empty()) { split_size += ragged_components[j].splits(i).NumElements() - 1; } } output_ragged->append_splits( Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size}))); auto splits_vec = output_ragged->mutable_splits(split_index)->vec<SPLIT_TYPE>(); splits_vec(0) = 0; SPLIT_TYPE last_split_value = 0; int index = 1; for (int j = 0; j < ragged_components.size(); j++) { if (ragged_components[j].nested_splits().empty()) { continue; } auto component_splits_vec = ragged_components[j].splits(i).vec<SPLIT_TYPE>(); for (int k = 1; k < component_splits_vec.size(); k++, index++) { splits_vec(index) = component_splits_vec(k) + last_split_value; } last_split_value = splits_vec(index - 1); } } TensorShape component_values_shape; if (ragged_components.empty()) { component_values_shape = TensorShape({0}); } else { component_values_shape = ragged_components[0].values().shape(); } int values_size = component_values_shape.dim_size(0); for (int i = 1; i < ragged_components.size(); i++) { if (ragged_components[i].values().dims() != component_values_shape.dims()) { return errors::InvalidArgument( "Rank of values must match for all " "components; values shape at index 0: ", component_values_shape.DebugString(), ", values shape at index ", i, ": ", ragged_components[i].values().shape().DebugString()); } values_size += ragged_components[i].values().shape().dim_size(0); } component_values_shape.set_dim(0, values_size); output_ragged->set_values( 
Tensor(DataTypeToEnum<VALUE_TYPE>::value, component_values_shape)); auto output_values_flat = output_ragged->mutable_values()->flat_outer_dims<VALUE_TYPE, 2>(); int values_index = 0; TensorShape expected_value_shape = component_values_shape; expected_value_shape.RemoveDim(0); for (int i = 0; i < ragged_components.size(); i++) { TensorShape value_shape = ragged_components[i].values().shape(); value_shape.RemoveDim(0); if (value_shape != expected_value_shape) { return errors::InvalidArgument( "All flat_values must have compatible shapes. Shape at index 0: ", expected_value_shape, ". Shape at index ", i, ": ", value_shape, ". If you are using tf.map_fn, then you may need to specify an " "explicit fn_output_signature with appropriate ragged_rank, and/or " "convert output tensors to RaggedTensors."); } auto component_values_flat = ragged_components[i].values().flat_outer_dims<VALUE_TYPE, 2>(); int num_inner_elements = ragged_components[i].values().NumElements(); if (ragged_components[i].values().dim_size(0) > 0) { num_inner_elements /= ragged_components[i].values().dim_size(0); } for (int j = 0; j < ragged_components[i].values().dim_size(0); j++, values_index++) { for (int k = 0; k < num_inner_elements; k++) { output_values_flat(values_index, k) = component_values_flat(j, k); } } } return absl::OkStatus(); } } template <typename VALUE_TYPE, typename SPLIT_TYPE> class RaggedTensorFromVariantOp : public OpKernel { public: explicit RaggedTensorFromVariantOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("input_ragged_rank", &input_ragged_rank_attr_)); OP_REQUIRES_OK( context, context->GetAttr("output_ragged_rank", &output_ragged_rank_)); } void Compute(OpKernelContext* context) override { const Tensor& encoded_variant = context->input(0); auto input_ragged_rank_ = input_ragged_rank_attr_; if (input_ragged_rank_ == -1) { input_ragged_rank_ = output_ragged_rank_ - encoded_variant.dims(); if (output_ragged_rank_ == 0 && input_ragged_rank_ < 0) { input_ragged_rank_ = 0; } OP_REQUIRES(context, input_ragged_rank_ >= 0, errors::InvalidArgument( "Inferred input_ragged_rank (output_ragged_rank - " "encoded_variant.dims()) must be >= 0, found " "output_ragged_rank: ", output_ragged_rank_, ", encoded_variant.dims(): ", encoded_variant.dims(), ", inferred input_ragged_rank: ", input_ragged_rank_)); } OP_REQUIRES( context, (output_ragged_rank_ == 0 && input_ragged_rank_ == 0) || (output_ragged_rank_ == encoded_variant.dims() + input_ragged_rank_), errors::InvalidArgument( "output_ragged_rank must be equal to input_ragged_rank + " "encoded_ragged.dims(); output_ragged_rank: ", output_ragged_rank_, ", input_ragged_rank: ", input_ragged_rank_, ", encoded_variant.dims(): ", encoded_variant.dims(), ".")); const auto value_dtype = DataTypeToEnum<VALUE_TYPE>::v(); const auto split_dtype = DataTypeToEnum<SPLIT_TYPE>::v(); std::vector<RaggedTensorVariant> decoded_components; OP_REQUIRES_OK(context, RaggedComponentsFromVariant( encoded_variant, input_ragged_rank_, output_ragged_rank_, value_dtype, split_dtype, &decoded_components)); if (encoded_variant.dims() == 0) { ReturnRaggedTensor(context, decoded_components[0]); return; } std::vector<int> encoded_dim_sizes(encoded_variant.dims(), 0); for (int i = 0; i < encoded_variant.dims(); i++) { encoded_dim_sizes[i] = encoded_variant.dim_size(i); } RaggedTensorVariant output_ragged; OP_REQUIRES_OK( context, NestedStackRaggedTensors<VALUE_TYPE, SPLIT_TYPE>( decoded_components, encoded_dim_sizes, input_ragged_rank_, 
output_ragged_rank_, &output_ragged)); ReturnRaggedTensor(context, output_ragged); } private: int input_ragged_rank_attr_; int output_ragged_rank_; void ReturnRaggedTensor(OpKernelContext* context, const RaggedTensorVariant& ragged_tensor) { int ragged_rank = ragged_tensor.ragged_rank(); OpOutputList splits_out; OP_REQUIRES_OK(context, context->output_list("output_nested_splits", &splits_out)); for (int i = 0; i < ragged_rank; i++) { splits_out.set(i, ragged_tensor.splits(i)); } context->set_output(ragged_rank, ragged_tensor.values()); } }; #define REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, split_type) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorFromVariant") \ .Device(DEVICE_CPU) \ .TypeConstraint<value_type>("Tvalues") \ .TypeConstraint<split_type>("Tsplits"), \ RaggedTensorFromVariantOp<value_type, split_type>) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorFromVariant") \ .Device(DEVICE_GPU) \ .TypeConstraint<value_type>("Tvalues") \ .TypeConstraint<split_type>("Tsplits") \ .HostMemory("encoded_ragged") \ .HostMemory("output_nested_splits") \ .HostMemory("output_dense_values"), \ RaggedTensorFromVariantOp<value_type, split_type>) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorFromVariant") \ .Device(DEVICE_TPU) \ .TypeConstraint<value_type>("Tvalues") \ .TypeConstraint<split_type>("Tsplits") \ .HostMemory("encoded_ragged") \ .HostMemory("output_nested_splits") \ .HostMemory("output_dense_values"), \ RaggedTensorFromVariantOp<value_type, split_type>); #define REGISTER_KERNELS(value_type) \ REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \ REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64_t) TF_CALL_POD_TYPES(REGISTER_KERNELS); TF_CALL_tstring(REGISTER_KERNELS); TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS); TF_CALL_quint16(REGISTER_KERNELS); TF_CALL_qint16(REGISTER_KERNELS); #undef REGISTER_KERNELS #undef REGISTER_KERNELS_WITH_SPLIT_TYPE }
#include <utility> #include <vector> #include "absl/strings/match.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ragged_tensor_variant.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class RaggedTensorFromVariantKernelTest : public ::tensorflow::OpsTestBase { protected: template <typename VALUE_TYPE, typename SPLIT_TYPE> void BuildDecodeRaggedTensorGraph( const int input_ragged_rank, const int output_ragged_rank, const TensorShape& variant_shape, const std::vector<Variant>& variant_values) { const auto value_dtype = DataTypeToEnum<VALUE_TYPE>::v(); const auto split_dtype = DataTypeToEnum<SPLIT_TYPE>::v(); TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedTensorFromVariant") .Input(FakeInput(DT_VARIANT)) .Attr("input_ragged_rank", input_ragged_rank) .Attr("output_ragged_rank", output_ragged_rank) .Attr("Tvalues", value_dtype) .Attr("Tsplits", split_dtype) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<Variant>(variant_shape, variant_values); } template <typename VALUE_TYPE, typename SPLIT_TYPE> RaggedTensorVariant CreateVariantFromRagged( const std::vector<std::vector<SPLIT_TYPE>>& ragged_splits, const TensorShape& ragged_values_shape, const std::vector<VALUE_TYPE>& ragged_values) { RaggedTensorVariant encoded; for (auto ragged_split : ragged_splits) { int splits_size = ragged_split.size(); Tensor splits(DataTypeToEnum<SPLIT_TYPE>::v(), TensorShape({splits_size})); test::FillValues<SPLIT_TYPE>(&splits, ragged_split); encoded.append_splits(splits); } Tensor values(DataTypeToEnum<VALUE_TYPE>::v(), ragged_values_shape); test::FillValues<VALUE_TYPE>(&values, ragged_values); encoded.set_values(values); return encoded; } }; TEST_F(RaggedTensorFromVariantKernelTest, ScalarInput) { const std::vector<int64_t> split_1 = {0, 1, 2, 3, 4, 5}; const std::vector<int64_t> split_2 = {0, 1, 2, 5, 6, 7}; const std::vector<int> values = {0, 1, 1, 2, 2, 3, 4}; auto encoded_variant = CreateVariantFromRagged<int, int64_t>( {split_1, split_2}, TensorShape({7}), values); Tensor expected_splits_1(DT_INT64, TensorShape({6})); Tensor expected_splits_2(DT_INT64, TensorShape({6})); Tensor expected_values(DT_INT32, TensorShape({7})); test::FillValues<int64_t>(&expected_splits_1, split_1); test::FillValues<int64_t>(&expected_splits_2, split_2); test::FillValues<int>(&expected_values, values); int input_ragged_rank = 2; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({}), {encoded_variant}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int>(*GetOutput(2), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, OneInputElement) { const std::vector<int64_t> split_1 = {0, 1, 2, 3, 4, 5}; const std::vector<int64_t> split_2 = {0, 1, 2, 5, 6, 7}; const std::vector<int> values = {0, 1, 1, 
2, 2, 3, 4}; const std::vector<int64_t> batched_splits_1 = {0, 5}; auto encoded_variant = CreateVariantFromRagged<int, int64_t>( {split_1, split_2}, TensorShape({7}), values); Tensor expected_splits_1(DT_INT64, TensorShape({2})); Tensor expected_splits_2(DT_INT64, TensorShape({6})); Tensor expected_splits_3(DT_INT64, TensorShape({6})); Tensor expected_values(DT_INT32, TensorShape({7})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int64_t>(&expected_splits_2, split_1); test::FillValues<int64_t>(&expected_splits_3, split_2); test::FillValues<int>(&expected_values, values); int input_ragged_rank = 2; int output_ragged_rank = 3; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({1}), {encoded_variant}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int64_t>(*GetOutput(2), expected_splits_3); test::ExpectTensorEqual<int>(*GetOutput(3), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, TensorIn2DOut) { const std::vector<int> values_1 = {1, 2, 3}; const std::vector<int> values_2 = {}; const std::vector<int> values_3 = {4, 5}; const std::vector<int> values_4 = {6}; const std::vector<int64_t> batched_splits_1 = {0, 2, 4}; const std::vector<int64_t> batched_splits_2 = {0, 3, 3, 5, 6}; const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6}; auto component_variant_1 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({3}), values_1); auto component_variant_2 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({0}), values_2); auto component_variant_3 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({2}), values_3); auto component_variant_4 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({1}), values_4); Tensor expected_splits_1(DT_INT64, TensorShape({3})); Tensor expected_splits_2(DT_INT64, TensorShape({5})); Tensor expected_values(DT_INT32, TensorShape({6})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int64_t>(&expected_splits_2, batched_splits_2); test::FillValues<int>(&expected_values, batched_values); int input_ragged_rank = 0; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2, 2}), {component_variant_1, component_variant_2, component_variant_3, component_variant_4}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int>(*GetOutput(2), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, NonEmpty1DIn3DOut) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int64_t> component_split_2_1 = {0, 1, 2}; const std::vector<int64_t> component_split_3_1 = {0, 2}; const std::vector<int64_t> component_split_4_1 = {0, 2, 3}; const std::vector<int64_t> component_split_5_1 = {0, 1, 3}; const std::vector<int> component_values_1 = {0}; const std::vector<int> component_values_2 = {0, 1}; const std::vector<int> component_values_3 = {0, 1}; const std::vector<int> component_values_4 = {0, 1, 2}; const std::vector<int> component_values_5 = {0, 1, 2}; const std::vector<int64_t> batched_splits_1 = {0, 5, 10}; const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 4, 6, 8, 10, 12, 13, 14, 16}; const std::vector<int64_t> batched_splits_3 = { 0, 1, 2, 3, 5, 7, 8, 9, 11, 13, 14, 
15, 17, 18, 20, 21, 22}; const std::vector<int> batched_values = {0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1}; Tensor expected_splits_1(DT_INT64, TensorShape({3})); Tensor expected_splits_2(DT_INT64, TensorShape({11})); Tensor expected_splits_3(DT_INT64, TensorShape({17})); Tensor expected_values(DT_INT32, TensorShape({22})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int64_t>(&expected_splits_2, batched_splits_2); test::FillValues<int64_t>(&expected_splits_3, batched_splits_3); test::FillValues<int>(&expected_values, batched_values); auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {component_split_2_1}, TensorShape({2}), component_values_2); auto variant_component_3 = CreateVariantFromRagged<int, int64_t>( {component_split_3_1}, TensorShape({2}), component_values_3); auto variant_component_4 = CreateVariantFromRagged<int, int64_t>( {component_split_4_1}, TensorShape({3}), component_values_4); auto variant_component_5 = CreateVariantFromRagged<int, int64_t>( {component_split_5_1}, TensorShape({3}), component_values_5); int input_ragged_rank = 1; int output_ragged_rank = 3; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2, 5}), {variant_component_1, variant_component_2, variant_component_3, variant_component_4, variant_component_5, variant_component_4, variant_component_5, variant_component_1, variant_component_3, variant_component_2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int64_t>(*GetOutput(2), expected_splits_3); test::ExpectTensorEqual<int>(*GetOutput(3), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, NonEmpty2DIn4DOutInferredInputRaggedRank) { const std::vector<int64_t> component_split_1_1 = {0, 1, 3, 4, 6, 8}; const std::vector<int64_t> component_split_1_2 = {0, 1, 2, 3, 5, 7, 8, 9, 11}; const std::vector<int64_t> component_split_2_1 = {0, 2, 4, 5, 6, 8}; const std::vector<int64_t> component_split_2_2 = {0, 2, 3, 4, 6, 7, 9, 10, 11}; const std::vector<int> component_values_1 = {0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2}; const std::vector<int> component_values_2 = {0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1}; const std::vector<int64_t> batched_splits_1 = {0, 2, 4}; const std::vector<int64_t> batched_splits_2 = {0, 5, 10, 15, 20}; const std::vector<int64_t> batched_splits_3 = {0, 1, 3, 4, 6, 8, 10, 12, 13, 14, 16, 18, 20, 21, 22, 24, 25, 27, 28, 30, 32}; const std::vector<int64_t> batched_splits_4 = { 0, 1, 2, 3, 5, 7, 8, 9, 11, 13, 14, 15, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 31, 32, 33, 34, 35, 36, 38, 40, 41, 42, 44}; const std::vector<int> batched_values = { 0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2}; Tensor expected_splits_1(DT_INT64, TensorShape({3})); Tensor expected_splits_2(DT_INT64, TensorShape({5})); Tensor expected_splits_3(DT_INT64, TensorShape({21})); Tensor expected_splits_4(DT_INT64, TensorShape({33})); Tensor expected_values(DT_INT32, TensorShape({44})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int64_t>(&expected_splits_2, batched_splits_2); test::FillValues<int64_t>(&expected_splits_3, batched_splits_3); 
test::FillValues<int64_t>(&expected_splits_4, batched_splits_4); test::FillValues<int>(&expected_values, batched_values); auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1, component_split_1_2}, TensorShape({11}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {component_split_2_1, component_split_2_2}, TensorShape({11}), component_values_2); int input_ragged_rank = -1; int output_ragged_rank = 4; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2, 2}), {variant_component_1, variant_component_2, variant_component_2, variant_component_1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int64_t>(*GetOutput(2), expected_splits_3); test::ExpectTensorEqual<int64_t>(*GetOutput(3), expected_splits_4); test::ExpectTensorEqual<int>(*GetOutput(4), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, EmptyRow1DIn2DOut) { const std::vector<int64_t> component_split_1_1 = {0, 3, 3}; const std::vector<int> component_values_1 = {1, 2, 3}; const std::vector<int64_t> component_split_2_1 = {0}; const std::vector<int64_t> batched_splits_1 = {0, 2, 2}; const std::vector<int64_t> batched_splits_2 = {0, 3, 3}; const std::vector<int> batched_values = {1, 2, 3}; Tensor expected_splits_1(DT_INT64, TensorShape({3})); Tensor expected_splits_2(DT_INT64, TensorShape({3})); Tensor expected_values(DT_INT32, TensorShape({3})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int64_t>(&expected_splits_2, batched_splits_2); test::FillValues<int>(&expected_values, batched_values); auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({3}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {component_split_2_1}, TensorShape({0}), {}); int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2}), {variant_component_1, variant_component_2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int>(*GetOutput(2), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, NDValues1DIn2DOut) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int> component_values_1 = {1, 2}; const std::vector<int64_t> component_split_2_1 = {0, 1, 2}; const std::vector<int> component_values_2 = {1, 2, 3, 4}; const std::vector<int64_t> batched_splits_1 = {0, 1, 3}; const std::vector<int64_t> batched_splits_2 = {0, 1, 2, 3}; const std::vector<int> batched_values = {1, 2, 1, 2, 3, 4}; Tensor expected_splits_1(DT_INT64, TensorShape({3})); Tensor expected_splits_2(DT_INT64, TensorShape({4})); Tensor expected_values(DT_INT32, TensorShape({3, 2})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int64_t>(&expected_splits_2, batched_splits_2); test::FillValues<int>(&expected_values, batched_values); auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1, 2}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {component_split_2_1}, TensorShape({2, 2}), component_values_2); int input_ragged_rank = 1; 
int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2}), {variant_component_1, variant_component_2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int64_t>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int>(*GetOutput(2), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, NonEmpty1DIn3DOutInt32Splits) { const std::vector<int> component_split_1_1 = {0, 1}; const std::vector<int> component_split_2_1 = {0, 1, 2}; const std::vector<int> component_split_3_1 = {0, 2}; const std::vector<int> component_split_4_1 = {0, 2, 3}; const std::vector<int> component_split_5_1 = {0, 1, 3}; const std::vector<int> component_values_1 = {0}; const std::vector<int> component_values_2 = {0, 1}; const std::vector<int> component_values_3 = {0, 1}; const std::vector<int> component_values_4 = {0, 1, 2}; const std::vector<int> component_values_5 = {0, 1, 2}; const std::vector<int> batched_splits_1 = {0, 5, 10}; const std::vector<int> batched_splits_2 = {0, 1, 3, 4, 6, 8, 10, 12, 13, 14, 16}; const std::vector<int> batched_splits_3 = {0, 1, 2, 3, 5, 7, 8, 9, 11, 13, 14, 15, 17, 18, 20, 21, 22}; const std::vector<int> batched_values = {0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0, 1, 0, 1}; Tensor expected_splits_1(DT_INT32, TensorShape({3})); Tensor expected_splits_2(DT_INT32, TensorShape({11})); Tensor expected_splits_3(DT_INT32, TensorShape({17})); Tensor expected_values(DT_INT32, TensorShape({22})); test::FillValues<int>(&expected_splits_1, batched_splits_1); test::FillValues<int>(&expected_splits_2, batched_splits_2); test::FillValues<int>(&expected_splits_3, batched_splits_3); test::FillValues<int>(&expected_values, batched_values); auto variant_component_1 = CreateVariantFromRagged<int, int>( {component_split_1_1}, TensorShape({1}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int>( {component_split_2_1}, TensorShape({2}), component_values_2); auto variant_component_3 = CreateVariantFromRagged<int, int>( {component_split_3_1}, TensorShape({2}), component_values_3); auto variant_component_4 = CreateVariantFromRagged<int, int>( {component_split_4_1}, TensorShape({3}), component_values_4); auto variant_component_5 = CreateVariantFromRagged<int, int>( {component_split_5_1}, TensorShape({3}), component_values_5); int input_ragged_rank = 1; int output_ragged_rank = 3; BuildDecodeRaggedTensorGraph<int, int>( input_ragged_rank, output_ragged_rank, TensorShape({2, 5}), {variant_component_1, variant_component_2, variant_component_3, variant_component_4, variant_component_5, variant_component_4, variant_component_5, variant_component_1, variant_component_3, variant_component_2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int>(*GetOutput(1), expected_splits_2); test::ExpectTensorEqual<int>(*GetOutput(2), expected_splits_3); test::ExpectTensorEqual<int>(*GetOutput(3), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, InvalidInferredInputRaggedRank) { auto component_variant_1 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({3}), {1, 2, 3}); auto component_variant_2 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({0}), {}); auto component_variant_3 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({2}), {1, 2}); auto component_variant_4 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({1}), {1}); int 
input_ragged_rank = -1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({1, 1, 1, 4}), {component_variant_1, component_variant_2, component_variant_3, component_variant_4}); EXPECT_TRUE( absl::StartsWith(RunOpKernel().message(), "Inferred input_ragged_rank (output_ragged_rank - " "encoded_variant.dims()) must be >= 0")); } TEST_F(RaggedTensorFromVariantKernelTest, InputDimsAndRaggedRankAttrsMismatch) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int64_t> component_split_2_1 = {0, 1, 2}; const std::vector<int> component_values_1 = {0}; const std::vector<int> component_values_2 = {0, 1}; auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {component_split_2_1}, TensorShape({2}), component_values_2); int input_ragged_rank = 1; int output_ragged_rank = 4; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2}), {variant_component_1, variant_component_2}); EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(), "output_ragged_rank must be equal to " "input_ragged_rank + encoded_ragged.dims()")); } TEST_F(RaggedTensorFromVariantKernelTest, InputDoesNotHoldRaggedTensorVariant) { int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2}), {1, 2}); EXPECT_TRUE(absl::StartsWith( RunOpKernel().message(), "Input Variant element at index 0 doesn't hold a RaggedTensorVariant")); } TEST_F(RaggedTensorFromVariantKernelTest, InputScalarElementDoesNotMatchInputRaggedRank) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int> component_values_1 = {1, 2}; auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1, 2}), component_values_1); int input_ragged_rank = 2; int output_ragged_rank = 3; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({1}), {variant_component_1}); EXPECT_TRUE( absl::StartsWith(RunOpKernel().message(), "Encoded input RaggedTensorVariant has ragged_rank=1. 
" "Expected ragged_rank=2.")); } TEST_F(RaggedTensorFromVariantKernelTest, RaggedSplitTypeMismatch) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int> component_values_1 = {0}; auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1}), component_values_1); int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int>(input_ragged_rank, output_ragged_rank, TensorShape({1}), {variant_component_1}); EXPECT_TRUE(absl::StartsWith( RunOpKernel().message(), "Expected row_splits Tensor dtype: int32, found: int64")); } TEST_F(RaggedTensorFromVariantKernelTest, RaggedSplitRankNotOne) { RaggedTensorVariant encoded(Tensor(DT_INT32, {2}), {Tensor(DT_INT64, {2, 1})}); test::FillValues<int64_t>(encoded.mutable_splits(0), {1, 2}); test::FillValues<int>(encoded.mutable_values(), {1, 2}); int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({1}), {encoded}); EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(), "Ragged splits must have rank 1")); } TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesTypeMismatch) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int> component_values_1 = {0}; auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1}), component_values_1); int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<tstring, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({1}), {variant_component_1}); EXPECT_TRUE( absl::StartsWith(RunOpKernel().message(), "Expected values Tensor dtype: string, found: int32")); } TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesRankNotGreaterThanOne) { auto variant_component_1 = CreateVariantFromRagged<int, int64_t>({{0, 1}}, TensorShape({}), {1}); int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({1}), {variant_component_1}); EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(), "Ragged values must have rank >= 1")); } TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesRankMismatch) { const std::vector<int64_t> component_split_1_1 = {0, 1}; const std::vector<int64_t> component_split_2_1 = {0, 1, 2}; const std::vector<int> component_values_1 = {0}; const std::vector<int> component_values_2 = {0, 1, 2, 3}; auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {component_split_1_1}, TensorShape({1}), component_values_1); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {component_split_2_1}, TensorShape({2, 2}), component_values_2); int input_ragged_rank = 1; int output_ragged_rank = 2; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({2}), {variant_component_1, variant_component_2}); EXPECT_TRUE(absl::StartsWith(RunOpKernel().message(), "Rank of values must match for all components")); } TEST_F(RaggedTensorFromVariantKernelTest, OutputRaggedRank0) { auto variant_component_1 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({4}), {0, 1, 2, 3}); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({4}), {4, 5, 6, 7}); int input_ragged_rank = -1; int output_ragged_rank = 0; Tensor expected_values(DT_INT32, TensorShape({2, 4})); test::FillValues<int>(&expected_values, {0, 1, 2, 3, 4, 5, 6, 7}); BuildDecodeRaggedTensorGraph<int, int64_t>( 
input_ragged_rank, output_ragged_rank, TensorShape({2}), {variant_component_1, variant_component_2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int>(*GetOutput(0), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, OutputRaggedRank0Empty) { int input_ragged_rank = -1; int output_ragged_rank = 0; Tensor expected_values(DT_INT32, TensorShape({0})); test::FillValues<int>(&expected_values, {}); BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({0}), {}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int>(*GetOutput(0), expected_values); } TEST_F(RaggedTensorFromVariantKernelTest, ShapeFnTest) { ShapeInferenceTestOp op("RaggedTensorFromVariant"); (*op.node_def.mutable_attr())["input_ragged_rank"].set_i(0); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(1); INFER_OK(op, "?", "[?];?"); INFER_OK(op, "[?]", "[?];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?]"); (*op.node_def.mutable_attr())["input_ragged_rank"].set_i(1); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(1); INFER_OK(op, "?", "[?];?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[?,?]"); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(2); INFER_OK(op, "?", "[?];[?];?"); INFER_OK(op, "[?]", "[?];[?];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?]"); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(3); INFER_OK(op, "?", "[?];[?];[?];?"); INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[?]"); INFER_OK(op, "[?,?]", "[?];[?];[?];?"); INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?,?]"); (*op.node_def.mutable_attr())["input_ragged_rank"].set_i(3); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(3); INFER_OK(op, "?", "[?];[?];[?];?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]"); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(4); INFER_OK(op, "?", "[?];[?];[?];[?];?"); INFER_OK(op, "[?]", "[?];[?];[?];[?];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?]"); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(5); INFER_OK(op, "?", "[?];[?];[?];[?];[?];?"); INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[?]"); INFER_OK(op, "[?,?]", "[?];[?];[?];[?];[?];?"); (*op.node_def.mutable_attr())["output_ragged_rank"].set_i(6); INFER_OK(op, "?", "[?];[?];[?];[?];[?];[?];?"); INFER_ERROR("Shape must be rank 3 but is rank 1", op, "[?]"); INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[?,?]"); INFER_OK(op, "[?,?,?]", "[?];[?];[?];[?];[?];[?];?"); } TEST_F(RaggedTensorFromVariantKernelTest, 2DValuesTensorIn1DOut) { const std::vector<int64_t> batched_splits_1 = {0, 2, 3, 3, 5}; const std::vector<int> batched_values = {1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5}; auto variant_component_1 = CreateVariantFromRagged<int, int64_t>( {}, TensorShape({2, 2, 2}), {1, 1, 1, 1, 2, 2, 2, 2}); auto variant_component_2 = CreateVariantFromRagged<int, int64_t>( {}, TensorShape({1, 2, 2}), {3, 3, 3, 3}); auto variant_component_3 = CreateVariantFromRagged<int, int64_t>({}, TensorShape({0, 2, 2}), {}); auto variant_component_4 = CreateVariantFromRagged<int, int64_t>( {}, TensorShape({2, 2, 2}), {4, 4, 4, 4, 5, 5, 5, 5}); Tensor expected_splits_1(DT_INT64, TensorShape({5})); Tensor expected_values(DT_INT32, TensorShape({5, 2, 2})); test::FillValues<int64_t>(&expected_splits_1, batched_splits_1); test::FillValues<int>(&expected_values, 
batched_values); int input_ragged_rank = 0; int output_ragged_rank = 1; BuildDecodeRaggedTensorGraph<int, int64_t>( input_ragged_rank, output_ragged_rank, TensorShape({4}), {variant_component_1, variant_component_2, variant_component_3, variant_component_4}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(0), expected_splits_1); test::ExpectTensorEqual<int>(*GetOutput(1), expected_values); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
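For readers unfamiliar with the row_splits encoding that the kernel and tests above rely on, here is a small standalone sketch: row i of a ragged dimension spans values[splits[i], splits[i+1]). It uses the inner splits and values from the TensorIn2DOut test; the function name is illustrative.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative decoding of one ragged dimension: inner splits {0, 3, 3, 5, 6}
// over values {1, 2, 3, 4, 5, 6} yield rows [1,2,3], [], [4,5], [6]; the outer
// splits {0, 2, 4} in the same test then group those four rows into pairs.
std::vector<std::vector<int>> DecodeRowSplitsSketch() {
  const std::vector<int64_t> splits = {0, 3, 3, 5, 6};
  const std::vector<int> values = {1, 2, 3, 4, 5, 6};
  std::vector<std::vector<int>> rows;
  for (std::size_t i = 0; i + 1 < splits.size(); ++i) {
    rows.emplace_back(values.begin() + splits[i],       // row i starts here
                      values.begin() + splits[i + 1]);  // and ends before here
  }
  return rows;
}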
c582be11-3b89-42ab-9c64-744706ded791
cpp
tensorflow/tensorflow
quantized_matmul_op
tensorflow/core/kernels/quantized_matmul_op.cc
tensorflow/core/kernels/quantized_matmul_op_test.cc
#define EIGEN_USE_THREADS #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK #include "public/gemmlowp.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/kernels/reference_gemm.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { template <bool TransposeA, bool TransposeB, bool TransposeC> void GemmlowpMultiply(OpKernelContext* op_context, const quint8* a_data, const quint8* b_data, qint32* c_data, int m, int n, int k, int offset_a, int offset_b, int lda, int ldb, int ldc) { const uint8* a_data_as_uint8 = &(a_data->value); const uint8* b_data_as_uint8 = &(b_data->value); int32* c_data_as_int32 = &(c_data->value); static const gemmlowp::MapOrder ResultOrder = !TransposeC ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; static const gemmlowp::MapOrder LhsOrder = !TransposeA ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; static const gemmlowp::MapOrder RhsOrder = !TransposeB ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; gemmlowp::MatrixMap<const std::uint8_t, LhsOrder> lhs(a_data_as_uint8, m, k, lda); gemmlowp::MatrixMap<const std::uint8_t, RhsOrder> rhs(b_data_as_uint8, k, n, ldb); gemmlowp::MatrixMap<std::int32_t, ResultOrder> result(c_data_as_int32, m, n, ldc); const std::tuple<> empty_pipeline = {}; auto& worker_threads = *(op_context->device()->tensorflow_cpu_worker_threads()); TensorflowGemmContext context(worker_threads.num_threads, worker_threads.workers); gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t, gemmlowp::DefaultL8R8BitDepthParams>( &context, lhs, rhs, &result, -offset_a, -offset_b, empty_pipeline); TF_ANNOTATE_MEMORY_IS_INITIALIZED(c_data_as_int32, m * n * sizeof(int32)); } template <class T1, class T2, class Toutput> class QuantizedMatMulOp : public OpKernel { public: explicit QuantizedMatMulOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("transpose_a", &transpose_a_)); OP_REQUIRES_OK(context, context->GetAttr("transpose_b", &transpose_b_)); } void Compute(OpKernelContext* context) override { const Tensor& a = context->input(0); const Tensor& b = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(2).shape()), errors::InvalidArgument("min_a must be a scalar, but got shape", context->input(2).shape())); const float min_a = context->input(2).flat<float>()(0); OP_REQUIRES(context, context->input(3).NumElements() == 1, errors::InvalidArgument("max_a must be a scalar, but got shape", context->input(3).shape())); const float max_a = context->input(3).flat<float>()(0); OP_REQUIRES(context, context->input(4).NumElements() == 1, errors::InvalidArgument("min_b must be a scalar, but got shape", context->input(4).shape())); const float min_b = context->input(4).flat<float>()(0); OP_REQUIRES(context, context->input(5).NumElements() == 1, errors::InvalidArgument("max_b must be a scalar, but got shape", context->input(5).shape())); const float max_b = context->input(5).flat<float>()(0); OP_REQUIRES(context, (max_a > min_a), errors::InvalidArgument("max_a must be larger than min_a.")); OP_REQUIRES(context, (max_b > min_b), errors::InvalidArgument("max_b must be larger than min_b.")); const int32_t offset_a = 
FloatToQuantizedUnclamped<T1>(0.0f, min_a, max_a); const int32_t offset_b = FloatToQuantizedUnclamped<T2>(0.0f, min_b, max_b); const int32_t offset_c = 0; const int32_t mult_c = 1; const int32_t shift_c = 0; OP_REQUIRES(context, TensorShapeUtils::IsMatrix(a.shape()), errors::InvalidArgument("In[0] is not a matrix")); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(b.shape()), errors::InvalidArgument("In[1] is not a matrix")); Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair; dim_pair[0].first = transpose_a_ ? 0 : 1; dim_pair[0].second = transpose_b_ ? 1 : 0; OP_REQUIRES(context, a.dim_size(dim_pair[0].first) == b.dim_size(dim_pair[0].second), errors::InvalidArgument("Matrix size-incompatible: In[0]: ", a.shape().DebugString(), ", In[1]: ", b.shape().DebugString())); OP_REQUIRES(context, ((shift_c >= 0) && (shift_c <= 31)), errors::InvalidArgument("shift_c must be between 0 and 31, " "inclusive.")); int a_dim_remaining = 1 - dim_pair[0].first; int b_dim_remaining = 1 - dim_pair[0].second; TensorShape out_shape( {a.dim_size(a_dim_remaining), b.dim_size(b_dim_remaining)}); Tensor* c = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &c)); CHECK(c); const T1* a_data = a.flat<T1>().data(); const T2* b_data = b.flat<T2>().data(); Toutput* c_data = c->flat<Toutput>().data(); const bool transpose_c = false; const size_t m = a.dim_size(a_dim_remaining); const size_t n = b.dim_size(b_dim_remaining); const size_t k = a.dim_size(dim_pair[0].first); const size_t lda = a.dim_size(1); const size_t ldb = b.dim_size(1); const size_t ldc = n; if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<Toutput, qint32>() && (offset_c == 0) && (mult_c == 1) && (shift_c == 0) && (transpose_c == false) && (k <= 2048)) { meta::QuantizedGemm(context, transpose_a_, transpose_b_, a_data, b_data, c_data, m, n, k, -offset_a, -offset_b, lda, ldb, ldc); } else if (std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<Toutput, qint32>() && (offset_c == 0) && (mult_c == 1) && (shift_c == 0) && (transpose_c == false)) { if (transpose_a_) { if (transpose_b_) { GemmlowpMultiply<true, true, false>(context, a_data, b_data, c_data, m, n, k, offset_a, offset_b, lda, ldb, ldc); } else { GemmlowpMultiply<true, false, false>(context, a_data, b_data, c_data, m, n, k, offset_a, offset_b, lda, ldb, ldc); } } else { if (transpose_b_) { GemmlowpMultiply<false, true, false>(context, a_data, b_data, c_data, m, n, k, offset_a, offset_b, lda, ldb, ldc); } else { GemmlowpMultiply<false, false, false>(context, a_data, b_data, c_data, m, n, k, offset_a, offset_b, lda, ldb, ldc); } } } else { ReferenceGemm<T1, T2, Toutput>( transpose_a_, transpose_b_, transpose_c, m, n, k, a_data, offset_a, lda, b_data, offset_b, ldb, c_data, shift_c, offset_c, mult_c, ldc); } float min_c_value; float max_c_value; QuantizationRangeForMultiplication<T1, T2, Toutput>( min_a, max_a, min_b, max_b, &min_c_value, &max_c_value); Tensor* c_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &c_min)); c_min->flat<float>()(0) = min_c_value; Tensor* c_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &c_max)); c_max->flat<float>()(0) = max_c_value; } private: bool transpose_a_; bool transpose_b_; }; REGISTER_KERNEL_BUILDER(Name("QuantizedMatMul") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedMatMulOp<quint8, quint8, qint32>); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class QuantizedMatMulTest : public OpsTestBase { protected: }; TEST_F(QuantizedMatMulTest, Small_NoParams) { TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Toutput", DataTypeToEnum<qint32>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<quint8>(TensorShape({3, 4}), {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4})); test::FillValues<qint32>(&expected, {74, 80, 86, 92, 173, 188, 203, 218}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(QuantizedMatMulTest, VerySmall_WithParams) { const bool transpose_a = true; const int a_rows = 1; const int a_cols = 1; const int b_rows = 1; const int b_cols = 1; const bool transpose_b = false; TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Toutput", DataTypeToEnum<qint32>::v()) .Attr("transpose_a", transpose_a) .Attr("transpose_b", transpose_b) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11}); AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0}); AddInputFromArray<float>(TensorShape({}), {-12.0f}); AddInputFromArray<float>(TensorShape({}), {243.0f}); AddInputFromArray<float>(TensorShape({}), {1.0f}); AddInputFromArray<float>(TensorShape({}), {256.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({a_cols, b_cols})); test::FillValues<qint32>(&expected, {-1}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(QuantizedMatMulTest, VerySmall_BadRange) { const bool transpose_a = true; const int a_rows = 1; const int a_cols = 1; const int b_rows = 1; const int b_cols = 1; const bool transpose_b = false; TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Toutput", DataTypeToEnum<qint32>::v()) .Attr("transpose_a", transpose_a) .Attr("transpose_b", transpose_b) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); 
AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11}); AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0}); AddInputFromArray<float>(TensorShape({}), {-12.0f}); AddInputFromArray<float>(TensorShape({}), {243.0f}); AddInputFromArray<float>(TensorShape({}), {1.0f}); AddInputFromArray<float>(TensorShape({}), {1.0f}); EXPECT_EQ(::absl::StatusCode::kInvalidArgument, RunOpKernel().code()); } TEST_F(QuantizedMatMulTest, VerySmall_BadMinMax) { const bool transpose_a = true; const int a_rows = 1; const int a_cols = 1; const int b_rows = 1; const int b_cols = 1; const bool transpose_b = false; TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Toutput", DataTypeToEnum<qint32>::v()) .Attr("transpose_a", transpose_a) .Attr("transpose_b", transpose_b) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11}); AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0}); AddInputFromArray<float>(TensorShape({1}), {2}); AddInputFromArray<float>(TensorShape({}), {243.0f}); AddInputFromArray<float>(TensorShape({}), {1.0f}); AddInputFromArray<float>(TensorShape({}), {256.0f}); EXPECT_EQ(::absl::StatusCode::kInvalidArgument, RunOpKernel().code()); } TEST_F(QuantizedMatMulTest, Small_WithParams) { const bool transpose_a = true; const int a_rows = 3; const int a_cols = 4; const int b_rows = 3; const int b_cols = 2; const bool transpose_b = false; TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Toutput", DataTypeToEnum<qint32>::v()) .Attr("transpose_a", transpose_a) .Attr("transpose_b", transpose_b) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), { 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, }); AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), { 1, 4, 2, 5, 3, 6, }); AddInputFromArray<float>(TensorShape({}), {-12.0f}); AddInputFromArray<float>(TensorShape({}), {243.0f}); AddInputFromArray<float>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {255.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({a_cols, b_cols})); test::FillValues<qint32>(&expected, { -38, -83, -44, -98, -50, -113, -56, -128, }); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(QuantizedMatMulTest, Medium_WithParams) { const bool transpose_a = true; const bool transpose_b = false; TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "QuantizedMatMul") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Toutput", DataTypeToEnum<qint32>::v()) .Attr("transpose_a", transpose_a) .Attr("transpose_b", transpose_b) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int a_rows = 8; const int a_cols = 8; const float a_min = -2164.25f; const float a_max = 2006.27f; Tensor a_float(DT_FLOAT, {a_rows, a_cols}); test::FillValues<float>( &a_float, {-1014.12, -157.382, -810.17, 1435.28, 1016.37, 219.684, -316.054, -2164.25, 2006.27, -547.444, 857.376, 404.376, 9.72115, 332.588, 194.385, -286.57, 26.062, 23.1125, 110.436, 247.055, -127.683, 
-376.275, -124.81, -846.826, -77.1507, 305.581, -202.747, 12.9528, 9.64886, 872.686, 40.9069, 197.816, 44.16, -306.768, -1457.52, -368.939, -1049.42, -486.353, 1745.87, 95.7695, 395.773, -254.333, -404.27, 787.16, -2.44114, 199.37, -1024.08, 784.901, 235.055, -42.7295, 241.498, -245.365, 470.763, 186.159, 186.579, -220.163, 1304.58, 386.272, -358.853, -755.996, 360.109, -866.007, 55.2828, -508.801}); Tensor a_quantized = FloatTensorToQuantized<quint8>(a_float, a_min, a_max); const int b_rows = 8; const int b_cols = 8; const float b_min = -0.739539f; const float b_max = 0.641057f; Tensor b_float(DT_FLOAT, {b_rows, b_cols}); test::FillValues<float>( &b_float, {-0.294619, -0.0670519, 0.261507, -0.126274, 0.127229, -0.176945, -0.251223, 0.231086, 0.453694, 0.415666, -0.288733, 0.508717, 0.211551, 0.0435907, -0.582383, -0.308779, 0.0696883, -0.438122, 0.114, 0.433964, 0.109883, 0.284931, -0.149661, 0.108657, 0.458333, -0.130231, -0.35805, -0.123206, -0.437968, 0.0282411, 0.628818, -0.0522173, -0.0233403, 0.124863, 0.217165, 0.262294, -0.171005, -0.254693, -0.200433, -0.287354, 0.488166, -0.0354688, -0.118091, -0.590444, 0.491537, -0.739539, 0.083117, 0.282482, 0.275269, -0.36574, 0.107476, 0.0511428, -0.136887, -0.0149852, -0.259694, 0.641057, 0.264054, -0.295126, -0.0218791, 0.361211, 0.012448, 0.0709718, -0.392394, -0.434215}); Tensor b_quantized = FloatTensorToQuantized<quint8>(b_float, b_min, b_max); AddInputFromArray<quint8>(a_quantized.shape(), a_quantized.flat<quint8>()); AddInputFromArray<quint8>(b_quantized.shape(), b_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {a_min}); AddInputFromArray<float>(TensorShape({}), {a_max}); AddInputFromArray<float>(TensorShape({}), {b_min}); AddInputFromArray<float>(TensorShape({}), {b_max}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_float(DT_FLOAT, {a_cols, b_cols}); test::FillValues<float>( &expected_float, {1776.82f, 421.058f, -854.308f, 1430.65f, 503.105f, 57.2744f, -1514.97f, -1163.66f, -87.0979f, -394.577f, -39.4983f, -79.1938f, -329.029f, 313.475f, 446.929f, -59.5855f, 350.837f, 238.655f, -609.21f, 350.499f, 192.238f, 847.576f, -103.177f, 185.886f, -90.5335f, 200.787f, 99.1981f, -717.076f, 763.815f, -703.726f, -125.164f, 732.325f, -51.5303f, -418.826f, 60.0783f, -299.658f, 231.41f, 72.0622f, -289.244f, 663.776f, 391.177f, 294.415f, -484.148f, -677.932f, -180.342f, -194.764f, 761.715f, 553.061f, -283.355f, 321.109f, 351.269f, 1171.7f, -857.497f, 343.804f, -494.599f, -844.119f, 725.237f, 586.052f, -735.013f, -897.723f, -122.434f, -502.907f, 1264.6f, -239.991f}); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 15.0); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_matmul_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_matmul_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
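For reference, a minimal standalone sketch of how the zero-point offsets used by the kernel above can be derived for an 8-bit range; this is not the TensorFlow implementation, and ZeroPointForRange is an invented name, but it conceptually matches (up to rounding order) what FloatToQuantizedUnclamped<quint8>(0.0f, min, max) produces for the ranges used in the tests.

#include <cmath>
#include <cstdint>

// Hypothetical helper: the quantized code representing 0.0f when the float range
// [min, max] is mapped linearly onto the 256 steps of quint8.
int32_t ZeroPointForRange(float min, float max) {
  const float scale = (max - min) / 255.0f;  // float width of one quantized step
  return static_cast<int32_t>(std::lround((0.0f - min) / scale));
}

// In VerySmall_WithParams above, min_a = -12.0f and max_a = 243.0f give a step of
// 1.0f and a zero point of 12, so the integer GEMM subtracts 12 from every quint8
// element of `a` before accumulating into qint32.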
21c797d2-7a99-4da7-bc42-efd402a917c9
cpp
tensorflow/tensorflow
quantized_activation_ops
tensorflow/core/kernels/quantized_activation_ops.cc
tensorflow/core/kernels/quantized_activation_ops_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { template <typename T> class QuantizedReluOp : public OpKernel { public: explicit QuantizedReluOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& min_input_tensor = context->input(1); const Tensor& max_input_tensor = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min_input_tensor.shape()), errors::InvalidArgument("`min_input` must be rank 0 but is rank ", min_input_tensor.dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_input_tensor.shape()), errors::InvalidArgument("`max_input` must be rank 0 but is rank ", max_input_tensor.dims())); const float min_input = min_input_tensor.scalar<float>()(); const float max_input = max_input_tensor.scalar<float>()(); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); const T min_as_quantized = FloatToQuantized<T>(0.0f, min_input, max_input); if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) { auto input_ui8_array = input.flat<quint8>(); meta::Clamp(context, input_ui8_array.data(), input_ui8_array.size(), min_as_quantized, 255, output->flat<quint8>().data()); } else { output->flat<T>().device(context->eigen_cpu_device()) = input.flat<T>().cwiseMax(min_as_quantized).template cast<T>(); } Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); output_min->flat<float>()(0) = min_input; Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_max->flat<float>()(0) = max_input; } }; template <typename T> class QuantizedRelu6Op : public OpKernel { public: explicit QuantizedRelu6Op(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& min_input_tensor = context->input(1); const Tensor& max_input_tensor = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min_input_tensor.shape()), errors::InvalidArgument("`min_input` must be rank 0 but is rank ", min_input_tensor.dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_input_tensor.shape()), errors::InvalidArgument("`max_input` must be rank 0 but is rank ", max_input_tensor.dims())); const float min_input = min_input_tensor.scalar<float>()(); const float max_input = max_input_tensor.scalar<float>()(); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); const T min_as_quantized = FloatToQuantized<T>(0.0f, min_input, max_input); const T max_as_quantized = FloatToQuantized<T>(6.0f, min_input, max_input); if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) { auto input_ui8_array = input.flat<quint8>(); meta::Clamp(context, input_ui8_array.data(), input_ui8_array.size(), min_as_quantized, max_as_quantized, output->flat<quint8>().data()); } else { output->flat<T>().device(context->eigen_cpu_device()) = input.flat<T>() .cwiseMax(min_as_quantized) .cwiseMin(max_as_quantized) .template cast<T>(); } Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, 
&output_min)); output_min->flat<float>()(0) = min_input; Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_max->flat<float>()(0) = max_input; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedRelu") .Device(DEVICE_CPU) .TypeConstraint<qint32>("Tinput") .TypeConstraint<qint32>("out_type"), QuantizedReluOp<qint32>); REGISTER_KERNEL_BUILDER(Name("QuantizedRelu") .Device(DEVICE_CPU) .TypeConstraint<quint8>("Tinput") .TypeConstraint<quint8>("out_type"), QuantizedReluOp<quint8>); REGISTER_KERNEL_BUILDER(Name("QuantizedRelu6") .Device(DEVICE_CPU) .TypeConstraint<qint32>("Tinput") .TypeConstraint<qint32>("out_type"), QuantizedRelu6Op<qint32>); REGISTER_KERNEL_BUILDER(Name("QuantizedRelu6") .Device(DEVICE_CPU) .TypeConstraint<quint8>("Tinput") .TypeConstraint<quint8>("out_type"), QuantizedRelu6Op<quint8>); }
#define EIGEN_USE_THREADS #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class QuantizedActivationsTest : public OpsTestBase { protected: }; TEST_F(QuantizedActivationsTest, TestRelu) { TF_ASSERT_OK(NodeDefBuilder("quantized_relu_op", "QuantizedRelu") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = -128.0f; const float input_max = 127.0f; const int input_width = 2; const int input_height = 4; Tensor input_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>(&input_float, {-100, -1, 0, 1, 3, 6, 7, 100}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); Tensor expected_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>(&expected_float, {0, 0, 0, 1, 3, 6, 7, 100}); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {input_min}); AddInputFromArray<float>(TensorShape({}), {input_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } TEST_F(QuantizedActivationsTest, TestRelu6) { TF_ASSERT_OK(NodeDefBuilder("quantized_relu6_op", "QuantizedRelu6") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = -128.0f; const float input_max = 127.0f; const int input_width = 2; const int input_height = 4; Tensor input_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>(&input_float, {-100, -1, 0, 1, 3, 6, 7, 100}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); Tensor expected_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>(&expected_float, {0, 0, 0, 1, 3, 6, 6, 6}); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {input_min}); AddInputFromArray<float>(TensorShape({}), {input_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_activation_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_activation_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
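As a rough illustration of the clamping performed by the kernels above, here is a sketch assuming the usual quint8 affine mapping; QuantizeFloat and QuantizedRelu6Clamp are invented names rather than TensorFlow APIs. Because the float-to-quantized mapping is monotonic, ReLU6 on quantized data reduces to clamping the raw 8-bit codes between the quantized representations of 0.0f and 6.0f.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical quantizer: maps x in [min, max] onto [0, 255] with rounding.
uint8_t QuantizeFloat(float x, float min, float max) {
  const float q = (x - min) * 255.0f / (max - min) + 0.5f;
  return static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, q)));
}

// ReLU6 expressed directly on the quantized codes (requires C++17 for std::clamp).
void QuantizedRelu6Clamp(const std::vector<uint8_t>& in, float min, float max,
                         std::vector<uint8_t>& out) {
  const uint8_t lo = QuantizeFloat(0.0f, min, max);  // quantized 0.0f
  const uint8_t hi = QuantizeFloat(6.0f, min, max);  // quantized 6.0f
  out.resize(in.size());
  for (std::size_t i = 0; i < in.size(); ++i) out[i] = std::clamp(in[i], lo, hi);
}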
72eef4bf-eeb4-4ef8-b020-011e9d46a778
cpp
tensorflow/tensorflow
broadcast_to_op
tensorflow/compiler/tf2xla/kernels/broadcast_to_op.cc
tensorflow/core/kernels/broadcast_to_op_test.cc
#include <vector> #include "tensorflow/compiler/tf2xla/lib/broadcast.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { class BroadcastToOp : public XlaOpKernel { public: explicit BroadcastToOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { TensorShape output_shape; OP_REQUIRES_OK(context, context->ConstantInputAsShape( 1, &output_shape, xla::ValueInferenceMode::kUpperBound)); auto output_status_or = BroadcastTo(context->Input(0), output_shape.dim_sizes()); OP_REQUIRES_OK(context, output_status_or.status()); auto output = output_status_or.value(); std::vector<bool> dynamic_dims; OP_REQUIRES_OK( context, context->ResolveInputDynamismIntoPredVector(1, &dynamic_dims)); for (int64_t dim = 0; dim < dynamic_dims.size(); ++dim) { if (dynamic_dims[dim]) { output = xla::SetDimensionSize( output, xla::Reshape(xla::Slice(context->Input(1), {dim}, {dim + 1}, {1}), {}), dim); } } context->SetOutput(0, output); } }; REGISTER_XLA_OP(Name("BroadcastTo").CompileTimeConstantInput("shape"), BroadcastToOp); } }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { template <typename InputShape> static Graph* BroadcastTo(int dim0, int dim1, InputShape input_shape) { Graph* g = new Graph(OpRegistry::Global()); Tensor input(DT_FLOAT, input_shape(dim0, dim1)); input.flat<float>() = input.flat<float>().setRandom(); Tensor shape(DT_INT32, TensorShape({2})); shape.flat<int32>()(0) = dim0; shape.flat<int32>()(1) = dim1; Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, shape)) .Attr("T", DT_FLOAT) .Attr("Tidx", DT_INT32) .Finalize(g, &node)); return g; } #define BM_BroadcastTo_InnerDim(DIM0, DIM1, type) \ static void BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1( \ ::testing::benchmark::State& state) { \ test::Benchmark(#type, \ BroadcastTo(DIM0, DIM1, \ [](int dim0, int dim1) { \ return TensorShape({dim0, 1}); \ }), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \ DIM1); \ } \ BENCHMARK(BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1)->UseRealTime(); #define BM_BroadcastTo_OuterDim(DIM0, DIM1, type) \ static void BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1( \ ::testing::benchmark::State& state) { \ test::Benchmark(#type, \ BroadcastTo(DIM0, DIM1, \ [](int dim0, int dim1) { \ return TensorShape({1, dim1}); \ }), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \ DIM1); \ } \ BENCHMARK(BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1)->UseRealTime(); BM_BroadcastTo_InnerDim(64, 64, cpu); BM_BroadcastTo_InnerDim(128, 128, cpu); BM_BroadcastTo_InnerDim(256, 256, cpu); BM_BroadcastTo_InnerDim(512, 512, cpu); BM_BroadcastTo_InnerDim(1024, 1024, cpu); BM_BroadcastTo_InnerDim(500, 20000, cpu); BM_BroadcastTo_OuterDim(64, 64, cpu); BM_BroadcastTo_OuterDim(128, 128, cpu); BM_BroadcastTo_OuterDim(256, 256, cpu); BM_BroadcastTo_OuterDim(512, 512, cpu); BM_BroadcastTo_OuterDim(1024, 1024, cpu); BM_BroadcastTo_OuterDim(500, 20000, cpu); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/broadcast_to_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/broadcast_to_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
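The benchmarks above exercise BroadcastTo with both a {dim0, 1} and a {1, dim1} input. As a framework-free illustration of the outer-dimension case (BroadcastRow is an invented helper, not a TensorFlow API), broadcasting a single row into a {dim0, dim1} result amounts to copying the row dim0 times:

#include <cstddef>
#include <vector>

std::vector<float> BroadcastRow(const std::vector<float>& row, int dim0) {
  std::vector<float> out;
  out.reserve(row.size() * static_cast<std::size_t>(dim0));
  for (int i = 0; i < dim0; ++i) {
    out.insert(out.end(), row.begin(), row.end());  // repeat the {1, dim1} row
  }
  return out;  // row-major {dim0, dim1} buffer
}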
c36b89df-44db-42b2-bb20-ef6a5f8cc238
cpp
tensorflow/tensorflow
quantized_concat_op
tensorflow/core/kernels/quantized_concat_op.cc
tensorflow/core/kernels/quantized_concat_op_test.cc
#define EIGEN_USE_THREADS #include <limits> #include <utility> #include <vector> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/concat_lib_cpu.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { namespace { template <typename T> struct RequantizeCopier { RequantizeCopier( const std::vector<std::pair<float, float>>* input_min_and_max, float output_min, float output_max) : output_min(output_min), output_max(output_max), input_min_and_max(input_min_and_max) {} inline void Copy(T* dst, const T* src, int input_index, size_t n) { const float input_min = (*input_min_and_max)[input_index].first; const float input_max = (*input_min_and_max)[input_index].second; if (input_min == output_min && input_max == output_max) { DCHECK(DataTypeCanUseMemcpy(DataTypeToEnum<T>::v())); memcpy(dst, src, n * sizeof(T)); } else { Eigen::array<Eigen::DenseIndex, 1> dims; dims[0] = n; typename TTypes<T, 1>::UnalignedConstTensor input_array(src, dims); typename TTypes<T, 1>::UnalignedTensor output_array(dst, dims); QuantizedToFloatStruct<T> q2f(input_min, input_max); auto input_float = DEQUANTIZE_WITH_EIGEN(input_array, q2f); FloatToQuantizedStruct<T> f2q(output_min, output_max); auto input_requantized = QUANTIZE_WITH_EIGEN(input_float, f2q, T); output_array = input_requantized; } } float output_min; float output_max; const std::vector<std::pair<float, float>>* input_min_and_max; }; } template <typename T> class QuantizedConcatOp : public OpKernel { public: typedef std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>> ConstMatrixVector; explicit QuantizedConcatOp(OpKernelConstruction* c) : OpKernel(c) {} Status CalculateInputAndOutputRange( const OpInputList& input_mins, const OpInputList& input_maxes, const size_t N, std::vector<std::pair<float, float>>* input_mins_and_maxes, float* output_min, float* output_max) { input_mins_and_maxes->reserve(N); float overall_min = std::numeric_limits<float>::max(); float overall_max = std::numeric_limits<float>::lowest(); for (int i = 0; i < N; ++i) { if (input_mins[i].NumElements() != 1) { return errors::InvalidArgument( "input_mins each tensor's num elements must be 1, given num " "elements ", input_mins[i].NumElements(), " in index ", i); } if (input_maxes[i].NumElements() != 1) { return errors::InvalidArgument( "input_maxes each tensor's num elements must be 1, given num " "elements ", input_maxes[i].NumElements(), " in index ", i); } const float input_min = input_mins[i].flat<float>()(0); const float input_max = input_maxes[i].flat<float>()(0); input_mins_and_maxes->emplace_back(input_min, input_max); overall_min = std::min(overall_min, input_min); overall_max = std::max(overall_max, input_max); } overall_min = std::min(0.0f, overall_min); if (std::is_signed<T>::value) { const float largest_value = std::max(std::abs(overall_min), std::abs(overall_max)); *output_min = -largest_value; *output_max = largest_value; } else { *output_min = overall_min; *output_max = overall_max; } return absl::OkStatus(); } int64_t CalculateInputsDim(const TensorShape& input_shape, const int32_t concat_dim) { int64_t inputs_flat_dim0 = 1; for (int d = 0; d < concat_dim; ++d) { inputs_flat_dim0 *= input_shape.dim_size(d); } return inputs_flat_dim0; } Status CalculateConcatDims(const size_t 
N, const TensorShape& input_shape, int input_dims, const OpInputList& values, const int32_t concat_dim, const int64_t inputs_flat_dim0, ConstMatrixVector* inputs_flat, int* output_concat_dim) { inputs_flat->reserve(N); *output_concat_dim = 0; const bool input_is_scalar = TensorShapeUtils::IsScalar(input_shape); for (int i = 0; i < N; ++i) { const auto in = values[i]; const bool in_is_scalar = TensorShapeUtils::IsScalar(in.shape()); if (!(in.dims() == input_dims || (input_is_scalar && in_is_scalar))) { return errors::InvalidArgument( "ConcatOp : Ranks of all input tensors should match: shape[0] = ", input_shape.DebugString(), " vs. shape[", i, "] = ", in.shape().DebugString()); } for (int j = 0; j < input_dims; ++j) { if (j == concat_dim) { continue; } if (in.dim_size(j) != input_shape.dim_size(j)) { return errors::InvalidArgument( "ConcatOp : Dimensions of inputs should match: shape[0] = ", input_shape.DebugString(), " vs. shape[", i, "] = ", in.shape().DebugString()); } } if (in.NumElements() > 0) { int64_t inputs_flat_dim1 = in.NumElements() / inputs_flat_dim0; inputs_flat->emplace_back(new typename TTypes<T, 2>::ConstMatrix( in.shaped<T, 2>({inputs_flat_dim0, inputs_flat_dim1}))); } *output_concat_dim += in.dims() > 0 ? in.dim_size(concat_dim) : 1; } return absl::OkStatus(); } void Compute(OpKernelContext* context) override { const Tensor* concat_dim_tensor = nullptr; OP_REQUIRES_OK(context, context->input("concat_dim", &concat_dim_tensor)); OP_REQUIRES( context, TensorShapeUtils::IsScalar(concat_dim_tensor->shape()), errors::InvalidArgument( "Concat dim tensor should be a scalar integer, but got shape ", concat_dim_tensor->shape().DebugString())); const int32_t concat_dim = concat_dim_tensor->scalar<int32>()(); OpInputList values; OP_REQUIRES_OK(context, context->input_list("values", &values)); const size_t N = values.size(); OpInputList input_mins; OP_REQUIRES_OK(context, context->input_list("input_mins", &input_mins)); OP_REQUIRES(context, (input_mins.size() == N), errors::InvalidArgument( "QuantizedConcatOp : Expected mins input list length ", input_mins.size(), " to equal values length ", N)); OpInputList input_maxes; OP_REQUIRES_OK(context, context->input_list("input_maxes", &input_maxes)); OP_REQUIRES(context, (input_maxes.size() == N), errors::InvalidArgument( "QuantizedConcatOp : Expected maxes input list length ", input_maxes.size(), " to equal values length ", N)); const int input_dims = values[0].dims(); const TensorShape& input_shape = values[0].shape(); OP_REQUIRES( context, (0 <= concat_dim && concat_dim < input_dims), errors::InvalidArgument( "ConcatOp : Expected concatenating dimensions in the range [", 0, ", ", input_dims, "), but got ", concat_dim)); float output_min = std::numeric_limits<float>::max(); float output_max = std::numeric_limits<float>::lowest(); std::vector<std::pair<float, float>> input_mins_and_maxes; OP_REQUIRES_OK(context, CalculateInputAndOutputRange(input_mins, input_maxes, N, &input_mins_and_maxes, &output_min, &output_max)); const int64_t inputs_flat_dim0 = CalculateInputsDim(input_shape, concat_dim); ConstMatrixVector inputs_flat; int output_concat_dim; OP_REQUIRES_OK( context, CalculateConcatDims(N, input_shape, input_dims, values, concat_dim, inputs_flat_dim0, &inputs_flat, &output_concat_dim)); TensorShape output_shape(input_shape); if (output_shape.dims() == 0) { output_shape.AddDim(output_concat_dim); } else { output_shape.set_dim(concat_dim, output_concat_dim); } Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, 
output_shape, &output)); if (output->NumElements() > 0) { int64_t output_dim1 = output->NumElements() / inputs_flat_dim0; auto output_flat = output->shaped<T, 2>({inputs_flat_dim0, output_dim1}); ConcatCPUImpl<T>( context->device(), inputs_flat, sizeof(T) , RequantizeCopier<T>(&input_mins_and_maxes, output_min, output_max), &output_flat); } Tensor* output_min_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min_tensor)); output_min_tensor->flat<float>()(0) = output_min; Tensor* output_max_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max_tensor)); output_max_tensor->flat<float>()(0) = output_max; } }; #define REGISTER_QUANTIZED_CONCAT(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedConcat") \ .Device(DEVICE_CPU) \ .TypeConstraint<type>("T") \ .HostMemory("concat_dim"), \ QuantizedConcatOp<type>) REGISTER_QUANTIZED_CONCAT(quint8); REGISTER_QUANTIZED_CONCAT(qint32); #undef REGISTER_QUANTIZED_CONCAT }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { using test::graph::Constant; class QuantizedConcatTest : public OpsTestBase { protected: QuantizedConcatTest() {} void TestSmall8Bit(float first_min, float first_max, float second_min, float second_max); void TestSmall32Bit(float first_min, float first_max, float second_min, float second_max); void TestSecondDim8Bit(float first_min, float first_max, float second_min, float second_max); void TestInvalidMinMax(const Tensor& first_min, const Tensor& first_max); }; TEST_F(QuantizedConcatTest, InvalidMin) { Tensor first_min(DT_FLOAT, {3}); test::FillValues<float>(&first_min, {0.0, 0.0, 0.0}); Tensor first_max(DT_FLOAT, {}); test::FillValues<float>(&first_max, {0.0}); TestInvalidMinMax(first_min, first_max); } TEST_F(QuantizedConcatTest, InvalidMax) { Tensor first_min(DT_FLOAT, {}); test::FillValues<float>(&first_min, {0.0}); Tensor first_max(DT_FLOAT, {3, 0, 2}); TestInvalidMinMax(first_min, first_max); } void QuantizedConcatTest::TestInvalidMinMax(const Tensor& first_min, const Tensor& first_max) { TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat") .Input(FakeInput(DT_INT32)) .Input(FakeInput(2, DT_QUINT8)) .Input(FakeInput(2, DT_FLOAT)) .Input(FakeInput(2, DT_FLOAT)) .Attr("N", 2) .Attr("T", DataTypeToEnum<quint8>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); Tensor first_quantized(DT_QUINT8, {1}); test::FillValues<quint8>(&first_quantized, {1}); Tensor second_quantized(DT_QUINT8, {1}); test::FillValues<quint8>(&second_quantized, {1}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<quint8>(first_quantized.shape(), first_quantized.flat<quint8>()); AddInputFromArray<quint8>(second_quantized.shape(), second_quantized.flat<quint8>()); AddInputFromArray<float>(first_min.shape(), first_min.flat<float>()); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<float>(first_max.shape(), first_max.flat<float>()); AddInputFromArray<float>(TensorShape({}), {2.0}); EXPECT_TRUE(errors::IsInvalidArgument(RunOpKernel())); } TEST_F(QuantizedConcatTest, Small8Bit) { TestSmall8Bit(0.0f, 255.0f, 0.0f, 25.0f); } TEST_F(QuantizedConcatTest, Small8BitSameRange) { TestSmall8Bit(0.0f, 255.0f, 0.0f, 255.0f); } void QuantizedConcatTest::TestSmall8Bit(float first_min, float first_max, float second_min, float second_max) { TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat") .Input(FakeInput(DT_INT32)) .Input(FakeInput(2, DT_QUINT8)) .Input(FakeInput(2, DT_FLOAT)) .Input(FakeInput(2, DT_FLOAT)) .Attr("N", 2) .Attr("T", DataTypeToEnum<quint8>::v()) 
.Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int first_batch = 2; const int first_height = 2; const int first_width = 3; Tensor first_float(DT_FLOAT, {first_batch, first_height, first_width}); test::FillValues<float>(&first_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Tensor first_quantized = FloatTensorToQuantized<quint8>(first_float, first_min, first_max); const int second_batch = 2; const int second_height = 2; const int second_width = 3; Tensor second_float(DT_FLOAT, {second_batch, second_height, second_width}); test::FillValues<float>(&second_float, {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}); Tensor second_quantized = FloatTensorToQuantized<quint8>(second_float, second_min, second_max); const int expected_batch = first_batch + second_batch; Tensor expected_float(DT_FLOAT, {expected_batch, first_height, first_width}); test::FillValues<float>(&expected_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<quint8>(first_quantized.shape(), first_quantized.flat<quint8>()); AddInputFromArray<quint8>(second_quantized.shape(), second_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {first_min}); AddInputFromArray<float>(TensorShape({}), {second_min}); AddInputFromArray<float>(TensorShape({}), {first_max}); AddInputFromArray<float>(TensorShape({}), {second_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } TEST_F(QuantizedConcatTest, Small32Bit) { TestSmall32Bit(0.0f, 1200.0f, 0.0f, 2400.0f); } TEST_F(QuantizedConcatTest, Small32BitSameRange) { TestSmall32Bit(-2400.0f, 2400.0f, -2400.0f, 2400.0f); } TEST_F(QuantizedConcatTest, Small32BitOneDimSameRangeAsOutput) { TestSmall32Bit(-2400.0f, 2400.0f, -1200.0f, 2400.0f); } void QuantizedConcatTest::TestSmall32Bit(float first_min, float first_max, float second_min, float second_max) { TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat") .Input(FakeInput(DT_INT32)) .Input(FakeInput(2, DT_QINT32)) .Input(FakeInput(2, DT_FLOAT)) .Input(FakeInput(2, DT_FLOAT)) .Attr("N", 2) .Attr("T", DataTypeToEnum<qint32>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int first_batch = 2; const int first_height = 2; const int first_width = 3; Tensor first_float(DT_FLOAT, {first_batch, first_height, first_width}); test::FillValues<float>(&first_float, {100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200}); Tensor first_quantized = FloatTensorToQuantized<qint32>(first_float, first_min, first_max); const int second_batch = 2; const int second_height = 2; const int second_width = 3; Tensor second_float(DT_FLOAT, {second_batch, second_height, second_width}); test::FillValues<float>(&second_float, {1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400}); Tensor second_quantized = FloatTensorToQuantized<qint32>(second_float, second_min, second_max); const int expected_batch = first_batch + second_batch; Tensor expected_float(DT_FLOAT, {expected_batch, first_height, first_width}); test::FillValues<float>( &expected_float, {100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400}); 
AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<qint32>(first_quantized.shape(), first_quantized.flat<qint32>()); AddInputFromArray<qint32>(second_quantized.shape(), second_quantized.flat<qint32>()); AddInputFromArray<float>(TensorShape({}), {first_min}); AddInputFromArray<float>(TensorShape({}), {second_min}); AddInputFromArray<float>(TensorShape({}), {first_max}); AddInputFromArray<float>(TensorShape({}), {second_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } TEST_F(QuantizedConcatTest, SecondDim8Bit) { TestSecondDim8Bit(-10.0f, 150.0f, 0.0f, 200.0f); } TEST_F(QuantizedConcatTest, SecondDim8BitSameRange) { TestSecondDim8Bit(-10.0f, 150.0f, -10.0f, 150.0f); } void QuantizedConcatTest::TestSecondDim8Bit(float first_min, float first_max, float second_min, float second_max) { TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat") .Input(FakeInput(DT_INT32)) .Input(FakeInput(2, DT_QUINT8)) .Input(FakeInput(2, DT_FLOAT)) .Input(FakeInput(2, DT_FLOAT)) .Attr("N", 2) .Attr("T", DataTypeToEnum<quint8>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int first_batch = 2; const int first_height = 2; const int first_width = 3; Tensor first_float(DT_FLOAT, {first_batch, first_height, first_width}); test::FillValues<float>(&first_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Tensor first_quantized = FloatTensorToQuantized<quint8>(first_float, first_min, first_max); const int second_batch = 2; const int second_height = 2; const int second_width = 3; Tensor second_float(DT_FLOAT, {second_batch, second_height, second_width}); test::FillValues<float>(&second_float, {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}); Tensor second_quantized = FloatTensorToQuantized<quint8>(second_float, second_min, second_max); const int expected_height = first_height + second_height; Tensor expected_float(DT_FLOAT, {first_batch, expected_height, first_width}); test::FillValues<float>(&expected_float, {1, 2, 3, 4, 5, 6, 13, 14, 15, 16, 17, 18, 7, 8, 9, 10, 11, 12, 19, 20, 21, 22, 23, 24}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<quint8>(first_quantized.shape(), first_quantized.flat<quint8>()); AddInputFromArray<quint8>(second_quantized.shape(), second_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {first_min}); AddInputFromArray<float>(TensorShape({}), {second_min}); AddInputFromArray<float>(TensorShape({}), {first_max}); AddInputFromArray<float>(TensorShape({}), {second_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 1.0); } template <typename T> static void ConcatHelper(::testing::benchmark::State& state, int concat_dimension, bool same_limits, int dim2) { Graph* g = new Graph(OpRegistry::Global()); DataType dt = DataTypeToEnum<T>::v(); const int kDim1 = 100; TensorShape shape({kDim1, dim2}); Tensor concat_dim = test::AsScalar<int32>(concat_dimension); Tensor in0(dt, shape); in0.flat<T>().setRandom(); Tensor in1(dt, 
shape); in1.flat<T>().setRandom(); Tensor mins0 = test::AsScalar<float>(-1.0); Tensor maxes0 = test::AsScalar<float>(1.0); Tensor mins1 = test::AsScalar<float>(same_limits ? -1.0 : -255.0); Tensor maxes1 = test::AsScalar<float>(same_limits ? 1.0 : 255.0); Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "QuantizedConcat") .Input(Constant(g, concat_dim)) .Input({Constant(g, in0), Constant(g, in1)}) .Input({Constant(g, mins0), Constant(g, mins1)}) .Input({Constant(g, maxes0), Constant(g, maxes1)}) .Attr("N", 2) .Attr("T", dt) .Finalize(g, &node)); test::Benchmark("cpu", g, false).Run(state); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * ((kDim1 * dim2) + (kDim1 * dim2)) * sizeof(T)); } static void BM_QConcatDim0SameLimitQInt32(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 0 , true , dim2); } static void BM_QConcatDim1SameLimitQInt32(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 1 , true , dim2); } static void BM_QConcatDim0DifferLimitQInt32( ::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 0 , false , dim2); } static void BM_QConcatDim1DifferLimitQInt32( ::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 1 , false , dim2); } BENCHMARK(BM_QConcatDim0SameLimitQInt32) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); BENCHMARK(BM_QConcatDim1SameLimitQInt32) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); BENCHMARK(BM_QConcatDim0DifferLimitQInt32) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); BENCHMARK(BM_QConcatDim1DifferLimitQInt32) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); static void BM_QConcatDim0SameLimitQUint8(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 0 , true , dim2); } static void BM_QConcatDim1SameLimitQUint8(::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 1 , true , dim2); } static void BM_QConcatDim0DifferLimitQUint8( ::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 0 , false , dim2); } static void BM_QConcatDim1DifferLimitQUint8( ::testing::benchmark::State& state) { const int dim2 = state.range(0); ConcatHelper<qint32>(state, 1 , false , dim2); } BENCHMARK(BM_QConcatDim0SameLimitQUint8) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); BENCHMARK(BM_QConcatDim1SameLimitQUint8) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); BENCHMARK(BM_QConcatDim0DifferLimitQUint8) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); BENCHMARK(BM_QConcatDim1DifferLimitQUint8) ->UseRealTime() ->Arg(1000) ->Arg(20000) ->Arg(100000); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_concat_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_concat_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
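A simplified sketch of the requantization that RequantizeCopier above applies when an input's range differs from the merged output range; Requantize is an assumed helper name, not the kernel's API. Each value is dequantized with its own [in_min, in_max] and re-quantized into [out_min, out_max] before being written into the concatenated buffer; when the two ranges are equal this reduces to a plain copy, which is why the kernel falls back to memcpy in that case.

#include <cmath>
#include <cstdint>

uint8_t Requantize(uint8_t v, float in_min, float in_max,
                   float out_min, float out_max) {
  const float as_float = in_min + v * (in_max - in_min) / 255.0f;       // dequantize
  const float q = (as_float - out_min) * 255.0f / (out_max - out_min);  // requantize
  return static_cast<uint8_t>(std::lround(std::fmin(255.0f, std::fmax(0.0f, q))));
}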
19496dcf-cbb6-4057-b940-876a7c3c6632
cpp
tensorflow/tensorflow
conv_ops
tensorflow/compiler/tf2xla/kernels/conv_ops.cc
tensorflow/core/kernels/conv_ops_test.cc
#include <cstdint> #include <vector> #include "tensorflow/compiler/tf2xla/kernels/conv_op_helpers.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/matrix.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal_util.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/ops_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { namespace { class ConvOp : public XlaOpKernel { public: explicit ConvOp(OpKernelConstruction* ctx, int num_spatial_dims, bool depthwise) : XlaOpKernel(ctx) { absl::StatusOr<ConvOpAttrs> attrs = ConvOpAttrs::Create(num_spatial_dims, depthwise, ctx); OP_REQUIRES_OK(ctx, attrs.status()); attrs_ = attrs.value(); } void Compile(XlaOpKernelContext* ctx) override { absl::StatusOr<xla::XlaOp> conv = MakeXlaForwardConvOp( ctx->op_kernel().type_string(), ctx->Input(0), ctx->Input(1), attrs_); OP_REQUIRES_OK(ctx, conv.status()); ctx->SetOutput(0, conv.value()); } protected: ConvOpAttrs attrs_; private: ConvOp(const ConvOp&) = delete; void operator=(const ConvOp&) = delete; }; class ConvNDOp : public XlaOpKernel { public: explicit ConvNDOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { absl::StatusOr<ConvNDOpAttrs> attrs = ConvNDOpAttrs::Create(ctx); OP_REQUIRES_OK(ctx, attrs.status()); attrs_ = attrs.value(); } void Compile(XlaOpKernelContext* ctx) override { OP_REQUIRES_VALUE(xla::Shape input_shape, ctx, ctx->InputXlaShape(0)); int num_spatial_dims = input_shape.rank() - 1 - attrs_.batch_dims; OP_REQUIRES_OK(ctx, CheckValidPadding(attrs_.padding, attrs_.explicit_paddings, num_spatial_dims + 2, attrs_.data_format)); ConvOpAttrs forward_attrs; forward_attrs.depthwise = false; forward_attrs.num_spatial_dims = num_spatial_dims; forward_attrs.dilations = attrs_.dilations.empty() ? 
std::vector<int32>(num_spatial_dims + 2, 1) : attrs_.dilations; forward_attrs.strides = attrs_.strides; forward_attrs.padding = attrs_.padding; forward_attrs.explicit_paddings = attrs_.explicit_paddings; forward_attrs.data_format = attrs_.data_format; xla::XlaOp input = ctx->Input(0); xla::XlaOp filter = ctx->Input(1); if (attrs_.batch_dims == 0) { xla::Shape expanded_input_shape(input_shape); for (int i = 0; i < expanded_input_shape.rank() - 1; ++i) { expanded_input_shape.set_dimensions(i + 1, input_shape.dimensions(i)); } expanded_input_shape.set_dimensions(0, 1); input = xla::Reshape(input, expanded_input_shape.dimensions()); } else if (attrs_.batch_dims > 1) { std::vector<int64_t> to_collapse(attrs_.batch_dims); for (int i = 0; i < attrs_.batch_dims; ++i) { to_collapse[i] = i; } input = xla::Collapse(input, to_collapse); } absl::StatusOr<xla::XlaOp> forward = MakeXlaForwardConvOp( ctx->op_kernel().type_string(), input, filter, forward_attrs); OP_REQUIRES_OK(ctx, forward.status()); xla::XlaOp out = forward.value(); auto* builder = out.builder(); OP_REQUIRES_VALUE(xla::Shape out_shape, ctx, builder->GetShape(out)); if (attrs_.batch_dims == 0) { xla::Shape no_batch_shape(out_shape); no_batch_shape.DeleteDimension(0); out = xla::Reshape(out, no_batch_shape.dimensions()); } else if (attrs_.batch_dims > 1) { xla::Shape expanded_out_shape(input_shape); for (int i = attrs_.batch_dims; i < input_shape.rank(); ++i) { expanded_out_shape.set_dimensions( i, out_shape.dimensions(i - (attrs_.batch_dims - 1))); } out = xla::Reshape(out, expanded_out_shape.dimensions()); } ctx->SetOutput(0, out); } protected: ConvNDOpAttrs attrs_; }; REGISTER_XLA_CONV_OP(Name("Conv"), ConvNDOp); class Conv2DOp : public ConvOp { public: explicit Conv2DOp(OpKernelConstruction* ctx) : ConvOp(ctx, 2, false) {} }; REGISTER_XLA_CONV_OP(Name("Conv2D"), Conv2DOp); class Conv3DOp : public ConvOp { public: explicit Conv3DOp(OpKernelConstruction* ctx) : ConvOp(ctx, 3, false) {} }; REGISTER_XLA_CONV_OP(Name("Conv3D"), Conv3DOp); class DepthwiseConv2DOp : public ConvOp { public: explicit DepthwiseConv2DOp(OpKernelConstruction* ctx) : ConvOp(ctx, 2, true) {} }; REGISTER_XLA_CONV_OP(Name("DepthwiseConv2dNative"), DepthwiseConv2DOp); class ConvBackpropInputOp : public XlaOpKernel { public: explicit ConvBackpropInputOp(OpKernelConstruction* ctx, int num_spatial_dims, bool depthwise) : XlaOpKernel(ctx) { absl::StatusOr<ConvOpAttrs> attrs = ConvOpAttrs::Create(num_spatial_dims, depthwise, ctx); OP_REQUIRES_OK(ctx, attrs.status()); attrs_ = attrs.value(); } void Compile(XlaOpKernelContext* ctx) override { TensorShape input_tensor_shape; OP_REQUIRES_OK( ctx, ctx->ConstantInputAsShape(0, &input_tensor_shape, xla::ValueInferenceMode::kUpperBound)); xla::Shape input_shape = TensorShapeToXLAShape(ctx->input_xla_type(1), input_tensor_shape); OP_REQUIRES(ctx, input_shape.rank() == attrs_.num_spatial_dims + 2, errors::InvalidArgument( "The rank of the specified input shape must be " "num_spatial_dims + 2. 
Expected ", attrs_.num_spatial_dims + 2, " got ", input_shape.rank())); xla::XlaOp input_sizes = ctx->Input(0); absl::StatusOr<xla::XlaOp> in_backprop = MakeXlaBackpropInputConvOp( ctx->op_kernel().type_string(), input_shape, ctx->Input(1), ctx->Input(2), attrs_, &input_sizes); OP_REQUIRES_OK(ctx, in_backprop.status()); ctx->SetOutput(0, in_backprop.value()); } protected: ConvOpAttrs attrs_; private: ConvBackpropInputOp(const ConvBackpropInputOp&) = delete; void operator=(const ConvBackpropInputOp&) = delete; }; class Conv2DBackpropInputOp : public ConvBackpropInputOp { public: explicit Conv2DBackpropInputOp(OpKernelConstruction* ctx) : ConvBackpropInputOp(ctx, 2, false) {} }; REGISTER_XLA_CONV_OP( Name("Conv2DBackpropInput").CompileTimeConstantInput("input_sizes"), Conv2DBackpropInputOp); class Conv3DBackpropInputOp : public ConvBackpropInputOp { public: explicit Conv3DBackpropInputOp(OpKernelConstruction* ctx) : ConvBackpropInputOp(ctx, 3, false) {} }; REGISTER_XLA_CONV_OP( Name("Conv3DBackpropInputV2").CompileTimeConstantInput("input_sizes"), Conv3DBackpropInputOp); class DepthwiseConv2DBackpropInputOp : public ConvBackpropInputOp { public: explicit DepthwiseConv2DBackpropInputOp(OpKernelConstruction* ctx) : ConvBackpropInputOp(ctx, 2, true) {} }; REGISTER_XLA_CONV_OP(Name("DepthwiseConv2dNativeBackpropInput") .CompileTimeConstantInput("input_sizes"), DepthwiseConv2DBackpropInputOp); class ConvBackpropFilterOp : public XlaOpKernel { public: explicit ConvBackpropFilterOp(OpKernelConstruction* ctx, int num_spatial_dims, bool depthwise) : XlaOpKernel(ctx) { absl::StatusOr<ConvOpAttrs> attrs = ConvOpAttrs::Create(num_spatial_dims, depthwise, ctx); OP_REQUIRES_OK(ctx, attrs.status()); attrs_ = attrs.value(); } void Compile(XlaOpKernelContext* ctx) override { TensorShape filter_tensor_shape; OP_REQUIRES_OK( ctx, ctx->ConstantInputAsShape(1, &filter_tensor_shape, xla::ValueInferenceMode::kUpperBound)); xla::Shape filter_shape = TensorShapeToXLAShape(ctx->input_xla_type(0), filter_tensor_shape); absl::StatusOr<xla::XlaOp> filter_backprop = MakeXlaBackpropFilterConvOp( ctx->op_kernel().type_string(), ctx->Input(0), filter_shape, ctx->Input(2), attrs_); OP_REQUIRES_OK(ctx, filter_backprop.status()); ctx->SetOutput(0, filter_backprop.value()); } protected: ConvOpAttrs attrs_; private: ConvBackpropFilterOp(const ConvBackpropFilterOp&) = delete; void operator=(const ConvBackpropFilterOp&) = delete; }; class Conv2DBackpropFilterOp : public ConvBackpropFilterOp { public: explicit Conv2DBackpropFilterOp(OpKernelConstruction* ctx) : ConvBackpropFilterOp(ctx, 2, false) { } }; REGISTER_XLA_CONV_OP( Name("Conv2DBackpropFilter").CompileTimeConstantInput("filter_sizes"), Conv2DBackpropFilterOp); class Conv3DBackpropFilterOp : public ConvBackpropFilterOp { public: explicit Conv3DBackpropFilterOp(OpKernelConstruction* ctx) : ConvBackpropFilterOp(ctx, 3, false) { } }; REGISTER_XLA_CONV_OP( Name("Conv3DBackpropFilterV2").CompileTimeConstantInput("filter_sizes"), Conv3DBackpropFilterOp); class DepthwiseConv2DBackpropFilterOp : public ConvBackpropFilterOp { public: explicit DepthwiseConv2DBackpropFilterOp(OpKernelConstruction* ctx) : ConvBackpropFilterOp(ctx, 2, true) {} }; REGISTER_XLA_CONV_OP(Name("DepthwiseConv2dNativeBackpropFilter") .CompileTimeConstantInput("filter_sizes"), DepthwiseConv2DBackpropFilterOp); } }
#include <cmath> #include <optional> #include <string> #include <type_traits> #include <vector> #include "absl/algorithm/container.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/nn_ops_internal.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/conv_ops_gpu.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/tensor_float_32_utils.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { class FusedResizePadConvOpTest : public OpsTestBase { protected: template <typename T> void HandwrittenConv(DataType dtype) { const int stride = 1; TF_EXPECT_OK(NodeDefBuilder("fused_resize_op", "FusedResizeAndPadConv2D") .Input(FakeInput(dtype)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(dtype)) .Attr("T", dtype) .Attr("resize_align_corners", false) .Attr("mode", "REFLECT") .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 3; const int image_batch_count = 1; Tensor image(dtype, {image_batch_count, image_height, image_width, depth}); test::FillValues<T>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); const int filter_size = 3; const int filter_count = 1; Tensor filter(dtype, {filter_size, filter_size, depth, filter_count}); test::FillValues<T>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9}); const int resized_width = image_width; const int resized_height = image_height; const int top_padding = 0; const int bottom_padding = 0; const int left_padding = 0; const int right_padding = 0; AddInputFromArray<T>(image.shape(), image.flat<T>()); AddInputFromArray<int32>(TensorShape({2}), {resized_height, resized_width}); AddInputFromArray<int32>( TensorShape({4, 2}), {0, 0, top_padding, bottom_padding, left_padding, right_padding, 0, 0}); AddInputFromArray<T>(filter.shape(), filter.flat<T>()); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width; const int expected_height = image_height * filter_count; Tensor expected(dtype, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<T>( &expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121}); const Tensor& output = *GetOutput(0); test::ExpectTensorNear<T>(expected, output, 1e-5); } template <typename T> void CompareFusedAndSeparate(int input_width, int input_height, int input_depth, int resize_width, int resize_height, int y_padding, int x_padding, int filter_size, int filter_count, bool resize_align_corners, const string& pad_mode, int stride, const string& padding, DataType dtype) { Scope root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, input_height, input_width, input_depth})); test::FillIota<float>(&input_data, 1.0f); Output input = Const(root.WithOpName("input"), Input::Initializer(input_data)); Output casted_input = 
Cast(root.WithOpName("casted_input"), input, dtype); Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size, input_depth, filter_count})); test::FillIota<float>(&filter_data, 1.0f); Output filter = Const(root.WithOpName("filter"), Input::Initializer(filter_data)); Output casted_filter = Cast(root.WithOpName("casted_filter"), filter, dtype); Output resize_size = Const(root.WithOpName("resize_size"), {resize_height, resize_width}); Output resize = ResizeBilinear(root.WithOpName("resize"), input, resize_size, ResizeBilinear::AlignCorners(resize_align_corners)); Output casted_resize = Cast(root.WithOpName("cast"), resize, dtype); Output paddings = Const(root.WithOpName("paddings"), {{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}}); Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_resize, paddings, pad_mode); Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter, {1, stride, stride, 1}, padding); Output fused_conv = FusedResizeAndPadConv2D( root.WithOpName("fused_conv"), casted_input, resize_size, paddings, casted_filter, pad_mode, {1, stride, stride, 1}, padding, FusedResizeAndPadConv2D::ResizeAlignCorners(resize_align_corners)); tensorflow::GraphDef graph; TF_ASSERT_OK(root.ToGraphDef(&graph)); std::unique_ptr<tensorflow::Session> session( tensorflow::NewSession(tensorflow::SessionOptions())); TF_ASSERT_OK(session->Create(graph)); std::vector<Tensor> unfused_tensors; TF_ASSERT_OK(session->Run({}, {"conv"}, {}, &unfused_tensors)); std::vector<Tensor> fused_tensors; TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors)); test::ExpectClose(unfused_tensors[0], fused_tensors[0]); } template <typename T> void CompareFusedPadOnlyAndSeparate(int input_width, int input_height, int input_depth, int y_padding, int x_padding, int filter_size, int filter_count, const string& pad_mode, int stride, const string& padding, DataType dtype) { Scope root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_data(DT_FLOAT, TensorShape({1, input_height, input_width, input_depth})); test::FillIota<float>(&input_data, 1.0f); Output input = Const(root.WithOpName("input"), Input::Initializer(input_data)); Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype); Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size, input_depth, filter_count})); test::FillIota<float>(&filter_data, 1.0f); Output filter = Const(root.WithOpName("filter"), Input::Initializer(filter_data)); Output casted_filter = Cast(root.WithOpName("casted_filter"), filter, dtype); Output paddings = Const(root.WithOpName("paddings"), {{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}}); Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_input, paddings, pad_mode); Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter, {1, stride, stride, 1}, padding); Output fused_conv = FusedPadConv2D( root.WithOpName("fused_conv"), casted_input, paddings, casted_filter, pad_mode, {1, stride, stride, 1}, padding); tensorflow::GraphDef graph; TF_ASSERT_OK(root.ToGraphDef(&graph)); std::unique_ptr<tensorflow::Session> session( tensorflow::NewSession(tensorflow::SessionOptions())); TF_ASSERT_OK(session->Create(graph)); std::vector<Tensor> unfused_tensors; TF_ASSERT_OK(session->Run({}, {"conv"}, {}, &unfused_tensors)); std::vector<Tensor> fused_tensors; TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors)); test::ExpectClose(unfused_tensors[0], fused_tensors[0]); } }; 
TEST_F(FusedResizePadConvOpTest, HandwrittenConvHalf) { HandwrittenConv<Eigen::half>(DT_HALF); } TEST_F(FusedResizePadConvOpTest, HandwrittenConvFloat) { HandwrittenConv<float>(DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, HandwrittenConvDouble) { HandwrittenConv<double>(DT_DOUBLE); } TEST_F(FusedResizePadConvOpTest, IdentityComparativeHalf) { CompareFusedAndSeparate<Eigen::half>(10, 10, 1, 10, 10, 0, 0, 1, 1, false, "REFLECT", 1, "SAME", DT_HALF); } TEST_F(FusedResizePadConvOpTest, IdentityComparativeFloat) { CompareFusedAndSeparate<float>(10, 10, 1, 10, 10, 0, 0, 1, 1, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, IdentityComparativeDouble) { CompareFusedAndSeparate<double>(10, 10, 1, 10, 10, 0, 0, 1, 1, false, "REFLECT", 1, "SAME", DT_DOUBLE); } TEST_F(FusedResizePadConvOpTest, ConvOnlyComparative) { CompareFusedAndSeparate<float>(10, 10, 3, 10, 10, 0, 0, 4, 4, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeOnlyComparative) { CompareFusedAndSeparate<float>(10, 10, 1, 20, 20, 0, 0, 1, 1, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAndConvComparative) { CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvComparative) { CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAndConvStridedComparative) { CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 2, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvValidComparative) { CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1, "VALID", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, PadOnlyComparative) { CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, PadOnlyWithChannelsComparative) { CompareFusedAndSeparate<float>(4, 4, 3, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAndPadComparative) { CompareFusedAndSeparate<float>(4, 4, 1, 6, 6, 2, 2, 1, 1, false, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, PadOnlySymmetricComparative) { CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "SYMMETRIC", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparative) { CompareFusedAndSeparate<float>(4, 4, 3, 6, 6, 2, 2, 1, 1, false, "SYMMETRIC", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparativeLarge) { CompareFusedAndSeparate<float>(1000, 1000, 3, 1006, 1006, 2, 2, 1, 1, false, "SYMMETRIC", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeHalf) { CompareFusedPadOnlyAndSeparate<Eigen::half>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1, "SAME", DT_HALF); } TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeBFloat16) { CompareFusedPadOnlyAndSeparate<bfloat16>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1, "SAME", DT_BFLOAT16); } TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeFloat) { CompareFusedPadOnlyAndSeparate<float>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeDouble) { CompareFusedPadOnlyAndSeparate<double>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1, "SAME", DT_DOUBLE); } TEST_F(FusedResizePadConvOpTest, NoResizeConvOnlyComparative) { 
CompareFusedPadOnlyAndSeparate<float>(10, 10, 3, 0, 0, 4, 4, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyComparative) { CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyWithChannelsComparative) { CompareFusedPadOnlyAndSeparate<float>(4, 4, 3, 2, 2, 1, 1, "REFLECT", 1, "SAME", DT_FLOAT); } TEST_F(FusedResizePadConvOpTest, NoResizePadOnlySymmetricComparative) { CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "SYMMETRIC", 1, "SAME", DT_FLOAT); } class ConvOpTest : public OpsTestBase { protected: void HandwrittenConv() { const int stride = 1; TF_EXPECT_OK(NodeDefBuilder("conv_op", "Conv2D") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DT_FLOAT) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); const int depth = 1; const int image_width = 4; const int image_height = 3; const int image_batch_count = 1; Tensor image(DT_FLOAT, {image_batch_count, image_height, image_width, depth}); test::FillValues<float>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); const int filter_size = 3; const int filter_count = 1; Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count}); test::FillValues<float>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9}); AddInputFromArray<float>(image.shape(), image.flat<float>()); AddInputFromArray<float>(filter.shape(), filter.flat<float>()); TF_ASSERT_OK(RunOpKernel()); const int expected_width = image_width; const int expected_height = image_height * filter_count; Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<float>( &expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121}); const Tensor& output = *GetOutput(0); test::ExpectTensorNear<float>(expected, output, 1e-5); } void AnisotropicStrides() { const int stride_width = 3; const int stride_height = 1; TF_EXPECT_OK(NodeDefBuilder("conv_op", "Conv2D") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DT_FLOAT) .Attr("strides", {1, stride_height, stride_width, 1}) .Attr("padding", "VALID") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); const int depth = 1; const int image_width = 6; const int image_height = 3; const int image_batch_count = 1; Tensor image(DT_FLOAT, {image_batch_count, image_height, image_width, depth}); test::FillValues<float>(&image, { 3, 2, 1, -1, -2, -3, 4, 3, 2, -2, -3, -4, 5, 4, 3, -3, -4, -5, }); const int filter_size = 2; const int filter_count = 1; Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count}); test::FillValues<float>(&filter, { 1, 2, 3, 4, }); AddInputFromArray<float>(image.shape(), image.flat<float>()); AddInputFromArray<float>(filter.shape(), filter.flat<float>()); TF_ASSERT_OK(RunOpKernel()); const int expected_width = 2; const int expected_height = 2; Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height, expected_width, filter_count})); test::FillValues<float>(&expected, {31, -23, 41, -33}); const Tensor& output = *GetOutput(0); test::ExpectTensorNear<float>(expected, output, 1e-5); } }; TEST_F(ConvOpTest, HandwrittenConv) { HandwrittenConv(); } TEST_F(ConvOpTest, AnisotropicStride) { AnisotropicStrides(); } template <typename T> class FusedConv2DOpTest : public OpsTestBase { protected: static constexpr int kDepth = 4; static constexpr int kImageWidth = 32; static constexpr int kImageHeight = 32; static constexpr 
int kImageBatchCount = 8; static constexpr bool kIsInt8 = std::is_same<T, int8>::value || std::is_same<T, qint8>::value; using BiasAddGraphRunner = std::function<void(const Tensor& input_data, const Tensor& filter_data, const Tensor& bias_data, Tensor* out)>; using BatchNormGraphRunner = std::function<void( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, Tensor* out)>; static bool HasGpuDevice() { tensorflow::SessionOptions session_options; std::unique_ptr<tensorflow::Session> session( tensorflow::NewSession(session_options)); std::vector<DeviceAttributes> available_devices; [&]() { TF_ASSERT_OK(session->ListDevices(&available_devices)); }(); const bool has_gpu_device = absl::c_any_of(available_devices, [](const DeviceAttributes& device) { return device.device_type() == DEVICE_GPU; }); return has_gpu_device; } void RunAndFetch(const tensorflow::Scope& root, const std::string& fetch, Tensor* output, bool allow_gpu_device, const NodeDef* fetch_node = nullptr) { tensorflow::GraphDef graph; TF_ASSERT_OK(root.ToGraphDef(&graph)); if (fetch_node) { *graph.add_node() = *fetch_node; } tensorflow::SessionOptions session_options; session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_opt_level(OptimizerOptions::L0); tensorflow::RewriterConfig* cfg = session_options.config.mutable_graph_options() ->mutable_rewrite_options(); cfg->set_constant_folding(tensorflow::RewriterConfig::OFF); cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF); cfg->set_remapping(tensorflow::RewriterConfig::OFF); std::unique_ptr<tensorflow::Session> session( tensorflow::NewSession(session_options)); const bool has_gpu_device = HasGpuDevice(); const bool place_all_on_gpu = allow_gpu_device && has_gpu_device; const std::string device = place_all_on_gpu ? 
"/device:GPU:0" : "/device:CPU:0"; for (NodeDef& mutable_node : *graph.mutable_node()) { mutable_node.set_device(device); } TF_ASSERT_OK(session->Create(graph)); std::vector<Tensor> unfused_tensors; TF_ASSERT_OK(session->Run({}, {fetch}, {}, &unfused_tensors)); *output = unfused_tensors[0]; } void RunConv2DWithBias(const Tensor& input_data, const Tensor& filter_data, const Tensor& bias_data, const std::string& padding, const std::vector<int>& explicit_paddings, Tensor* output, bool allow_gpu_device = false, int stride = 1) { RunConv2DWithBiasAndActivation(input_data, filter_data, bias_data, std::nullopt, padding, explicit_paddings, output, allow_gpu_device, stride); } template <typename From, typename To> static Tensor Cast( const Tensor& from, const std::function<To(From)>& cast = [](From v) { return static_cast<To>(v); }) { Tensor to(DataTypeToEnum<To>::v(), from.shape()); for (int i = 0; i < from.NumElements(); ++i) { to.flat<To>()(i) = cast(from.flat<From>()(i)); } return to; } void RunConv2DWithBiasAndActivation( Tensor input_data, Tensor filter_data, Tensor bias_data, std::optional<std::string> activation_type, const std::string& padding, const std::vector<int>& explicit_paddings, Tensor* output, bool allow_gpu_device = false, int stride = 1) { Scope root = tensorflow::Scope::NewRootScope(); if (kIsInt8) { input_data = Cast<T, float>(input_data); filter_data = Cast<T, float>(filter_data); bias_data = Cast<T, float>(bias_data); } ops::Conv2D conv = ops::Conv2D( root.WithOpName("conv"), ops::Const(root.WithOpName("input"), Input::Initializer(input_data)), ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)), {1, stride, stride, 1}, padding, ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings)); ops::BiasAdd with_bias = ops::BiasAdd( root.WithOpName("with_bias"), conv, ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data))); if (activation_type.has_value()) { if (*activation_type == "Relu") { ops::Relu(root.WithOpName("with_activation"), with_bias); } else if (*activation_type == "Relu6") { ops::Relu6(root.WithOpName("with_activation"), with_bias); } else if (*activation_type == "Elu") { ops::Elu(root.WithOpName("with_activation"), with_bias); } else if (*activation_type == "LeakyRelu") { ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_bias); } else { ops::Identity(root.WithOpName("with_activation"), with_bias); } } RunAndFetch(root, activation_type.has_value() ? 
"with_activation" : "with_bias", output, allow_gpu_device); if (kIsInt8) { *output = Cast<float, T>( *output, [](float v) { return static_cast<T>(std::lround(v)); }); } } void RunConv2DWithBatchNorm( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, const std::string& padding, const std::vector<int>& explicit_paddings, Tensor* output, bool allow_gpu_device = false, int stride = 1) { Scope root = tensorflow::Scope::NewRootScope(); ops::Conv2D conv = ops::Conv2D( root.WithOpName("conv"), ops::Const(root.WithOpName("input"), Input::Initializer(input_data)), ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)), {1, stride, stride, 1}, padding, ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings)); ops::FusedBatchNorm::Attrs attr; attr = attr.IsTraining(false); ops::FusedBatchNorm with_fused_batch_norm = ops::FusedBatchNorm( root.WithOpName("with_fused_batch_norm"), conv, ops::Const(root.WithOpName("scale"), Input::Initializer(scale_data)), ops::Const(root.WithOpName("offset"), Input::Initializer(offset_data)), ops::Const(root.WithOpName("mean"), Input::Initializer(mean_data)), ops::Const(root.WithOpName("var"), Input::Initializer(variance_data)), attr); RunAndFetch(root, "with_fused_batch_norm", output, allow_gpu_device); } void RunConv2DWithBatchNormAndActivation( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, const string& activation_type, const std::string& padding, const std::vector<int>& explicit_paddings, Tensor* output, bool allow_gpu_device = false, int stride = 1) { Scope root = tensorflow::Scope::NewRootScope(); ops::Conv2D conv = ops::Conv2D( root.WithOpName("conv"), ops::Const(root.WithOpName("input"), Input::Initializer(input_data)), ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)), {1, stride, stride, 1}, padding, ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings)); ops::FusedBatchNorm::Attrs attr; attr = attr.IsTraining(false); ops::FusedBatchNorm with_fused_batch_norm = ops::FusedBatchNorm( root.WithOpName("with_fused_batch_norm"), conv, ops::Const(root.WithOpName("scale"), Input::Initializer(scale_data)), ops::Const(root.WithOpName("offset"), Input::Initializer(offset_data)), ops::Const(root.WithOpName("mean"), Input::Initializer(mean_data)), ops::Const(root.WithOpName("var"), Input::Initializer(variance_data)), attr); if (activation_type == "Relu") { ops::Relu(root.WithOpName("with_activation"), with_fused_batch_norm.y); } else if (activation_type == "Relu6") { ops::Relu6(root.WithOpName("with_activation"), with_fused_batch_norm.y); } else if (activation_type == "Elu") { ops::Elu(root.WithOpName("with_activation"), with_fused_batch_norm.y); } else if (activation_type == "LeakyRelu") { ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_fused_batch_norm.y); } else { ops::Identity(root.WithOpName("with_activation"), with_fused_batch_norm.y); } RunAndFetch(root, "with_activation", output, allow_gpu_device); } void RunFusedConv2DOp(Tensor input_data, Tensor filter_data, std::vector<Tensor> args_data, const std::vector<std::string>& fused_ops, const std::string& padding, const std::vector<int>& explicit_paddings, Tensor* output, bool allow_gpu_device = false, int stride = 1) { Scope root = tensorflow::Scope::NewRootScope(); DataType dtype = DataTypeToEnum<T>::v(); const bool has_gpu_device = 
HasGpuDevice(); const bool has_extra_parameters = kIsInt8; const bool has_float_bias = kIsInt8; DataType dtype_args = has_float_bias ? DataTypeToEnum<float>::v() : DataTypeToEnum<T>::v(); const int n = GetTensorDim(input_data, FORMAT_NHWC, 'N'); const int h = GetTensorDim(input_data, FORMAT_NHWC, 'H'); const int w = GetTensorDim(input_data, FORMAT_NHWC, 'W'); const int kh = GetFilterDim(filter_data, FORMAT_HWIO, 'H'); const int kw = GetFilterDim(filter_data, FORMAT_HWIO, 'W'); const int ic = GetFilterDim(filter_data, FORMAT_HWIO, 'I'); const int oc = GetFilterDim(filter_data, FORMAT_HWIO, 'O'); const int v = (kIsInt8 && allow_gpu_device && has_gpu_device) ? 4 : 1; if (v > 1) { { TensorShape shape; TF_EXPECT_OK( ShapeFromFormatWithStatus(FORMAT_NCHW_VECT_C, n, h, w, ic, &shape)); Tensor input_data_nchwv(dtype, shape); input_data_nchwv.tensor<T, 5>() = input_data.shaped<T, 5>({n, h, w, ic / v, v}) .shuffle(Eigen::array<int, 5>{0, 3, 1, 2, 4}); input_data = input_data_nchwv; } { Tensor filter_data_oihwv( dtype, ShapeFromFilterTensorFormat(FORMAT_OIHW_VECT_I, kh, kw, ic, oc)); filter_data_oihwv.tensor<T, 5>() = filter_data.shaped<T, 4>({kh, kw, ic, oc}) .reshape(Eigen::array<int, 5>{kh, kw, ic / v, v, oc}) .shuffle(Eigen::array<int, 5>{4, 2, 0, 1, 3}); filter_data = filter_data_oihwv; } } if (has_float_bias) { for (Tensor& arg_data : args_data) { TensorShape shape = arg_data.shape(); Tensor arg_data_float = Tensor(dtype_args, shape); for (int index = 0; index < arg_data.NumElements(); index++) { int8 v = *(reinterpret_cast<int8*>(arg_data.data()) + index); *(reinterpret_cast<float*>(arg_data_float.data()) + index) = static_cast<float>(v); } arg_data = arg_data_float; } } int num_args = static_cast<int>(args_data.size()); Output input = ops::Const(root.WithOpName("input"), Input::Initializer(input_data)); Output filter = ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)); std::vector<NodeDefBuilder::NodeOut> args; std::vector<DataType> args_dtypes; for (int i = 0; i < num_args; ++i) { Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)), Input::Initializer(args_data[i])); args.emplace_back(arg.name(), 0, dtype_args); args_dtypes.emplace_back(dtype_args); } Tensor side_input(dtype); if (has_extra_parameters) { Padding padding_type; ASSERT_TRUE(GetPaddingFromString(padding, &padding_type).ok()); int64_t oh, oh_padding; ASSERT_TRUE(GetWindowedOutputSize(h, kh, 1, stride, padding_type, &oh, &oh_padding) .ok()); int64_t ow, ow_padding; ASSERT_TRUE(GetWindowedOutputSize(w, kw, 1, stride, padding_type, &ow, &ow_padding) .ok()); TensorShape shape; TF_EXPECT_OK( ShapeFromFormatWithStatus(FORMAT_NCHW_VECT_C, n, oh, ow, oc, &shape)); side_input = Tensor(dtype, shape); side_input.flat<T>() = side_input.flat<T>().setConstant(0); } Tensor conv_input_scale(DT_FLOAT, {1}); Tensor side_input_scale(DT_FLOAT, {1}); std::vector<NodeDefBuilder::NodeOut> host_args; int num_host_args = 0; if (has_extra_parameters) { ++num_args; Output arg2 = ops::Const(root.WithOpName("side_input"), Input::Initializer(side_input)); args.emplace_back(arg2.name(), 0, dtype); args_dtypes.emplace_back(dtype); ++num_host_args; conv_input_scale.scalar<float>()() = 1; Output arg3 = ops::Const(root.WithOpName("conv_input_scale"), Input::Initializer(conv_input_scale)); host_args.emplace_back(arg3.name(), 0, DT_FLOAT); ++num_host_args; side_input_scale.scalar<float>()() = 1; Output arg4 = ops::Const(root.WithOpName("side_input_scale"), Input::Initializer(side_input_scale)); host_args.emplace_back(arg4.name(), 0, 
DT_FLOAT); } NodeDef fused_conv2d; TF_EXPECT_OK(NodeDefBuilder("fused_conv", "_FusedConv2D") .Input({input.name(), 0, dtype}) .Input({filter.name(), 0, dtype}) .Input(args) .Input(host_args) .Attr("num_args", num_args) .Attr("num_host_args", num_host_args) .Attr("T", dtype) .Attr("TArgs", args_dtypes) .Attr("data_format", v > 1 ? "NCHW_VECT_C" : "NHWC") .Attr("strides", {1, stride, stride, 1}) .Attr("padding", padding) .Attr("explicit_paddings", explicit_paddings) .Attr("fused_ops", fused_ops) .Finalize(&fused_conv2d)); RunAndFetch(root, fused_conv2d.name(), output, allow_gpu_device, &fused_conv2d); if (v > 1) { const int oh = GetTensorDim(*output, FORMAT_NCHW_VECT_C, 'H'); const int ow = GetTensorDim(*output, FORMAT_NCHW_VECT_C, 'W'); TensorShape shape; TF_EXPECT_OK( ShapeFromFormatWithStatus(FORMAT_NHWC, n, oh, ow, oc, &shape)); Tensor output_nhwc(dtype, shape); output_nhwc.tensor<T, 4>() = output->tensor<T, 5>() .shuffle(Eigen::array<int, 5>{0, 2, 3, 1, 4}) .reshape(Eigen::array<int, 4>{n, oh, ow, oc}); *output = output_nhwc; } } void ExpectMatch(const Tensor& x, const Tensor& y, double atol) { constexpr bool exact_match = std::is_same<T, int8>::value || std::is_same<T, qint8>::value; if (exact_match) { test::ExpectEqual(x, y); } else { test::ExpectClose(x, y, atol); } } void VerifyBiasAddTensorsNear(int depth, int image_width, int image_height, int image_batch_count, int filter_size, int filter_count, const BiasAddGraphRunner& run_default, const BiasAddGraphRunner& run_fused) { DataType dtype = DataTypeToEnum<T>::v(); constexpr int int8_scale = 80; using ConvT = typename std::conditional<kIsInt8, int8, T>::type; DataType dtype_conv = DataTypeToEnum<ConvT>::v(); TensorShape image_shape{image_batch_count, image_height, image_width, depth}; Tensor image_tmp(dtype_conv, image_shape); image_tmp.flat<ConvT>() = image_tmp.flat<ConvT>().setRandom(); if (kIsInt8) { image_tmp.flat<ConvT>() /= image_tmp.flat<ConvT>().constant(int8_scale); } Tensor image(dtype, image_shape); ASSERT_TRUE(image.BitcastFrom(image_tmp, dtype, image_shape).ok()); TensorShape filter_shape{filter_size, filter_size, depth, filter_count}; Tensor filter_tmp(dtype_conv, filter_shape); filter_tmp.flat<ConvT>() = filter_tmp.flat<ConvT>().setRandom(); if (kIsInt8) { filter_tmp.flat<ConvT>() /= filter_tmp.flat<ConvT>().constant(int8_scale); } else { filter_tmp.flat<ConvT>() -= filter_tmp.flat<ConvT>().constant(static_cast<ConvT>(0.5f)); } Tensor filter(dtype, filter_shape); ASSERT_TRUE(filter.BitcastFrom(filter_tmp, dtype, filter_shape).ok()); const int bias_size = filter_count; TensorShape bias_shape{bias_size}; Tensor bias_tmp(dtype_conv, bias_shape); bias_tmp.flat<ConvT>() = bias_tmp.flat<ConvT>().setRandom(); if (kIsInt8) { bias_tmp.flat<ConvT>() /= bias_tmp.flat<ConvT>().constant(int8_scale); } else { bias_tmp.flat<ConvT>() += bias_tmp.flat<ConvT>().constant(static_cast<ConvT>(0.5f)); } Tensor bias(dtype, bias_shape); ASSERT_TRUE(bias.BitcastFrom(bias_tmp, dtype, bias_shape).ok()); Tensor conv_2d; Tensor fused_conv_2d; run_default(image, filter, bias, &conv_2d); run_fused(image, filter, bias, &fused_conv_2d); ASSERT_EQ(conv_2d.dtype(), fused_conv_2d.dtype()); ASSERT_EQ(conv_2d.shape(), fused_conv_2d.shape()); if (image_width == filter_size && image_height == filter_size) { ExpectMatch(conv_2d, fused_conv_2d, 1e-4); } else { ExpectMatch(conv_2d, fused_conv_2d, 1e-5); } } void VerifyFusedBatchNormTensorsNear(int depth, int image_width, int image_height, int image_batch_count, int filter_size, int filter_count, const 
BatchNormGraphRunner& run_default, const BatchNormGraphRunner& run_fused) { DataType dtype = DataTypeToEnum<T>::v(); Tensor image(dtype, {image_batch_count, image_height, image_width, depth}); image.flat<T>() = image.flat<T>().setRandom(); Tensor filter(dtype, {filter_size, filter_size, depth, filter_count}); filter.flat<T>() = filter.flat<T>().setRandom(); filter.flat<T>() -= filter.flat<T>().constant(static_cast<T>(0.5f)); const int scale_size = filter_count; Tensor scale(dtype, {scale_size}); scale.flat<T>() = scale.flat<T>().setRandom(); Tensor offset(dtype, {scale_size}); offset.flat<T>() = offset.flat<T>().setRandom(); Tensor mean(dtype, {scale_size}); mean.flat<T>() = mean.flat<T>().setRandom(); Tensor variance(dtype, {scale_size}); variance.flat<T>() = variance.flat<T>().setRandom(); variance.flat<T>() += variance.flat<T>().constant(static_cast<T>(0.5f)); Tensor conv_2d; Tensor fused_conv_2d; run_default(image, filter, scale, offset, mean, variance, &conv_2d); run_fused(image, filter, scale, offset, mean, variance, &fused_conv_2d); ASSERT_EQ(conv_2d.dtype(), fused_conv_2d.dtype()); ASSERT_EQ(conv_2d.shape(), fused_conv_2d.shape()); if (image_width == filter_size && image_height == filter_size) { test::ExpectClose(conv_2d, fused_conv_2d, 1e-4); } else { test::ExpectClose(conv_2d, fused_conv_2d, 1e-5); } } void VerifyConv2DWithBias(int filter_size, int filter_count, const std::vector<int>& explicit_paddings = {}, int depth = kDepth, int image_width = kImageWidth, int image_height = kImageHeight, int image_batch_count = kImageBatchCount) { if (kIsInt8 && !explicit_paddings.empty()) { return; } std::string padding = explicit_paddings.empty() ? "SAME" : "EXPLICIT"; const BiasAddGraphRunner run_default = [&](const Tensor& input_data, const Tensor& filter_data, const Tensor& bias_data, Tensor* out) { RunConv2DWithBias(input_data, filter_data, bias_data, padding, explicit_paddings, out); }; const BiasAddGraphRunner run_fused = [&](const Tensor& input_data, const Tensor& filter_data, const Tensor& bias_data, Tensor* out) { RunFusedConv2DOp(input_data, filter_data, {bias_data}, {"BiasAdd"}, padding, explicit_paddings, out, kIsInt8); }; VerifyBiasAddTensorsNear(depth, image_width, image_height, image_batch_count, filter_size, filter_count, run_default, run_fused); } void VerifyConv2DWithBiasAndActivation( const std::string& activation, int filter_size, int filter_count, const std::vector<int>& explicit_paddings = {}, int depth = kDepth, int image_width = kImageWidth, int image_height = kImageHeight, int image_batch_count = kImageBatchCount) { if (kIsInt8 && (activation != "Relu" || !explicit_paddings.empty())) { return; } std::string padding = explicit_paddings.empty() ? 
"SAME" : "EXPLICIT"; const BiasAddGraphRunner run_default = [this, &activation, &explicit_paddings, &padding]( const Tensor& input_data, const Tensor& filter_data, const Tensor& bias_data, Tensor* out) { RunConv2DWithBiasAndActivation( input_data, filter_data, bias_data, activation, padding, explicit_paddings, out, activation == "Relu"); }; const BiasAddGraphRunner run_fused = [this, &activation, &explicit_paddings, padding](const Tensor& input_data, const Tensor& filter_data, const Tensor& bias_data, Tensor* out) { RunFusedConv2DOp(input_data, filter_data, {bias_data}, {"BiasAdd", activation}, padding, explicit_paddings, out, activation == "Relu"); }; VerifyBiasAddTensorsNear(depth, image_width, image_height, image_batch_count, filter_size, filter_count, run_default, run_fused); } void VerifyConv2DWithBatchNorm(int filter_size, int filter_count, const std::vector<int>& explicit_paddings = {}, int depth = kDepth, int image_width = kImageWidth, int image_height = kImageHeight, int image_batch_count = kImageBatchCount) { std::string padding = explicit_paddings.empty() ? "SAME" : "EXPLICIT"; const BatchNormGraphRunner run_default = [this, explicit_paddings, padding]( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, Tensor* out) { RunConv2DWithBatchNorm(input_data, filter_data, scale_data, offset_data, mean_data, variance_data, padding, explicit_paddings, out); }; const BatchNormGraphRunner run_fused = [this, explicit_paddings, padding]( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, Tensor* out) { RunFusedConv2DOp(input_data, filter_data, {scale_data, offset_data, mean_data, variance_data}, {"FusedBatchNorm"}, padding, explicit_paddings, out); }; VerifyFusedBatchNormTensorsNear(depth, image_width, image_height, image_batch_count, filter_size, filter_count, run_default, run_fused); } void VerifyConv2DWithBatchNormAndActivation( const string& activation, int filter_size, int filter_count, const std::vector<int>& explicit_paddings = {}, int depth = kDepth, int image_width = kImageWidth, int image_height = kImageHeight, int image_batch_count = kImageBatchCount) { std::string padding = explicit_paddings.empty() ? 
"SAME" : "EXPLICIT"; const BatchNormGraphRunner run_default = [this, &activation, explicit_paddings, padding]( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, Tensor* out) { RunConv2DWithBatchNormAndActivation( input_data, filter_data, scale_data, offset_data, mean_data, variance_data, activation, padding, explicit_paddings, out); }; const BatchNormGraphRunner run_fused = [this, &activation, explicit_paddings, padding]( const Tensor& input_data, const Tensor& filter_data, const Tensor& scale_data, const Tensor& offset_data, const Tensor& mean_data, const Tensor& variance_data, Tensor* out) { RunFusedConv2DOp(input_data, filter_data, {scale_data, offset_data, mean_data, variance_data}, {"FusedBatchNorm", activation}, padding, explicit_paddings, out); }; VerifyFusedBatchNormTensorsNear(depth, image_width, image_height, image_batch_count, filter_size, filter_count, run_default, run_fused); } }; template <typename T> class FusedConv2DWithBiasOpTest : public FusedConv2DOpTest<T> {}; template <typename T> class FusedConv2DWithBatchNormOpTest : public FusedConv2DOpTest<T> {}; TYPED_TEST_SUITE_P(FusedConv2DWithBiasOpTest); TYPED_TEST_SUITE_P(FusedConv2DWithBatchNormOpTest); #ifndef TENSORFLOW_USE_ROCM TYPED_TEST_P(FusedConv2DWithBiasOpTest, OneByOneConvolution) { const int filter_size = 1; const int filter_count = 12; this->VerifyConv2DWithBias(filter_size, filter_count); } TYPED_TEST_P(FusedConv2DWithBiasOpTest, ImageSizeConvolution) { const int filter_size = TestFixture::kImageWidth; const int filter_count = 12; this->VerifyConv2DWithBias(filter_size, filter_count); } TYPED_TEST_P(FusedConv2DWithBiasOpTest, SpatialConvolution) { const int filter_size = 3; const int filter_count = 12; this->VerifyConv2DWithBias(filter_size, filter_count); } #ifndef INTEL_MKL TYPED_TEST_P(FusedConv2DWithBiasOpTest, ExplicitPaddingConvolution) { const int filter_size = 3; const int filter_count = 12; this->VerifyConv2DWithBias(filter_size, filter_count, {0, 0, 1, 2, 3, 4, 0, 0}); } #endif static auto activations = {"Relu", "Relu6", "Elu", "LeakyRelu"}; TYPED_TEST_P(FusedConv2DWithBiasOpTest, OneByOneConvolutionAndActivation) { tensorflow::enable_tensor_float_32_execution(false); const int filter_size = 1; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBiasAndActivation(activation, filter_size, filter_count); } } TYPED_TEST_P(FusedConv2DWithBiasOpTest, ImageSizeConvolutionAndActivation) { const int filter_size = TestFixture::kImageWidth; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBiasAndActivation(activation, filter_size, filter_count); } } TYPED_TEST_P(FusedConv2DWithBiasOpTest, SpatialConvolutionAndActivation) { const int filter_size = 3; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBiasAndActivation(activation, filter_size, filter_count); } } #ifndef INTEL_MKL TYPED_TEST_P(FusedConv2DWithBiasOpTest, ExplicitPaddingConvolutionAndActivation) { const int filter_size = 3; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBiasAndActivation( activation, filter_size, filter_count, {0, 0, 1, 2, 3, 4, 0, 0}); } } #endif TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, OneByOneConvolution) { const int filter_size = 1; const int filter_count = 12; this->VerifyConv2DWithBatchNorm(filter_size, 
filter_count); } TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, ImageSizeConvolution) { const int filter_size = TestFixture::kImageWidth; const int filter_count = 12; this->VerifyConv2DWithBatchNorm(filter_size, filter_count); } TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, SpatialConvolution) { const int filter_size = 3; const int filter_count = 12; this->VerifyConv2DWithBatchNorm(filter_size, filter_count); } #ifndef INTEL_MKL TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, ExplicitPaddingConvolution) { const int filter_size = 3; const int filter_count = 12; this->VerifyConv2DWithBatchNorm( filter_size, filter_count, {0, 0, 1, 2, 3, 4, 0, 0}); } #endif TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, OneByOneConvolutionAndActivation) { const int filter_size = 1; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBatchNormAndActivation(activation, filter_size, filter_count); } } TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, ImageSizeConvolutionAndActivation) { const int filter_size = TestFixture::kImageWidth; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBatchNormAndActivation(activation, filter_size, filter_count); } } TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, SpatialConvolutionAndActivation) { const int filter_size = 3; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBatchNormAndActivation(activation, filter_size, filter_count); } } #ifndef INTEL_MKL TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, ExplicitPaddingConvolutionAndActivation) { const int filter_size = 3; const int filter_count = 12; for (const std::string& activation : activations) { this->VerifyConv2DWithBatchNormAndActivation( activation, filter_size, filter_count, {0, 0, 1, 2, 3, 4, 0, 0}); } } #endif #ifndef INTEL_MKL REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBiasOpTest, OneByOneConvolution, ImageSizeConvolution, SpatialConvolution, ExplicitPaddingConvolution, OneByOneConvolutionAndActivation, ImageSizeConvolutionAndActivation, SpatialConvolutionAndActivation, ExplicitPaddingConvolutionAndActivation); REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBatchNormOpTest, OneByOneConvolution, ImageSizeConvolution, SpatialConvolution, ExplicitPaddingConvolution, OneByOneConvolutionAndActivation, ImageSizeConvolutionAndActivation, SpatialConvolutionAndActivation, ExplicitPaddingConvolutionAndActivation); #else REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBiasOpTest, OneByOneConvolution, ImageSizeConvolution, SpatialConvolution, OneByOneConvolutionAndActivation, ImageSizeConvolutionAndActivation, SpatialConvolutionAndActivation); REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBatchNormOpTest, OneByOneConvolution, ImageSizeConvolution, SpatialConvolution, OneByOneConvolutionAndActivation, ImageSizeConvolutionAndActivation, SpatialConvolutionAndActivation); #endif using FusedBiasAddDataTypes = ::testing::Types<float, double, int8, qint8>; INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedConv2DWithBiasOpTest, FusedBiasAddDataTypes); using FusedBatchNormDataTypes = ::testing::Types<float>; INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedConv2DWithBatchNormOpTest, FusedBatchNormDataTypes); #endif }
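The HandwrittenConv fixtures in the test file above compare Conv2D (and FusedResizeAndPadConv2D with an identity resize/pad) against hard-coded outputs such as {105, 150, 183, 95, ...}. Those golden values are just a 3x3, stride-1, SAME-padded convolution of the 3x4 iota image with the handwritten filter. The sketch below recomputes them with plain loops and zero padding, independently of the TensorFlow kernels, as a way to see where the numbers come from.

#include <cstdio>

// Recompute the golden values used by ConvOpTest::HandwrittenConv:
// 3x4 image, 3x3 filter, stride 1, SAME (zero) padding, depth 1.
int main() {
  const int H = 3, W = 4;
  const int img[H][W] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}};
  // Filter values as filled in the test ({1,4,7,2,5,8,3,6,9} in HWIO order).
  const int f[3][3] = {{1, 4, 7}, {2, 5, 8}, {3, 6, 9}};
  const int expected[H][W] = {{105, 150, 183, 95},
                              {235, 312, 357, 178},
                              {187, 234, 261, 121}};
  for (int y = 0; y < H; ++y) {
    for (int x = 0; x < W; ++x) {
      int acc = 0;
      for (int fy = 0; fy < 3; ++fy) {
        for (int fx = 0; fx < 3; ++fx) {
          const int iy = y + fy - 1;  // SAME padding: shift by filter_size / 2
          const int ix = x + fx - 1;
          if (iy >= 0 && iy < H && ix >= 0 && ix < W) {
            acc += img[iy][ix] * f[fy][fx];
          }
        }
      }
      if (acc != expected[y][x]) {
        std::printf("mismatch at (%d,%d): %d vs %d\n", y, x, acc,
                    expected[y][x]);
        return 1;
      }
    }
  }
  std::printf("all golden values reproduced\n");
  return 0;
}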
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/conv_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/conv_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
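The RunFusedConv2DOp helper in the test above relayouts int8 inputs from NHWC to NCHW_VECT_C (channels grouped in vectors of 4) with Eigen's shaped(...).shuffle({0, 3, 1, 2, 4}) before building the _FusedConv2D node. The sketch below spells out the equivalent index arithmetic with plain loops; it is a reading of that transform, not the kernel's code, and the function name is invented here.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// NHWC -> NCHW_VECT_C: element (n, h, w, c) moves to (n, c / 4, h, w, c % 4)
// in a [N, C/4, H, W, 4] buffer. C is assumed to be a multiple of 4.
std::vector<int8_t> ToNchwVectC(const std::vector<int8_t>& nhwc, int N, int H,
                                int W, int C) {
  const int V = 4;  // channel vector width
  std::vector<int8_t> out(nhwc.size());
  for (int n = 0; n < N; ++n)
    for (int h = 0; h < H; ++h)
      for (int w = 0; w < W; ++w)
        for (int c = 0; c < C; ++c) {
          const std::size_t src =
              ((static_cast<std::size_t>(n) * H + h) * W + w) * C + c;
          const std::size_t dst =
              (((static_cast<std::size_t>(n) * (C / V) + c / V) * H + h) * W +
               w) * V + c % V;
          out[dst] = nhwc[src];
        }
  return out;
}

int main() {
  const int N = 1, H = 2, W = 2, C = 8;
  std::vector<int8_t> nhwc(N * H * W * C);
  for (std::size_t i = 0; i < nhwc.size(); ++i) nhwc[i] = static_cast<int8_t>(i);
  auto vect_c = ToNchwVectC(nhwc, N, H, W, C);
  // NHWC element (n=0, h=1, w=0, c=5) has value 21 and should now sit at
  // (n=0, c/4=1, h=1, w=0, c%4=1) in the [N, C/4, H, W, 4] layout.
  const std::size_t dst = (((0 * (C / 4) + 1) * H + 1) * W + 0) * 4 + 1;
  std::printf("%d\n", static_cast<int>(vect_c[dst]));  // prints 21
  return 0;
}

The matching shuffle at the end of RunFusedConv2DOp ({0, 2, 3, 1, 4} followed by a reshape) undoes this so the fused output can be compared element-wise against the NHWC reference.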
bbb29eb8-c0b8-4858-8036-e935432c2a2c
cpp
tensorflow/tensorflow
tensor_cord
tensorflow/core/kernels/tensor_cord.cc
tensorflow/core/kernels/tensor_cord_test.cc
#include "tensorflow/core/kernels/tensor_cord.h" #include <cstring> #include "tensorflow/core/framework/variant.h" namespace tensorflow { static_assert(Variant::CanInlineType<TensorCord>(), "TensorCord should be inlined into Variants"); TensorCord::CordRep::~CordRep() { if (!is_inline_ && rep_.external.releaser) { rep_.external.releaser(rep_.external.arg); } } TensorCord::~TensorCord() { Cleanup(); } void TensorCord::Encode(VariantTensorData* data) const { data->metadata_string().clear(); for (auto rep : Chunks()) { data->metadata_string().append(rep.data(), rep.size()); } } bool TensorCord::Decode(VariantTensorData data) { auto* str = new string(std::move(data.metadata_string())); Cleanup(); chunks_.push_back(new CordRep(absl::string_view(*str), &StringReleaser, str)); return true; } TensorBuffer* TensorCord::TensorBufWithRef(Tensor* tensor) { TensorBuffer* buf = tensor->buf_; buf->Ref(); return buf; } void TensorCord::TensorBufReleaser(void* tensor_buffer) { static_cast<TensorBuffer*>(tensor_buffer)->Unref(); } void TensorCord::StringReleaser(void* str_ptr) { delete static_cast<string*>(str_ptr); } namespace { template <typename string_type, typename = void> struct ResizeUninitializedTraits { using HasMember = std::false_type; static void Resize(string_type* s, size_t new_size) { s->resize(new_size); } }; template <typename string_type> struct ResizeUninitializedTraits< string_type, absl::void_t<decltype(std::declval<string_type&>() .__resize_default_init(237))> > { using HasMember = std::true_type; static void Resize(string_type* s, size_t new_size) { s->__resize_default_init(new_size); } }; static inline void STLStringResizeUninitialized(string* s, size_t new_size) { ResizeUninitializedTraits<string>::Resize(s, new_size); } } TensorCord::operator string() const { string out; STLStringResizeUninitialized(&out, size()); char* data = const_cast<char*>(out.data()); for (auto* rep : chunks_) { auto view = rep->view(); memcpy(data, view.data(), view.size()); data += view.size(); } DCHECK_EQ(data - out.data(), size()); return out; } }
#include "tensorflow/core/kernels/tensor_cord.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_reference.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/platform/cord.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { void DoNothingReleaser(void*) {} TEST(TensorCordTest, Empty) { TensorCord tc; EXPECT_EQ(tc.size(), 0); EXPECT_EQ(tc.chunk_begin(), tc.chunk_end()); auto chunks = tc.Chunks(); EXPECT_EQ(chunks.begin(), chunks.end()); } TEST(TensorCordTest, ViewOfValue) { TensorCord tc("abc", &DoNothingReleaser, nullptr); EXPECT_EQ(*tc.chunk_begin(), "abc"); auto it = tc.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(it, tc.chunk_end()); } TEST(TensorCordTest, Chunks) { TensorCord tc("abc", &DoNothingReleaser, nullptr); int counter = 0; for (auto string_piece : tc.Chunks()) { EXPECT_EQ(string_piece, "abc"); ++counter; } EXPECT_EQ(counter, 1); } template <typename T> CordRepReleaser CreateThunkFor(const T& fn) { return [](void* ptr) { (*static_cast<T*>(ptr))(); }; } TEST(TensorCordTest, Copy) { int cleaned = 0; auto cleaner = [&cleaned]() { ++cleaned; }; auto thunk = CreateThunkFor(cleaner); TensorCord tc_copy; string a = "abc"; { TensorCord tc(a, thunk, &cleaner); tc_copy = tc; } auto it = tc_copy.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(it, tc_copy.chunk_end()); EXPECT_EQ(cleaned, 0); tc_copy = TensorCord(); EXPECT_EQ(cleaned, 1); } TEST(TensorCordTest, AppendCord) { int cleaned_0 = 0; int cleaned_1 = 0; auto cleaner_0 = [&cleaned_0]() { ++cleaned_0; }; auto cleaner_1 = [&cleaned_1]() { ++cleaned_1; }; auto thunk_0 = CreateThunkFor(cleaner_0); auto thunk_1 = CreateThunkFor(cleaner_1); TensorCord tc_0("abc", thunk_0, &cleaner_0); TensorCord tc_1("cba", thunk_1, &cleaner_1); tc_0.Append(tc_1); EXPECT_EQ(string(tc_0), "abccba"); auto it = tc_0.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(*it, "cba"); ++it; EXPECT_EQ(it, tc_0.chunk_end()); tc_1 = TensorCord(); EXPECT_EQ(cleaned_0, 0); EXPECT_EQ(cleaned_1, 0); tc_0 = TensorCord(); EXPECT_EQ(cleaned_0, 1); EXPECT_EQ(cleaned_1, 1); } TEST(TensorCordTest, AppendView) { int cleaned_0 = 0; int cleaned_1 = 0; auto cleaner_0 = [&cleaned_0]() { ++cleaned_0; }; auto cleaner_1 = [&cleaned_1]() { ++cleaned_1; }; auto thunk_0 = CreateThunkFor(cleaner_0); auto thunk_1 = CreateThunkFor(cleaner_1); TensorCord tc_0("abc", thunk_0, &cleaner_0); tc_0.Append("cba", thunk_1, &cleaner_1); EXPECT_EQ(string(tc_0), "abccba"); auto it = tc_0.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(*it, "cba"); ++it; EXPECT_EQ(it, tc_0.chunk_end()); EXPECT_EQ(cleaned_0, 0); EXPECT_EQ(cleaned_1, 0); tc_0 = TensorCord(); EXPECT_EQ(cleaned_0, 1); EXPECT_EQ(cleaned_1, 1); } TEST(TensorCordTest, Move) { int cleaned = 0; auto cleaner = [&cleaned]() { ++cleaned; }; auto thunk = CreateThunkFor(cleaner); TensorCord tc_copy; string a = "abc"; { TensorCord tc(a, thunk, &cleaner); tc_copy = std::move(tc); } EXPECT_EQ(tc_copy.size(), 3); auto it = tc_copy.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(it, tc_copy.chunk_end()); EXPECT_EQ(cleaned, 0); tc_copy = TensorCord(); EXPECT_EQ(tc_copy.size(), 0); EXPECT_EQ(cleaned, 1); } TEST(TensorCordTest, CopyConstructor) { int cleaned = 0; auto cleaner = [&cleaned]() { ++cleaned; }; auto thunk = CreateThunkFor(cleaner); string a = "abc"; TensorCord 
tc(a, thunk, &cleaner); TensorCord tc_copy(tc); EXPECT_EQ(tc.size(), 3); EXPECT_EQ(tc_copy.size(), 3); auto it = tc_copy.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(it, tc_copy.chunk_end()); EXPECT_EQ(cleaned, 0); tc = TensorCord(); EXPECT_EQ(cleaned, 0); tc_copy = TensorCord(); EXPECT_EQ(cleaned, 1); } TEST(TensorCordTest, MoveConstructor) { int cleaned = 0; auto cleaner = [&cleaned]() { ++cleaned; }; auto thunk = CreateThunkFor(cleaner); string a = "abc"; TensorCord tc(a, thunk, &cleaner); TensorCord tc_copy(std::move(tc)); EXPECT_EQ(tc_copy.size(), 3); auto it = tc_copy.chunk_begin(); EXPECT_EQ(*it, "abc"); ++it; EXPECT_EQ(it, tc_copy.chunk_end()); EXPECT_EQ(cleaned, 0); tc_copy = TensorCord(); EXPECT_EQ(cleaned, 1); } #ifdef PLATFORM_GOOGLE void TensorCopyFromTensorBenchmark(benchmark::State& state, int num_elem, int string_size) { Tensor strings(DT_STRING, {num_elem}); auto t = strings.flat<tstring>(); for (int i = 0; i < num_elem; ++i) { t(i).insert(0, string_size, 'a'); } for (auto _ : state) { benchmark::DoNotOptimize(tensor::DeepCopy(strings)); } } void TensorCordFromTensorBenchmark(benchmark::State& state, int num_elem, int string_size) { Tensor strings(DT_STRING, {num_elem}); auto t = strings.flat<tstring>(); for (int i = 0; i < num_elem; ++i) { t(i).insert(0, string_size, 'a'); } for (auto _ : state) { Tensor copy(DT_VARIANT, {num_elem}); auto t_copy = copy.flat<Variant>(); for (int i = 0; i < num_elem; ++i) { t_copy(i) = TensorCord(t(i), &strings); } } } void CordReleaser(void* cord_ptr) { delete static_cast<absl::Cord*>(cord_ptr); } void TensorCordFromAbslCordBenchmark(benchmark::State& state, int num_elem, int string_size) { std::vector<absl::Cord> cords(num_elem); for (int i = 0; i < num_elem; ++i) { string s(string_size, 'a'); cords[i] = s; } for (auto _ : state) { Tensor copy(DT_VARIANT, {num_elem}); auto t_copy = copy.flat<Variant>(); for (int i = 0; i < num_elem; ++i) { auto my_cord = new absl::Cord(cords[i]); t_copy(i) = TensorCord(*my_cord->chunk_begin(), CordReleaser, my_cord); } } } #define CreateBM(NUM_ELEM, STRING_SIZE) \ void BM_TensorCopyFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE( \ benchmark::State& state) { \ TensorCopyFromTensorBenchmark(state, NUM_ELEM, STRING_SIZE); \ } \ BENCHMARK( \ BM_TensorCopyFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE); \ void BM_TensorCordFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE( \ benchmark::State& state) { \ TensorCordFromTensorBenchmark(state, NUM_ELEM, STRING_SIZE); \ } \ BENCHMARK( \ BM_TensorCordFromTensor_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE); \ void \ BM_TensorCordFromAbslCord_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE( \ benchmark::State& state) { \ TensorCordFromAbslCordBenchmark(state, NUM_ELEM, STRING_SIZE); \ } \ BENCHMARK( \ BM_TensorCordFromAbslCord_NumElem_##NUM_ELEM##_StringSize_##STRING_SIZE); #define CreateStringBMs(NUM_ELEM) \ CreateBM(NUM_ELEM, 16); \ CreateBM(NUM_ELEM, 32); \ CreateBM(NUM_ELEM, 128); \ CreateBM(NUM_ELEM, 1024); \ CreateBM(NUM_ELEM, 4096); CreateStringBMs(1); CreateStringBMs(16); CreateStringBMs(32); CreateStringBMs(64); CreateStringBMs(128); #endif } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_cord.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_cord_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
856f08f5-e960-4ae4-90cc-7b4516bd8de3
cpp
tensorflow/tensorflow
fingerprint_op
tensorflow/core/kernels/fingerprint_op.cc
tensorflow/core/kernels/fingerprint_op_test.cc
#include <cstddef> #include <string> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/byte_order.h" #include "tensorflow/core/platform/fingerprint.h" namespace tensorflow { namespace { template <typename T> inline void CopyToBuffer(const T& value, uint8* output) { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ static_assert(port::kLittleEndian, ""); std::memcpy(output, &value, sizeof(value)); #else static_assert(!port::kLittleEndian, ""); std::reverse_copy(reinterpret_cast<const uint8*>(&value), reinterpret_cast<const uint8*>(&value + 1), output); #endif } void FarmhashFingerprint64(TTypes<uint8, 2>::ConstTensor input, TTypes<uint8, 2>::Matrix output) { DCHECK_EQ(output.dimension(0), input.dimension(0)); DCHECK_EQ(output.dimension(1), sizeof(uint64)); for (int64_t i = 0; i < output.dimension(0); ++i) { const uint64 fingerprint = Fingerprint64({reinterpret_cast<const char*>(&input(i, 0)), static_cast<std::size_t>(input.dimension(1))}); CopyToBuffer(fingerprint, &output(i, 0)); } } void FarmhashFingerprint64(TTypes<tstring>::ConstFlat input, TTypes<uint8, 2>::Matrix output) { DCHECK_EQ(output.dimension(0), input.dimension(0)); DCHECK_EQ(output.dimension(1), sizeof(uint64)); for (int64_t i = 0; i < input.dimension(0); ++i) { const uint64 fingerprint = Fingerprint64({input(i).data(), input(i).size()}); CopyToBuffer(fingerprint, &output(i, 0)); } } class FingerprintOp : public OpKernel { public: explicit FingerprintOp(OpKernelConstruction* context) : OpKernel(context) { DataType dtype; OP_REQUIRES_OK(context, context->GetAttr("T", &dtype)); OP_REQUIRES(context, DataTypeCanUseMemcpy(dtype) || dtype == DT_STRING, errors::InvalidArgument("Data type not supported: ", DataTypeString(dtype))); } void Compute(tensorflow::OpKernelContext* context) override { const Tensor& method_tensor = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsScalar(method_tensor.shape()), errors::InvalidArgument("`method` should be a scalar string: ", method_tensor.shape())); const tstring& method = method_tensor.scalar<tstring>()(); OP_REQUIRES( context, method == "farmhash64", errors::InvalidArgument("Unsupported fingerprint method: ", method)); const Tensor& input = context->input(0); OP_REQUIRES( context, TensorShapeUtils::IsVectorOrHigher(input.shape()), errors::InvalidArgument("`data` should have at least one dimension: ", input.shape())); const int64_t dim0 = input.shape().dim_size(0); int64_t dim1; if (dim0 == 0) { dim1 = 0; } else { dim1 = input.shape().num_elements() / dim0; } Tensor* output; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape{dim0, kFingerprintSize}, &output)); if (input.dtype() == DT_STRING) { if (dim1 > 1) { Tensor temp; OP_REQUIRES_OK(context, context->allocate_temp( DT_UINT8, TensorShape{input.shape().num_elements(), kFingerprintSize}, &temp)); FarmhashFingerprint64(input.flat<tstring>(), temp.tensor<uint8, 2>()); FarmhashFingerprint64(static_cast<const Tensor&>(temp).shaped<uint8, 2>( {dim0, dim1 * kFingerprintSize}), output->matrix<uint8>()); } else { FarmhashFingerprint64(input.flat<tstring>(), output->matrix<uint8>()); } } else { auto data = input.bit_casted_shaped<uint8, 2>( {dim0, dim1 * DataTypeSize(input.dtype())}); FarmhashFingerprint64(data, output->matrix<uint8>()); } } private: static 
constexpr int kFingerprintSize = sizeof(uint64); }; REGISTER_KERNEL_BUILDER(Name("Fingerprint").Device(tensorflow::DEVICE_CPU), FingerprintOp); } }
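FingerprintOp above flattens any [dim0, d1, d2, ...] input into a [dim0, dim1 * sizeof(T)] byte matrix and emits one 8-byte farmhash64 per row; for DT_STRING inputs with more than one string per row it first fingerprints every string to 8 bytes and then fingerprints each row of concatenated per-string fingerprints. The sketch below mirrors that dim1 > 1 string path with std::hash standing in for Fingerprint64, so the shape of the computation matches the kernel but the output values do not.

#include <cstdint>
#include <cstring>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for farmhash Fingerprint64 (values intentionally differ from the op).
uint64_t FakeFingerprint64(const char* data, std::size_t size) {
  return std::hash<std::string>{}(std::string(data, size));
}

// For a string tensor viewed as [dim0, dim1] with dim1 > 1: hash every string
// to 8 bytes, then hash each row of dim1 * 8 bytes, one result per row.
std::vector<uint64_t> FingerprintStringRows(
    const std::vector<std::vector<std::string>>& rows) {
  std::vector<uint64_t> out;
  for (const auto& row : rows) {
    std::string per_element;  // dim1 * 8 bytes of intermediate fingerprints
    for (const auto& s : row) {
      uint64_t fp = FakeFingerprint64(s.data(), s.size());
      char buf[sizeof(fp)];
      std::memcpy(buf, &fp, sizeof(fp));  // as CopyToBuffer does on little-endian hosts
      per_element.append(buf, sizeof(buf));
    }
    out.push_back(FakeFingerprint64(per_element.data(), per_element.size()));
  }
  return out;
}

int main() {
  auto fps = FingerprintStringRows({{"abc", "def"}, {"abc", "xyz"}});
  std::cout << std::hex << fps[0] << ' ' << fps[1] << '\n';
  return 0;
}

The two-stage reduction is what the StringGoldenValue test exercises by running the same data first as shape {1, 2, 2} (one row of four strings) and then as shape {4} (four rows of one string each).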
#include <memory> #include <numeric> #include <vector> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { Status MakeNodeDef(DataType dtype, NodeDef* node_def) { return NodeDefBuilder("fingerprint", "Fingerprint") .Input(FakeInput(dtype)) .Input(FakeInput(DT_STRING)) .Finalize(node_def); } class FingerprintOpTest : public OpsTestBase { protected: Status MakeFingerprintOp(Tensor* tensor) { return MakeFingerprintOp(tensor, "farmhash64"); } Status MakeFingerprintOp(Tensor* data, const string& method) { TF_RETURN_IF_ERROR(MakeNodeDef(data->dtype(), node_def())); TF_RETURN_IF_ERROR(InitOp()); inputs_.clear(); inputs_.push_back(TensorValue(data)); method_ = Tensor(DT_STRING, TensorShape{}); method_.scalar<tstring>()() = method; inputs_.push_back(TensorValue(&method_)); return absl::OkStatus(); } Tensor batch_dims_; Tensor method_; }; TEST_F(FingerprintOpTest, Empty) { Tensor tensor(DT_UINT8, {0}); TF_ASSERT_OK(MakeFingerprintOp(&tensor)); TF_ASSERT_OK(RunOpKernel()); EXPECT_EQ(GetOutput(0)->shape(), (TensorShape{0, 8})); EXPECT_EQ(GetOutput(0)->tensor_data(), ""); } TEST_F(FingerprintOpTest, GoldenValue) { Tensor tensor(DT_UINT8, {1, 3, 4, 5, 6, 7}); auto buffer = tensor.flat<uint8>(); std::iota(buffer.data(), buffer.data() + buffer.size(), static_cast<uint8>(47)); TF_ASSERT_OK(MakeFingerprintOp(&tensor)); TF_ASSERT_OK(RunOpKernel()); EXPECT_EQ(GetOutput(0)->shape(), (TensorShape{1, 8})); EXPECT_EQ(GetOutput(0)->tensor_data(), "\x2d\x90\xdf\x03\x79\x36\x3c\x43"); } TEST_F(FingerprintOpTest, StringGoldenValue) { Tensor data(DT_STRING, {1, 2, 2}); auto buffer = data.flat<tstring>(); buffer(0).resize(10); buffer(1).resize(7); buffer(2).resize(0); buffer(3).resize(19); std::iota(&buffer(0)[0], &buffer(0)[0] + buffer(0).size(), 0); std::iota(&buffer(1)[0], &buffer(1)[0] + buffer(1).size(), 7); std::iota(&buffer(2)[0], &buffer(2)[0] + buffer(2).size(), 71); std::iota(&buffer(3)[0], &buffer(3)[0] + buffer(3).size(), 41); TF_ASSERT_OK(MakeFingerprintOp(&data)); TF_ASSERT_OK(RunOpKernel()); ASSERT_EQ(GetOutput(0)->shape(), (TensorShape{1, 8})); EXPECT_EQ(GetOutput(0)->tensor_data(), "\x92\x43\x28\x52\xa3\x7c\x48\x18"); ASSERT_TRUE(data.CopyFrom(data, TensorShape{4})); TF_ASSERT_OK(MakeFingerprintOp(&data)); TF_ASSERT_OK(RunOpKernel()); ASSERT_EQ(GetOutput(0)->shape(), (TensorShape{4, 8})); EXPECT_EQ(GetOutput(0)->tensor_data(), "\xea\xff\xd6\xb2\xb2\x4d\x70\x9b" "\x6e\x9d\xed\x21\xc6\x4a\x61\x52" "\x4f\x40\x90\x2f\x3b\x6a\xe1\x9a" "\x0d\x9b\x7f\x63\x23\x14\x1c\xb8"); } TEST_F(FingerprintOpTest, Collision) { const TensorShape shape = {1, 2, 4, 6}; for (DataType dtype : kRealNumberTypes) { const int64_t size = shape.num_elements() * DataTypeSize(dtype); Tensor tensor(dtype, shape); auto buffer = tensor.bit_casted_shaped<uint8, 1>({size}); buffer.setRandom(); TF_ASSERT_OK(MakeFingerprintOp(&tensor)); TF_ASSERT_OK(RunOpKernel()); const Tensor fingerprint0 = *GetOutput(0); const int offset = buffer(0) % buffer.size(); buffer(offset) = 
~buffer(offset); TF_ASSERT_OK(MakeFingerprintOp(&tensor)); TF_ASSERT_OK(RunOpKernel()); const Tensor fingerprint1 = *GetOutput(0); EXPECT_NE(fingerprint0.tensor_data(), fingerprint1.tensor_data()); } } TEST_F(FingerprintOpTest, CollisionString) { constexpr int64_t size = 256; Tensor tensor(DT_STRING, {1}); auto& input = tensor.vec<tstring>()(0); input.resize(size); TTypes<uint8>::UnalignedFlat buffer(reinterpret_cast<uint8*>(&input[0]), input.size()); buffer.setRandom(); TF_ASSERT_OK(MakeFingerprintOp(&tensor)); TF_ASSERT_OK(RunOpKernel()); const Tensor fingerprint0 = *GetOutput(0); const int offset = buffer(0) % buffer.size(); buffer(offset) = ~buffer(offset); TF_ASSERT_OK(MakeFingerprintOp(&tensor)); TF_ASSERT_OK(RunOpKernel()); const Tensor fingerprint1 = *GetOutput(0); EXPECT_NE(fingerprint0.tensor_data(), fingerprint1.tensor_data()); } TEST_F(FingerprintOpTest, CompareBytesAndString) { Tensor pods_tensor(DT_FLOAT, {4, 64}); Tensor strings_tensor(DT_STRING, {4}); auto pods = pods_tensor.matrix<float>(); pods.setRandom(); auto strings = strings_tensor.vec<tstring>(); for (int64_t i = 0; i < strings.size(); ++i) { strings(i).assign(reinterpret_cast<const char*>(&pods(i, 0)), pods.dimension(1) * sizeof(pods(i, 0))); } TF_ASSERT_OK(MakeFingerprintOp(&pods_tensor)); TF_ASSERT_OK(RunOpKernel()); Tensor pods_fingerprints = *GetOutput(0); TF_ASSERT_OK(MakeFingerprintOp(&strings_tensor)); TF_ASSERT_OK(RunOpKernel()); Tensor strings_fingerprints = *GetOutput(0); EXPECT_EQ(pods_fingerprints.tensor_data(), strings_fingerprints.tensor_data()); } TEST_F(FingerprintOpTest, SupportedMethods) { Tensor tensor(DT_STRING, TensorShape{1}); TF_ASSERT_OK(MakeFingerprintOp(&tensor, "unsupported_method")); const Status status = RunOpKernel(); EXPECT_FALSE(status.ok()); EXPECT_NE(status.message().find("unsupported_method"), string::npos); } TEST_F(FingerprintOpTest, SupportedTypes) { Tensor input(DT_RESOURCE, TensorShape{1}); EXPECT_FALSE(MakeFingerprintOp(&input).ok()); } TEST(FingerprintOpShapeFnTest, MethodKnownStatically) { ShapeInferenceTestOp op("Fingerprint"); Tensor method(DT_STRING, TensorShape{}); method.scalar<tstring>()() = "farmhash64"; op.input_tensors.assign({nullptr, &method}); TF_ASSERT_OK(MakeNodeDef(DT_UINT8, &op.node_def)); INFER_OK(op, "?;?", "[?,8]"); INFER_ERROR("must be at least rank 1", op, "[];?"); INFER_OK(op, "[?];?", "[d0_0,8]"); INFER_OK(op, "[1,?];?", "[d0_0,8]"); INFER_OK(op, "[?,2,3];?", "[d0_0,8]"); } TEST(FingerprintOpShapeFnTest, MethodUnknownStatically) { ShapeInferenceTestOp op("Fingerprint"); TF_ASSERT_OK(MakeNodeDef(DT_FLOAT, &op.node_def)); INFER_OK(op, "?;?", "[?,?]"); INFER_ERROR("must be at least rank 1", op, "[];?"); INFER_OK(op, "[?];?", "[d0_0,?]"); INFER_OK(op, "[1,?];?", "[d0_0,?]"); INFER_OK(op, "[?,2,3];?", "[d0_0,?]"); } TEST(FingerprintOpShapeFnTest, InvalidMethod) { ShapeInferenceTestOp op("Fingerprint"); INFER_ERROR("must be rank 0", op, "[1];[1]"); Tensor method(DT_STRING, TensorShape{1}); method.vec<tstring>()(0) = "farmhash64"; op.input_tensors.assign({nullptr, &method}); INFER_ERROR("must be rank 0", op, "?;?"); method = Tensor(DT_STRING, TensorShape{}); method.scalar<tstring>()() = "unsupported_method"; op.input_tensors.assign({nullptr, &method}); INFER_ERROR("unsupported_method", op, "?;?"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fingerprint_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fingerprint_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
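As a reading aid for the Fingerprint record above: the kernel reduces every element of the first input dimension to an 8-byte digest, so an input of shape {N, ...} yields an output of shape {N, 8}, which is exactly what the tests assert. The standalone C++ sketch below illustrates only that layout contract; the hash is a stand-in (std::hash) rather than the op's "farmhash64" method, and FingerprintRows is a hypothetical helper, not TensorFlow API.

#include <cstdint>
#include <cstring>
#include <functional>
#include <string>
#include <vector>

constexpr int kFingerprintSize = sizeof(uint64_t);  // 8 bytes, matching the kernel above.

// For an input viewed as {rows, row_bytes}, emit one 8-byte digest per row,
// producing the flat {N, 8} output layout the tests check.
std::vector<uint8_t> FingerprintRows(const uint8_t* data, int64_t rows,
                                     int64_t row_bytes) {
  std::vector<uint8_t> out(rows * kFingerprintSize);
  for (int64_t i = 0; i < rows; ++i) {
    // Stand-in 64-bit hash of the row's raw bytes; the real op uses a
    // farmhash-style hash instead of std::hash.
    const uint64_t h = std::hash<std::string>{}(std::string(
        reinterpret_cast<const char*>(data + i * row_bytes), row_bytes));
    std::memcpy(out.data() + i * kFingerprintSize, &h, kFingerprintSize);
  }
  return out;
}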
692945d3-8470-407f-ae14-61af4fa1bf80
cpp
tensorflow/tensorflow
scatter_op
tensorflow/core/kernels/scatter_op.cc
tensorflow/core/kernels/scatter_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/scatter_functor.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/util.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; static bool ValidShapes(const Tensor& params, const Tensor& updates, const Tensor& indices) { if (updates.dims() == 0) return true; if (updates.dims() != indices.dims() + params.dims() - 1) return false; for (int d = 0; d < indices.dims(); d++) { if (updates.dim_size(d) != indices.dim_size(d)) { return false; } } for (int d = 1; d < params.dims(); d++) { if (params.dim_size(d) != updates.dim_size(d - 1 + indices.dims())) { return false; } } return true; } static void DoValidationChecking(OpKernelContext* c, const Tensor& params, const Tensor& indices, const Tensor& updates) { OP_REQUIRES(c, params.IsInitialized(), errors::FailedPrecondition("Null ref for params")); OP_REQUIRES(c, TensorShapeUtils::IsVectorOrHigher(params.shape()), errors::InvalidArgument("params must be at least 1-D, got shape ", params.shape().DebugString())); OP_REQUIRES( c, ValidShapes(params, updates, indices), errors::InvalidArgument("Must have updates.shape = indices.shape + " "params.shape[1:] or updates.shape = [], got ", "updates.shape ", updates.shape().DebugString(), ", indices.shape ", indices.shape().DebugString(), ", params.shape ", params.shape().DebugString())); } template <typename Device, typename T, typename Index, scatter_op::UpdateOp op> class ScatterUpdateOp : public OpKernel { public: explicit ScatterUpdateOp(OpKernelConstruction* c) : OpKernel(c) { OP_REQUIRES_OK(c, c->GetAttr("use_locking", &use_exclusive_lock_)); if (std::is_same<Device, GPUDevice>::value) { OP_REQUIRES( c, !OpDeterminismRequired(), errors::Unimplemented( "Determinism is not yet supported in GPU implementation of " "Scatter ops with ref inputs. 
Consider using resource variables " "instead if you want to run Scatter when op determinism is " "enabled.")); } } void Compute(OpKernelContext* c) override { if (use_exclusive_lock_) { mutex_lock l(*c->input_ref_mutex(0)); DoCompute(c); } else { DoCompute(c); } } private: bool use_exclusive_lock_; void DoCompute(OpKernelContext* c) { Tensor params = c->mutable_input(0, use_exclusive_lock_); const Tensor& indices = c->input(1); const Tensor& updates = c->input(2); DoValidationChecking(c, params, indices, updates); if (!c->status().ok()) return; const int64_t N_big = indices.NumElements(); OP_REQUIRES( c, N_big <= std::numeric_limits<Index>::max(), errors::InvalidArgument("indices has too many elements for ", DataTypeString(DataTypeToEnum<Index>::v()), " indexing: ", N_big, " > ", std::numeric_limits<Index>::max())); const Index N = static_cast<Index>(indices.NumElements()); OP_REQUIRES( c, params.dim_size(0) <= std::numeric_limits<Index>::max(), errors::InvalidArgument("params.shape[0] too large for ", DataTypeString(DataTypeToEnum<Index>::v()), " indexing: ", params.dim_size(0), " > ", std::numeric_limits<Index>::max())); c->forward_ref_input_to_ref_output(0, 0); if (N > 0) { auto indices_flat = indices.flat<Index>(); auto params_flat = params.flat_outer_dims<T>(); if (TensorShapeUtils::IsScalar(updates.shape())) { const auto update = updates.scalar<T>(); functor::ScatterScalarFunctor<Device, T, Index, op> functor; const Index bad_i = functor(c, c->template eigen_device<Device>(), params_flat, update, indices_flat); OP_REQUIRES(c, bad_i < 0, errors::InvalidArgument( "indices", SliceDebugString(indices.shape(), bad_i), " = ", indices_flat(bad_i), " is not in [0, ", params.dim_size(0), ")")); } else { auto updates_flat = updates.shaped<T, 2>({N, updates.NumElements() / N}); functor::ScatterFunctor<Device, T, Index, op> functor; const Index bad_i = functor(c, c->template eigen_device<Device>(), params_flat, updates_flat, indices_flat); OP_REQUIRES(c, bad_i < 0, errors::InvalidArgument( "indices", SliceDebugString(indices.shape(), bad_i), " = ", indices_flat(bad_i), " is not in [0, ", params.dim_size(0), ")")); } } } }; #define REGISTER_SCATTER_KERNEL_INDEX(type, index_type, dev, name, op) \ REGISTER_KERNEL_BUILDER(Name(name) \ .Device(DEVICE_##dev) \ .TypeConstraint<type>("T") \ .TypeConstraint<index_type>("Tindices"), \ ScatterUpdateOp<dev##Device, type, index_type, op>) #define REGISTER_SCATTER_KERNEL(type, dev, name, op) \ REGISTER_SCATTER_KERNEL_INDEX(type, int32, dev, name, op); \ REGISTER_SCATTER_KERNEL_INDEX(type, int64_t, dev, name, op); #define REGISTER_SCATTER_ARITHMETIC(type, dev) \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterAdd", scatter_op::UpdateOp::ADD); \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterDiv", scatter_op::UpdateOp::DIV); \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterMul", scatter_op::UpdateOp::MUL); \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterSub", scatter_op::UpdateOp::SUB); #define REGISTER_SCATTER_MINMAX(type, dev) \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterMin", scatter_op::UpdateOp::MIN); \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterMax", scatter_op::UpdateOp::MAX); #define REGISTER_SCATTER_UPDATE(type, dev) \ REGISTER_SCATTER_KERNEL(type, dev, "ScatterUpdate", \ scatter_op::UpdateOp::ASSIGN); #define REGISTER_SCATTER_ARITHMETIC_CPU(type) \ REGISTER_SCATTER_ARITHMETIC(type, CPU); #define REGISTER_SCATTER_MINMAX_CPU(type) REGISTER_SCATTER_MINMAX(type, CPU); #define REGISTER_SCATTER_UPDATE_CPU(type) REGISTER_SCATTER_UPDATE(type, CPU); 
TF_CALL_REAL_NUMBER_TYPES(REGISTER_SCATTER_MINMAX_CPU); TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ARITHMETIC_CPU); TF_CALL_ALL_TYPES(REGISTER_SCATTER_UPDATE_CPU); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_SCATTER_ARITHMETIC_GPU(type) \ REGISTER_SCATTER_ARITHMETIC(type, GPU); #define REGISTER_SCATTER_MINMAX_GPU(type) REGISTER_SCATTER_MINMAX(type, GPU); #define REGISTER_SCATTER_UPDATE_GPU(type) REGISTER_SCATTER_UPDATE(type, GPU); TF_CALL_GPU_NUMBER_TYPES(REGISTER_SCATTER_ARITHMETIC_GPU); TF_CALL_GPU_NUMBER_TYPES(REGISTER_SCATTER_MINMAX_GPU); TF_CALL_GPU_NUMBER_TYPES(REGISTER_SCATTER_UPDATE_GPU); #endif #undef REGISTER_SCATTER_ARITHMETIC #undef REGISTER_SCATTER_ARITHMETIC_CPU #undef REGISTER_SCATTER_ARITHMETIC_GPU #undef REGISTER_SCATTER_MINMAX #undef REGISTER_SCATTER_MINMAX_CPU #undef REGISTER_SCATTER_MINMAX_GPU #undef REGISTER_SCATTER_UPDATE #undef REGISTER_SCATTER_UPDATE_CPU #undef REGISTER_SCATTER_UPDATE_GPU #undef REGISTER_SCATTER_KERNEL #undef REGISTER_SCATTER_KERNEL_INDEX }
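For orientation, the ScatterUpdate kernel above implements, for the ASSIGN case, params[indices[i], :] = updates[i, :], subject to the shape rule checked by ValidShapes and the bounds check that produces the "is not in [0, d0)" error. The following is a minimal standalone sketch of those semantics on a flattened buffer; ScatterAssign is a hypothetical helper for illustration, not the registered kernel.

#include <cstdint>
#include <vector>

// Sketch of ScatterUpdate's ASSIGN semantics on a flattened {first_dim, inner}
// params buffer: copy updates row i into params row indices[i].
bool ScatterAssign(std::vector<float>& params, int64_t first_dim, int64_t inner,
                   const std::vector<int32_t>& indices,
                   const std::vector<float>& updates) {
  // Mirrors ValidShapes: updates must hold one inner-sized row per index.
  if (updates.size() != indices.size() * static_cast<size_t>(inner)) return false;
  for (size_t i = 0; i < indices.size(); ++i) {
    const int32_t row = indices[i];
    if (row < 0 || row >= first_dim) return false;  // "indices[...] is not in [0, d0)"
    for (int64_t j = 0; j < inner; ++j) {
      params[static_cast<size_t>(row) * inner + j] = updates[i * inner + j];
    }
  }
  return true;
}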
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class ScatterUpdateOpTest : public OpsTestBase { protected: void MakeOp(DataType variable_ref_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterUpdate") .Input(FakeInput(variable_ref_type)) .Input(FakeInput(index_type)) .Input(FakeInput(RemoveRefType(variable_ref_type))) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; class ScatterSubOpTest : public OpsTestBase { protected: void MakeOp(DataType variable_ref_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterSub") .Input(FakeInput(variable_ref_type)) .Input(FakeInput(index_type)) .Input(FakeInput(RemoveRefType(variable_ref_type))) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(ScatterUpdateOpTest, Simple_StringType) { MakeOp(DT_STRING_REF, DT_INT32); AddInputFromArray<tstring>(TensorShape({1}), {"Brain"}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<tstring>(TensorShape({1}), {"TensorFlow"}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_STRING, TensorShape({1})); test::FillValues<tstring>(&expected, {"TensorFlow"}); test::ExpectTensorEqual<tstring>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Simple_BoolType) { MakeOp(DT_BOOL_REF, DT_INT32); AddInputFromArray<bool>(TensorShape({1}), {false}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<bool>(TensorShape({1}), {true}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_BOOL, TensorShape({1})); test::FillValues<bool>(&expected, {true}); test::ExpectTensorEqual<bool>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Simple_TwoD32) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001, 10002, 0, 0, 0, 777, 778, 779}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Simple_Two64) { MakeOp(DT_FLOAT_REF, DT_INT64); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor 
expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001, 10002, 0, 0, 0, 777, 778, 779}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Simple_ZeroD) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({}), {3}); AddInputFromArray<float>(TensorShape({}), {101}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5})); test::FillValues<float>(&expected, {0, 0, 0, 101, 0}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Simple_OneD) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({3}), {100, 101, 102}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({5})); test::FillValues<float>(&expected, {100, 0, 102, 0, 101}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, HigherRank) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({2, 3}), {0, 4, 2, 1, 3, 6}); AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60}); TF_ASSERT_OK(RunOpKernel()); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_FLOAT, TensorShape({8})); test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0}); test::ExpectTensorEqual<float>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 99}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "indices[2] = 99 is not in [0, 5)")) << s; } TEST_F(ScatterSubOpTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({14}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 1, 99}); AddInputFromArray<float>(TensorShape({3}), {100, 101, 102}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "indices[2] = 99 is not in [0, 14)")) << s; } TEST_F(ScatterSubOpTest, StressIndexTest) { MakeOp(DT_INT32_REF, DT_INT32); const int kRows = 1; std::vector<int32> values(kRows, 0); const int kNumUpdates = 1000000; std::vector<int32> indices(kNumUpdates, 0); std::vector<int32> updates(kNumUpdates, 1); AddInputFromArray<int32>(TensorShape({kRows}), values); AddInputFromArray<int32>(TensorShape({kNumUpdates}), indices); AddInputFromArray<int32>(TensorShape({kNumUpdates}), updates); Status s = RunOpKernel(); Tensor params_tensor = *mutable_input(0).tensor; Tensor expected(allocator(), DT_INT32, TensorShape({1})); test::FillValues<int32>(&expected, {-1000000}); test::ExpectTensorEqual<int32>(expected, params_tensor); } TEST_F(ScatterUpdateOpTest, Error_WrongDimsIndices) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({1, 3}), {0, 4, 99}); AddInputFromArray<float>(TensorShape({3, 3}), {100, 
101, 102, 777, 778, 779, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "Must have updates.shape = indices.shape + " "params.shape[1:] or updates.shape = [], got ")) << s; } TEST_F(ScatterUpdateOpTest, Error_MismatchedParamsAndUpdateDimensions) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2}); AddInputFromArray<float>( TensorShape({3, 4}), {100, 101, 102, 103, 777, 778, 779, 780, 10000, 10001, 10002, 10004}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "Must have updates.shape = indices.shape + " "params.shape[1:] or updates.shape = [], got ")) << s; } TEST_F(ScatterUpdateOpTest, Error_MismatchedIndicesAndUpdateDimensions) { MakeOp(DT_FLOAT_REF, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2}); AddInputFromArray<float>(TensorShape({2, 3}), {100, 101, 102, 10000, 10001, 10002}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "Must have updates.shape = indices.shape + " "params.shape[1:] or updates.shape = [], got ")) << s; } class ScatterUpdateBM : public ScatterUpdateOpTest { public: void TestBody() override {} void MakeBenchmarkOp(const char* op, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", op) .Input(FakeInput(DT_FLOAT_REF)) .Input(FakeInput(index_type)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_CHECK_OK(InitOp()); } }; template <typename Index> void BM_ScatterHelper(::testing::benchmark::State& state, int embedding_size, const char* op, bool big_num_updates = false) { const int kRows = 10000000 / embedding_size; std::vector<float> values; values.reserve(kRows); for (int i = 0; i < kRows * embedding_size; i++) { values.push_back(i); } const int kNumUpdates = big_num_updates ? 
1000000 : 1000; random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); std::vector<Index> indices; std::vector<float> updates; for (int i = 0; i < kNumUpdates; i++) { indices.push_back(rnd.Uniform(kRows)); for (int j = 0; j < embedding_size; j++) { updates.push_back(i * 10 + j); } } ScatterUpdateBM bm; bm.MakeBenchmarkOp(op, DataTypeToEnum<Index>::v()); bm.AddInputFromArray<float>(TensorShape({kRows, embedding_size}), values); bm.AddInputFromArray<Index>(TensorShape({kNumUpdates}), indices); bm.AddInputFromArray<float>(TensorShape({kNumUpdates, embedding_size}), updates); for (auto i : state) { Status s = bm.RunOpKernel(); } state.SetItemsProcessed((static_cast<int64_t>(kNumUpdates) * embedding_size) * state.iterations()); } void BM_ScatterUpdateInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterUpdate"); } void BM_ScatterUpdateInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterUpdate"); } void BM_ScatterAddInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterAdd"); } void BM_ScatterAddInt32Large(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterAdd", true); } void BM_ScatterAddInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterAdd"); } void BM_ScatterMulInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterMul"); } void BM_ScatterMulInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterMul"); } void BM_ScatterDivInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterDiv"); } void BM_ScatterDivInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterDiv"); } void BM_ScatterMinInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterMin"); } void BM_ScatterMinInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterMin"); } void BM_ScatterMaxInt32(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int32>(state, embedding_size, "ScatterMax"); } void BM_ScatterMaxInt64(::testing::benchmark::State& state) { const int embedding_size = state.range(0); BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterMax"); } BENCHMARK(BM_ScatterUpdateInt32) ->Arg(1) ->Arg(10) ->Arg(32) ->Arg(50) ->Arg(64) ->Arg(80) ->Arg(96) ->Arg(112) ->Arg(192) ->Arg(256) ->Arg(1024) ->Arg(10000) ->Arg(100000) ->Arg(1000000); BENCHMARK(BM_ScatterUpdateInt64) ->Arg(1) ->Arg(10) ->Arg(64) ->Arg(256) ->Arg(1024) ->Arg(100000); BENCHMARK(BM_ScatterAddInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterAddInt32Large) ->Arg(1) ->Arg(10) ->Arg(64) ->Arg(256) ->Arg(1024); BENCHMARK(BM_ScatterAddInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); 
BENCHMARK(BM_ScatterMulInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterMulInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterDivInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterDivInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterMinInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterMinInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterMaxInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); BENCHMARK(BM_ScatterMaxInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scatter_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scatter_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
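The StressIndexTest in the record above depends on duplicate indices accumulating: one million ScatterSub updates of 1 aimed at row 0 leave that row at -1000000. A minimal sketch of that accumulate-on-duplicates behaviour follows (hypothetical helper, scalar rows only, not the TF kernel).

#include <cstdint>
#include <vector>

// Duplicate indices accumulate: each update is subtracted from the addressed
// row, so a million {index=0, update=1} pairs drive params[0] from 0 to -1000000.
void ScatterSubScalarRows(std::vector<int32_t>& params,
                          const std::vector<int32_t>& indices,
                          const std::vector<int32_t>& updates) {
  for (size_t i = 0; i < indices.size(); ++i) {
    params[indices[i]] -= updates[i];
  }
}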
70560aea-19e3-4db0-bb61-409c88fde8ba
cpp
tensorflow/tensorflow
quantized_pooling_ops
tensorflow/core/kernels/quantized_pooling_ops.cc
tensorflow/core/kernels/quantized_pooling_ops_test.cc
#define EIGEN_USE_THREADS #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/pooling_ops_common.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename Device, typename T> class QuantizedAvgPoolingOp : public OpKernel { public: explicit QuantizedAvgPoolingOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{context, ksize_, stride_, padding_, {}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } const Tensor& min_input_tensor = context->input(1); const Tensor& max_input_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_input_tensor.shape()), errors::InvalidArgument( "min_input shape must be rank 0 but is rank ", min_input_tensor.dims(), ", received shape: ", min_input_tensor.shape())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_input_tensor.shape()), errors::InvalidArgument( "max_input shape must be rank 0 but is rank ", max_input_tensor.dims(), ", received shape: ", max_input_tensor.shape())); const float min_input = context->input(1).scalar<float>()(); const float max_input = context->input(2).scalar<float>()(); OP_REQUIRES(context, params.depth_window == 1, errors::Unimplemented("Non-spatial pooling is not " "yet supported. Volunteers? 
:)")); OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional")); Tensor* output = nullptr; TensorShape params_forward_output_shape; OP_REQUIRES_OK(context, params.forward_output_shape(&params_forward_output_shape)); OP_REQUIRES_OK(context, context->allocate_output( 0, params_forward_output_shape, &output)); const int32_t highest = static_cast<int32>(Eigen::NumTraits<T>::highest()); const int32_t lowest = static_cast<int32>(Eigen::NumTraits<T>::lowest()); OP_REQUIRES_OK(context, params.forward_output_shape(&params_forward_output_shape)); Tensor int32_output(DT_INT32, params_forward_output_shape); Tensor int32_input(DT_INT32, tensor_in.shape()); int32_input.flat<int32>() = tensor_in.flat<T>().template cast<int32>(); SpatialAvgPool<Device, int32>(context, &int32_output, int32_input, params, padding_); output->flat<T>() = int32_output.flat<int32>() .cwiseMax(lowest) .cwiseMin(highest) .template cast<T>(); Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); output_min->flat<float>()(0) = min_input; Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_max->flat<float>()(0) = max_input; } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; }; template <typename Device, typename T> class QuantizedMaxPoolingOp : public MaxPoolingOp<Device, T> { public: explicit QuantizedMaxPoolingOp(OpKernelConstruction* context) : MaxPoolingOp<Device, T>(context) {} void Compute(OpKernelContext* context) override { const Tensor& min_input_tensor = context->input(1); const Tensor& max_input_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_input_tensor.shape()), errors::InvalidArgument( "min_input shape must be rank 0 but is rank ", min_input_tensor.dims(), ", received shape: ", min_input_tensor.shape())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_input_tensor.shape()), errors::InvalidArgument( "max_input shape must be rank 0 but is rank ", max_input_tensor.dims(), ", received shape: ", max_input_tensor.shape())); const float min_input = context->input(1).scalar<float>()(); const float max_input = context->input(2).scalar<float>()(); MaxPoolingOp<Device, T>::Compute(context); Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); output_min->flat<float>()(0) = min_input; Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_max->flat<float>()(0) = max_input; } }; REGISTER_KERNEL_BUILDER( Name("QuantizedAvgPool").Device(DEVICE_CPU).TypeConstraint<quint8>("T"), QuantizedAvgPoolingOp<CPUDevice, quint8>); REGISTER_KERNEL_BUILDER( Name("QuantizedMaxPool").Device(DEVICE_CPU).TypeConstraint<quint8>("T"), QuantizedMaxPoolingOp<CPUDevice, quint8>); #ifdef INTEL_MKL REGISTER_KERNEL_BUILDER( Name("QuantizedAvgPool").Device(DEVICE_CPU).TypeConstraint<qint8>("T"), QuantizedAvgPoolingOp<CPUDevice, qint8>); REGISTER_KERNEL_BUILDER( Name("QuantizedMaxPool").Device(DEVICE_CPU).TypeConstraint<qint8>("T"), QuantizedMaxPoolingOp<CPUDevice, qint8>); #endif }
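The QuantizedAvgPool kernel above widens the quint8 input to int32, averages in int32, then clamps back into the 8-bit range while forwarding the float min/max outputs unchanged. Below is a standalone sketch of that final saturation step only, assuming the averaged int32 values are already computed; SaturateToQuint8 is a hypothetical helper, not part of the kernel.

#include <algorithm>
#include <cstdint>
#include <vector>

// Clamp int32 pooling results back into the 8-bit quantized range, analogous to
// the cwiseMax/cwiseMin/cast chain in the kernel. The float min/max that give
// the quantized values their real-world meaning pass through unchanged.
std::vector<uint8_t> SaturateToQuint8(const std::vector<int32_t>& pooled) {
  std::vector<uint8_t> out(pooled.size());
  for (size_t i = 0; i < pooled.size(); ++i) {
    out[i] = static_cast<uint8_t>(std::min(255, std::max(0, pooled[i])));
  }
  return out;
}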
#define EIGEN_USE_THREADS #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class QuantizedPoolingTest : public OpsTestBase { protected: }; TEST_F(QuantizedPoolingTest, SmallAveragePooling) { const int ksize = 2; const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_avg_pool_op", "QuantizedAvgPool") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("ksize", {1, ksize, ksize, 1}) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = 0.0f; const float input_max = 255.0f; const int input_height = 4; const int input_width = 4; const int input_channels = 2; Tensor input_float(DT_FLOAT, {1, input_height, input_width, input_channels}); test::FillValues<float>( &input_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); const int expected_width = input_width / stride; const int expected_height = input_height / stride; Tensor expected_float(DT_FLOAT, {1, expected_height, expected_width, input_channels}); test::FillValues<float>(&expected_float, {6, 7, 10, 11, 22, 23, 26, 27}); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {input_min}); AddInputFromArray<float>(TensorShape({}), {input_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } TEST_F(QuantizedPoolingTest, SmallMaxPooling) { const int ksize = 2; const int stride = 2; TF_ASSERT_OK(NodeDefBuilder("quantized_max_pool_op", "QuantizedMaxPool") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("T", DataTypeToEnum<quint8>::v()) .Attr("ksize", {1, ksize, ksize, 1}) .Attr("strides", {1, stride, stride, 1}) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = 0.0f; const float input_max = 255.0f; const int input_height = 4; const int input_width = 4; const int input_channels = 2; Tensor input_float(DT_FLOAT, {1, input_height, input_width, input_channels}); test::FillValues<float>( &input_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); const int expected_width = input_width / stride; const int expected_height = input_height / stride; Tensor expected_float(DT_FLOAT, {1, 
expected_height, expected_width, input_channels}); test::FillValues<float>(&expected_float, {11, 12, 15, 16, 27, 28, 31, 32}); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {input_min}); AddInputFromArray<float>(TensorShape({}), {input_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } }
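A quick check of the golden values in SmallAveragePooling above: the input is NHWC 1x4x4x2 filled with 1..32, so channel 0 of the top-left 2x2 window holds {1, 3, 9, 11}, whose mean is 6, the first entry of expected_float. The tiny sketch below just reproduces that arithmetic.

#include <cstdio>

// Channel 0 of the top-left 2x2 window in the 1x4x4x2 NHWC input holds
// {1, 3, 9, 11}; their mean is 6, matching the first expected value.
int main() {
  const int window[4] = {1, 3, 9, 11};
  int sum = 0;
  for (int v : window) sum += v;
  std::printf("average = %d\n", sum / 4);  // prints 6
  return 0;
}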
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_pooling_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_pooling_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2ab3cb3e-1670-4082-871b-20cdddce5f35
cpp
tensorflow/tensorflow
random_index_shuffle
tensorflow/core/kernels/random_index_shuffle.cc
tensorflow/core/kernels/random_index_shuffle_test.cc
#include "tensorflow/core/kernels/random_index_shuffle.h" #include <assert.h> #include <algorithm> #include <array> #include <bitset> #include <cmath> #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace random { constexpr int kMinBlockSize = 16; namespace impl { #define ROTL(x, r, W) (((x) << (r)) | (x >> (W - (r)))) #define ROTR(x, r, W) (((x) >> (r)) | ((x) << (W - (r)))) #define SIMON_F(x, W) ((ROTL(x, 1, W) & ROTL(x, 8, W) ^ ROTL(x, 2, W))) #define SIMON_Rx2(x, y, k1, k2, W) \ (y ^= SIMON_F(x, W), y ^= k1, x ^= SIMON_F(y, W), x ^= k2) template <int W> std::vector<std::bitset<W>> simon_key_schedule( const std::array<uint32_t, 3>& key, const int32_t rounds) { static_assert(W >= 8, "Minimum word size is 8 bits."); const auto c = std::bitset<W>(0xfffffffc); auto z = std::bitset<W>(0x7369f885192c0ef5LL); std::vector<std::bitset<W>> rk({key[0], key[1], key[2]}); rk.reserve(rounds); for (int i = 3; i < rounds; i++) { rk.push_back(c ^ (z & std::bitset<W>(1)) ^ rk[i - 3] ^ ROTR(rk[i - 1], 3, W) ^ ROTR(rk[i - 1], 4, W)); z >>= 1; } return rk; } template <int W> uint64_t simon_encrypt(const uint64_t value, const std::vector<std::bitset<W>>& round_keys) { static_assert(W >= 8, "Minimum word size is 8 bits."); std::bitset<W> left(value >> W); std::bitset<W> right(value); for (int i = 0; i < round_keys.size();) { SIMON_Rx2(right, left, round_keys[i++], round_keys[i++], W); } return (left.to_ullong() << W) | right.to_ullong(); } template <int B> uint64_t index_shuffle(const uint64_t index, const std::array<uint32_t, 3>& key, const uint64_t max_index, const int32_t rounds) { const auto round_keys = simon_key_schedule<B / 2>(key, rounds); uint64_t new_index = index; while (true) { new_index = simon_encrypt<B / 2>(new_index, round_keys); if (new_index <= max_index) { return new_index; } } } #undef ROTL #undef ROTR #undef SIMON_F #undef SIMON_RxC } uint64_t index_shuffle(const uint64_t index, const std::array<uint32_t, 3>& key, const uint64_t max_index, const int32_t rounds) { int block_size = static_cast<int>(std::ceil(std::log2(max_index))); block_size = std::max(block_size + block_size % 2, kMinBlockSize); assert(block_size > 0 && block_size % 2 == 0 && block_size <= 64); assert(rounds >= 4 && rounds % 2 == 0); #define HANDLE_BLOCK_SIZE(B) \ case B: \ return impl::index_shuffle<B>(index, key, max_index, rounds); switch (block_size) { HANDLE_BLOCK_SIZE(16); HANDLE_BLOCK_SIZE(18); HANDLE_BLOCK_SIZE(20); HANDLE_BLOCK_SIZE(22); HANDLE_BLOCK_SIZE(24); HANDLE_BLOCK_SIZE(26); HANDLE_BLOCK_SIZE(28); HANDLE_BLOCK_SIZE(30); HANDLE_BLOCK_SIZE(32); HANDLE_BLOCK_SIZE(34); HANDLE_BLOCK_SIZE(36); HANDLE_BLOCK_SIZE(38); HANDLE_BLOCK_SIZE(40); HANDLE_BLOCK_SIZE(42); HANDLE_BLOCK_SIZE(44); HANDLE_BLOCK_SIZE(46); HANDLE_BLOCK_SIZE(48); HANDLE_BLOCK_SIZE(50); HANDLE_BLOCK_SIZE(52); HANDLE_BLOCK_SIZE(54); HANDLE_BLOCK_SIZE(56); HANDLE_BLOCK_SIZE(58); HANDLE_BLOCK_SIZE(60); HANDLE_BLOCK_SIZE(62); default: return impl::index_shuffle<64>(index, key, max_index, rounds); } #undef HANDLE_BLOCK_SIZE } } }
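A usage sketch for the index_shuffle() defined above (signature and the rounds >= 4 requirement taken from that definition): it maps [0, max_index] onto itself bijectively, which is the property the Bijection test checks. The key below is an arbitrary choice for illustration.

#include <array>
#include <cstdint>
#include <vector>
#include "tensorflow/core/kernels/random_index_shuffle.h"

// Collect the shuffled position of every index in [0, max_index]; the result
// is a permutation of 0..max_index.
std::vector<uint64_t> ShuffledOrder(uint64_t max_index) {
  const std::array<uint32_t, 3> key = {1, 2, 3};  // assumption: any fixed 3-word key
  std::vector<uint64_t> order;
  order.reserve(max_index + 1);
  for (uint64_t i = 0; i <= max_index; ++i) {
    // 4 rounds is the minimum accepted by the assert in index_shuffle().
    order.push_back(tensorflow::random::index_shuffle(i, key, max_index, 4));
  }
  return order;
}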
#include "tensorflow/core/kernels/random_index_shuffle.h" #include <array> #include <vector> #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace random { namespace { class RandomIndexShuffleTest : public ::testing::TestWithParam<uint64_t> { public: uint64_t GetMaxValue() const { return GetParam(); } }; TEST_P(RandomIndexShuffleTest, Bijection) { const std::array<uint32, 3>& key = {42, 73, 1991}; const uint64_t max_value = GetMaxValue(); std::vector<bool> seen(max_value + 1, false); for (uint64_t value = 0; value <= max_value; ++value) { const uint64 output_value = index_shuffle(value, key, max_value, 4); EXPECT_GE(output_value, 0); EXPECT_LE(output_value, max_value); EXPECT_FALSE(seen[output_value]); seen[output_value] = true; } } INSTANTIATE_TEST_SUITE_P(MaxValueTests, RandomIndexShuffleTest, ::testing::Values(285, 17, 23495, 499'000)); } } } int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_index_shuffle.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_index_shuffle_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9cd2f506-a605-4a5f-9004-2be24156c63f
cpp
tensorflow/tensorflow
immutable_constant_op
tensorflow/core/kernels/immutable_constant_op.cc
tensorflow/core/kernels/immutable_constant_op_test.cc
#include "tensorflow/core/kernels/immutable_constant_op.h" #include <unordered_set> #include "tensorflow/core/framework/types.pb.h" namespace tensorflow { namespace { class MemmappedTensorAllocator : public Allocator { public: MemmappedTensorAllocator() {} Status InitializeFromRegion(const string& name, Env* env) { const auto status = env->NewReadOnlyMemoryRegionFromFile(name, &memory_region_); if (!status.ok()) { return status; } return absl::OkStatus(); } string Name() override { return "MemmappedTensorAllocator"; } void* AllocateRaw(size_t alignment, size_t num_bytes) override { if ((reinterpret_cast<intptr_t>(memory_region_->data())) % alignment != 0) { allocation_status_ = errors::Internal("Readonly memory region has wrong alignment"); return nullptr; } if (num_bytes > memory_region_->length()) { allocation_status_ = errors::Internal( "Readonly memory region has wrong length (", memory_region_->length(), ") when allocating ", num_bytes); return nullptr; } return const_cast<void*>(memory_region_->data()); } void DeallocateRaw(void* ptr) override { if (ptr != memory_region_->data()) { LOG(ERROR) << "Deallocating not allocated region for readonly memory region"; } if (delete_on_deallocate_) { delete this; } } const Status& allocation_status() const { return allocation_status_; } void set_delete_on_deallocate() { delete_on_deallocate_ = true; } bool AllocatesOpaqueHandle() const override { return true; } private: std::unique_ptr<ReadOnlyMemoryRegion> memory_region_; Status allocation_status_; bool delete_on_deallocate_ = false; MemmappedTensorAllocator(const MemmappedTensorAllocator&) = delete; void operator=(const MemmappedTensorAllocator&) = delete; }; } ImmutableConstantOp::ImmutableConstantOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr(kMemoryRegionNameAttr, &region_name_)); OP_REQUIRES_OK(context, context->GetAttr(kDTypeAttr, &dtype_)); OP_REQUIRES(context, dtype_ != DT_RESOURCE && dtype_ != DT_VARIANT, errors::InvalidArgument( "Resource and variant dtypes are invalid for this op.")); OP_REQUIRES_OK(context, context->GetAttr(kShapeAttr, &shape_)); } void ImmutableConstantOp::Compute(OpKernelContext* ctx) { std::unique_ptr<MemmappedTensorAllocator> allocator( new MemmappedTensorAllocator()); OP_REQUIRES_OK(ctx, allocator->InitializeFromRegion(region_name_, ctx->env())); OP_REQUIRES(ctx, dtype_ != DT_STRING, errors::Unimplemented("Sorry, DT_STRING is not currently " "supported for ImmutableConstOp.")); ctx->set_output(0, Tensor(allocator.get(), dtype_, shape_)); OP_REQUIRES_OK(ctx, allocator->allocation_status()); allocator.release()->set_delete_on_deallocate(); } ImmutableConstantOp::~ImmutableConstantOp() {} constexpr char const* ImmutableConstantOp::kDTypeAttr; constexpr char const* ImmutableConstantOp::kShapeAttr; constexpr char const* ImmutableConstantOp::kMemoryRegionNameAttr; REGISTER_KERNEL_BUILDER(Name("ImmutableConst").Device(DEVICE_CPU), ImmutableConstantOp); }
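The MemmappedTensorAllocator above only ever hands out the start of the mapped region, and it refuses when the region is misaligned or too short for the requested tensor. A standalone sketch of that precondition check follows; RegionCanBackTensor is a hypothetical helper, not the allocator itself.

#include <cstddef>
#include <cstdint>

// Returns true when a read-only mapping of `length` bytes starting at `data`
// can back a tensor needing `num_bytes` at `alignment`, mirroring the two
// checks in MemmappedTensorAllocator::AllocateRaw.
bool RegionCanBackTensor(const void* data, uint64_t length, size_t alignment,
                         size_t num_bytes) {
  const bool aligned = reinterpret_cast<uintptr_t>(data) % alignment == 0;
  const bool fits = num_bytes <= length;
  return aligned && fits;
}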
#include "tensorflow/core/kernels/immutable_constant_op.h" #include <algorithm> #include <tuple> #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/platform/null_file_system.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace { constexpr size_t kTestAlignment = 4096; constexpr size_t kTestTensorSize = 4; constexpr size_t kTestTensorSizeBytes = kTestTensorSize * sizeof(float); class TestReadOnlyMemoryRegion : public ReadOnlyMemoryRegion { public: TestReadOnlyMemoryRegion() = delete; explicit TestReadOnlyMemoryRegion(uint64 length) : memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)), length_(length) {} ~TestReadOnlyMemoryRegion() override { cpu_allocator()->DeallocateRaw(memptr_); } const void* data() override { return memptr_; } float* GetWritableDataStart() { return reinterpret_cast<float*>(memptr_); } uint64 length() override { return length_; } protected: void* memptr_; uint64 length_; }; class TestFileSystem : public NullFileSystem { public: ~TestFileSystem() override = default; using NullFileSystem::NewReadOnlyMemoryRegionFromFile; Status NewReadOnlyMemoryRegionFromFile( const string& fname, TransactionToken* token, std::unique_ptr<ReadOnlyMemoryRegion>* result) override { float val = 0; StringPiece scheme, host, path; io::ParseURI(fname, &scheme, &host, &path); if (path == "/2") { val = 2.0f; } else if (path == "/3") { val = 3.0f; } else { val = 0.0f; } auto region = new TestReadOnlyMemoryRegion(kTestTensorSizeBytes); std::fill_n(region->GetWritableDataStart(), kTestTensorSize, val); result->reset(region); return absl::OkStatus(); } }; REGISTER_FILE_SYSTEM("test", TestFileSystem); struct ImmutableConstantOpTest {}; TEST(ImmutableConstantOpTest, Simple) { const TensorShape kTestTensorShape({4, 1}); const TensorShape kTestTensorShapeT({1, 4}); auto root = Scope::NewRootScope().ExitOnError(); auto node1 = ops::ImmutableConst(root, DT_FLOAT, kTestTensorShape, "test: auto node2 = ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test: auto result = ops::MatMul(root, node1, node2); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); SessionOptions session_options; session_options.env = Env::Default(); session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_opt_level(OptimizerOptions::L0); std::unique_ptr<Session> session(NewSession(session_options)); ASSERT_TRUE(session != nullptr) << "Failed to create session"; TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph"; std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs)); ASSERT_EQ(outputs.size(), 1); EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f); EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f); EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f); EXPECT_EQ(outputs.front().flat<float>()(kTestTensorSize - 1), 2.0f * 3.0f); } TEST(ImmutableConstantOpTest, ExecutionError) { const TensorShape kBadTensorShape({40, 100}); const TensorShape kTestTensorShapeT({1, 4}); auto root = Scope::DisabledShapeInferenceScope().ExitOnError(); auto node1 = ops::ImmutableConst(root, DT_FLOAT, kBadTensorShape, "test: auto node2 = 
ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test: auto result = ops::MatMul(root, node1, node2); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); SessionOptions session_options; session_options.env = Env::Default(); std::unique_ptr<Session> session(NewSession(session_options)); ASSERT_TRUE(session != nullptr) << "Failed to create session"; TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph"; std::vector<Tensor> outputs; EXPECT_EQ( session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(), error::INTERNAL); } Status CreateTempFileFloat(Env* env, float value, uint64 size, string* filename) { const string dir = testing::TmpDir(); *filename = io::JoinPath(dir, strings::StrCat("file_", value)); std::unique_ptr<WritableFile> file; TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file)); for (uint64 i = 0; i < size; ++i) { StringPiece sp(static_cast<char*>(static_cast<void*>(&value)), sizeof(value)); TF_RETURN_IF_ERROR(file->Append(sp)); } TF_RETURN_IF_ERROR(file->Close()); return absl::OkStatus(); } TEST(ImmutableConstantOpTest, FromFile) { const TensorShape kFileTensorShape({1000, 1}); Env* env = Env::Default(); auto root = Scope::NewRootScope().ExitOnError(); string two_file, three_file; TF_ASSERT_OK(CreateTempFileFloat(env, 2.0f, 1000, &two_file)); TF_ASSERT_OK(CreateTempFileFloat(env, 3.0f, 1000, &three_file)); auto node1 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, two_file); auto node2 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, three_file); auto result = ops::MatMul(root, node1, node2, ops::MatMul::TransposeB(true)); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); SessionOptions session_options; session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_opt_level(OptimizerOptions::L0); std::unique_ptr<Session> session(NewSession(session_options)); ASSERT_TRUE(session != nullptr) << "Failed to create session"; TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph"; std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs)); ASSERT_EQ(outputs.size(), 1); EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f); EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f); EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f); } Status CreateTempFileBadString(Env* env, char value, uint64 size, const string suffix, string* filename) { const string dir = testing::TmpDir(); *filename = io::JoinPath(dir, strings::StrCat("file_", suffix)); std::unique_ptr<WritableFile> file; TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file)); TF_RETURN_IF_ERROR(file->Append(std::string(size, value))); TF_RETURN_IF_ERROR(file->Close()); return absl::OkStatus(); } TEST(ImmutableConstantOpTest, FromFileStringUnimplmented) { const TensorShape kFileTensorShape({1}); Env* env = Env::Default(); auto root = Scope::NewRootScope().ExitOnError(); string bad_file; TF_ASSERT_OK(CreateTempFileBadString(env, '\xe2', 128, "bad_e2", &bad_file)); auto result = ops::ImmutableConst(root, DT_STRING, kFileTensorShape, bad_file); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); SessionOptions session_options; session_options.env = Env::Default(); std::unique_ptr<Session> session(NewSession(session_options)); ASSERT_TRUE(session != nullptr) << "Failed to create session"; TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph"; std::vector<Tensor> outputs; EXPECT_EQ( session->Run({}, {result.node()->name() + ":0"}, {}, 
&outputs).code(), error::UNIMPLEMENTED); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/immutable_constant_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/immutable_constant_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e4d4aaa4-ff5f-4132-8cba-f11290ec6f6f
cpp
tensorflow/tensorflow
quantized_resize_bilinear_op
tensorflow/core/kernels/quantized_resize_bilinear_op.cc
tensorflow/core/kernels/quantized_resize_bilinear_op_test.cc
#define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #define QUANTIZED_RESIZE_BILINEAR_USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/image_resizer_state.h" namespace tensorflow { static constexpr bool USE_REFERENCE = false; namespace { template <typename T_SCALE> struct InterpolationCache { std::vector<int64_t> lower; std::vector<int64_t> upper; std::vector<float> lerp; std::vector<T_SCALE> ilerp; }; template <typename T_SCALE, typename Scaler> inline void ComputeInterpolationWeights( const int64_t out_size, const int64_t in_size, const float scale, const int resolution, InterpolationCache<T_SCALE>* interpolation) { const Scaler scaler; interpolation->lower.resize(out_size + 1); interpolation->upper.resize(out_size + 1); interpolation->lerp.resize(out_size + 1); interpolation->ilerp.resize(out_size + 1); interpolation->lower[out_size] = 0; interpolation->upper[out_size] = 0; for (int64_t i = out_size - 1; i >= 0; --i) { const float in = scaler(i, scale); const float in_f = std::floor(in); interpolation->lower[i] = std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0)); interpolation->upper[i] = std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1); interpolation->lower[i] = std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast<T_SCALE>((in - in_f) * (1 << resolution)); } } template <typename T_SCALE> inline InterpolationCache<T_SCALE> BuildLerpCache( const int64_t out_size, const int64_t in_size, const float scale, const int index_step, const int resolution, const bool half_pixel_centers) { InterpolationCache<T_SCALE> cache; if (half_pixel_centers) { ComputeInterpolationWeights<T_SCALE, HalfPixelScaler>( out_size, in_size, scale, resolution, &cache); } else { ComputeInterpolationWeights<T_SCALE, LegacyScaler>(out_size, in_size, scale, resolution, &cache); } CHECK(index_step > 0); if (index_step > 1) { for (int i = 0; i < cache.lower.size(); ++i) { cache.lower[i] *= index_step; cache.upper[i] *= index_step; } } return cache; } template <typename T> inline T ComputeLerpReference(const T in_top_left, const T in_top_right, const T in_bottom_left, const T in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top_left = QuantizedToFloat<T>(in_top_left, min, max); const float top_right = QuantizedToFloat<T>(in_top_right, min, max); const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max); const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; const float out = top + (bottom - top) * y_lerp; return FloatToQuantized<T>(out, min, max); } template <typename T, typename T_SCALE, typename T_CALC> inline T_CALC MulOffset(T a, T b, T_SCALE c) { return (static_cast<T_CALC>(a) - static_cast<T_CALC>(b)) * static_cast<T_CALC>(c); } template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline T ComputeLerp(const T top_left, const T top_right, const T bottom_left, const T bottom_right, const T_SCALE x_lerp, const T_SCALE y_lerp) { constexpr T_CALC RESOLUTION_MULT = (1 << RESOLUTION); const T_CALC top = static_cast<T_CALC>(top_left) 
* RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(top_right, top_left, x_lerp); const T_CALC bottom = static_cast<T_CALC>(bottom_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(bottom_right, bottom_left, x_lerp); const T_CALC out = top + (bottom - top) / RESOLUTION_MULT * y_lerp; return static_cast<T>( static_cast<int32>((out + RESOLUTION_MULT / 2) / RESOLUTION_MULT)); } #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2, const quint8* v3, const quint8* v4, const quint8* v5, const quint8* v6, const quint8* v7) { static const uint8x8_t ZERO_8x8 = vmov_n_u8(0); uint8x8_t ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v0), ZERO_8x8, 0); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v1), ret, 1); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v2), ret, 2); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v3), ret, 3); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v4), ret, 4); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v5), ret, 5); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v6), ret, 6); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v7), ret, 7); return ret; } inline int16x8_t ToInt16x8(const int16* v0, const int16* v1, const int16* v2, const int16* v3, const int16* v4, const int16* v5, const int16* v6, const int16* v7) { static const int16x8_t ZERO_16x8 = vmovq_n_s16(0); int16x8_t ret = vld1q_lane_s16(v0, ZERO_16x8, 0); ret = vld1q_lane_s16(v1, ret, 1); ret = vld1q_lane_s16(v2, ret, 2); ret = vld1q_lane_s16(v3, ret, 3); ret = vld1q_lane_s16(v4, ret, 4); ret = vld1q_lane_s16(v5, ret, 5); ret = vld1q_lane_s16(v6, ret, 6); ret = vld1q_lane_s16(v7, ret, 7); return ret; } inline int32x2_t ToInt32x2(const qint32* v0, const qint32* v1) { static const int32x2_t ZERO_32x2 = vmov_n_s32(0); const int32x2_t ret0 = vld1_lane_s32(reinterpret_cast<const int32*>(v0), ZERO_32x2, 0); const int32x2_t ret1 = vld1_lane_s32(reinterpret_cast<const int32*>(v1), ret0, 1); return ret1; } template <int RESOLUTION, bool X_LERP_SAME> inline int32x2_t ComputeLerpx2( const qint32* top_left0, const qint32* top_right0, const qint32* bottom_left0, const qint32* bottom_right0, const qint32* top_left1, const qint32* top_right1, const qint32* bottom_left1, const qint32* bottom_right1, const int32* x_lerp, const int32x2_t y_lerpsx) { const int32x2_t x_lerpsx = X_LERP_SAME ? 
vld1_dup_s32(reinterpret_cast<const int32*>(x_lerp)) : vld1_s32(reinterpret_cast<const int32*>(x_lerp)); const int32x2_t top_leftsx = ToInt32x2(top_left0, top_left1); const int32x2_t top_rightsx = ToInt32x2(top_right0, top_right1); const int32x2_t bottom_leftsx = ToInt32x2(bottom_left0, bottom_left1); const int32x2_t bottom_rightsx = ToInt32x2(bottom_right0, bottom_right1); const int32x2_t retval = ComputeLerp32x2<RESOLUTION>(top_leftsx, top_rightsx, bottom_leftsx, bottom_rightsx, x_lerpsx, y_lerpsx); return retval; } template <int RESOLUTION> inline uint8x8_t ComputeLerpx8( const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0, const int16* xlp0, const quint8* tl1, const quint8* tr1, const quint8* bl1, const quint8* br1, const int16* xlp1, const quint8* tl2, const quint8* tr2, const quint8* bl2, const quint8* br2, const int16* xlp2, const quint8* tl3, const quint8* tr3, const quint8* bl3, const quint8* br3, const int16* xlp3, const quint8* tl4, const quint8* tr4, const quint8* bl4, const quint8* br4, const int16* xlp4, const quint8* tl5, const quint8* tr5, const quint8* bl5, const quint8* br5, const int16* xlp5, const quint8* tl6, const quint8* tr6, const quint8* bl6, const quint8* br6, const int16* xlp6, const quint8* tl7, const quint8* tr7, const quint8* bl7, const quint8* br7, const int16* xlp7, const int16x8_t ys_lerpsx) { const uint8x8_t tl8x8 = ToUint8x8(tl0, tl1, tl2, tl3, tl4, tl5, tl6, tl7); const uint8x8_t tr8x8 = ToUint8x8(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7); const uint8x8_t bl8x8 = ToUint8x8(bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7); const uint8x8_t br8x8 = ToUint8x8(br0, br1, br2, br3, br4, br5, br6, br7); const int16x8_t xs_lerpsx = ToInt16x8(xlp0, xlp1, xlp2, xlp3, xlp4, xlp5, xlp6, xlp7); return ComputeLerp8x8<RESOLUTION>(tl8x8, tr8x8, bl8x8, br8x8, xs_lerpsx, ys_lerpsx); } template <int RESOLUTION, int ID0, int CH0, int ID1, int CH1, int ID2, int CH2, int ID3, int CH3, int ID4, int CH4, int ID5, int CH5, int ID6, int CH6, int ID7, int CH7> inline uint8x8_t ComputeLerpx8Tmpl(const quint8* const yl, const quint8* yu, const int64* xl, const int64* xu, const int16* xlp, const int16x8_t ys_lerpsx) { return ComputeLerpx8<RESOLUTION>( yl + xl[ID0] + CH0, yl + xu[ID0] + CH0, yu + xl[ID0] + CH0, yu + xu[ID0] + CH0, xlp + ID0, yl + xl[ID1] + CH1, yl + xu[ID1] + CH1, yu + xl[ID1] + CH1, yu + xu[ID1] + CH1, xlp + ID1, yl + xl[ID2] + CH2, yl + xu[ID2] + CH2, yu + xl[ID2] + CH2, yu + xu[ID2] + CH2, xlp + ID2, yl + xl[ID3] + CH3, yl + xu[ID3] + CH3, yu + xl[ID3] + CH3, yu + xu[ID3] + CH3, xlp + ID3, yl + xl[ID4] + CH4, yl + xu[ID4] + CH4, yu + xl[ID4] + CH4, yu + xu[ID4] + CH4, xlp + ID4, yl + xl[ID5] + CH5, yl + xu[ID5] + CH5, yu + xl[ID5] + CH5, yu + xu[ID5] + CH5, xlp + ID5, yl + xl[ID6] + CH6, yl + xu[ID6] + CH6, yu + xl[ID6] + CH6, yu + xu[ID6] + CH6, xlp + ID6, yl + xl[ID7] + CH7, yl + xu[ID7] + CH7, yu + xl[ID7] + CH7, yu + xu[ID7] + CH7, xlp + ID7, ys_lerpsx); } #endif template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline void OutputLerpForChannels(const InterpolationCache<T_SCALE>& xs, const int64_t x, const T_SCALE ys_ilerp, const int channels, const float min, const float max, const T* ys_input_lower_ptr, const T* ys_input_upper_ptr, T* output_y_ptr) { const int64_t xs_lower = xs.lower[x]; const int64_t xs_upper = xs.upper[x]; const T_SCALE xs_ilerp = xs.ilerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = 
ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerp<RESOLUTION, T, T_SCALE, T_CALC>( top_left, top_right, bottom_left, bottom_right, xs_ilerp, ys_ilerp); output_y_ptr[x * channels + c] = val; } } template <int RES> inline void OutputLerp8x8x1(const InterpolationCache<int16>& xs, const int64_t x_start, const int16_t ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0x7 = ComputeLerpx8Tmpl<RES, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start), x0x7); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RES> inline void OutputLerp8x8x3(const InterpolationCache<int16>& xs, const int64_t x_start, const int16_t ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0c0x2c1 = ComputeLerpx8Tmpl<RES, 0, 0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 2, 2, 0, 2, 1>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3), x0c0x2c1); const uint8x8_t x2c2x5c0 = ComputeLerpx8Tmpl<RES, 2, 2, 3, 0, 3, 1, 3, 2, 4, 0, 4, 1, 4, 2, 5, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 8), x2c2x5c0); const uint8x8_t x5c1x7c2 = ComputeLerpx8Tmpl<RES, 5, 1, 5, 2, 6, 0, 6, 1, 6, 2, 7, 0, 7, 1, 7, 2>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 16), x5c1x7c2); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x1(const InterpolationCache<int32>& xs, const int64_t x_start, const int32_t ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0x1 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, 
ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1x2 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x4_t x0x1x2x3 = vcombine_s32(x0x1, x1x2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start), x0x1x2x3); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64_t>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x3(const InterpolationCache<int32>& xs, const int64_t x_start, const int32_t ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int32* const xs_ilerp1 = &xs.ilerp[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32* const xs_ilerp3 = &xs.ilerp[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0c0x0c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower0 + 1, ys_input_lower_ptr + xs_upper0 + 1, ys_input_upper_ptr + xs_lower0 + 1, ys_input_upper_ptr + xs_upper0 + 1, xs_ilerp0, y_lerpsx); const int32x2_t x0c2x1c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0 + 2, ys_input_lower_ptr + xs_upper0 + 2, ys_input_upper_ptr + xs_lower0 + 2, ys_input_upper_ptr + xs_upper0 + 2, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1c1x1c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower1 + 1, ys_input_lower_ptr + xs_upper1 + 1, ys_input_upper_ptr + xs_lower1 + 1, ys_input_upper_ptr + xs_upper1 + 1, ys_input_lower_ptr + xs_lower1 + 2, ys_input_lower_ptr + xs_upper1 + 2, ys_input_upper_ptr + xs_lower1 + 2, ys_input_upper_ptr + xs_upper1 + 2, xs_ilerp1, y_lerpsx); const int32x2_t x2c0x2c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower2 + 1, ys_input_lower_ptr + xs_upper2 + 1, ys_input_upper_ptr + xs_lower2 + 1, ys_input_upper_ptr + xs_upper2 + 1, xs_ilerp2, y_lerpsx); const int32x2_t x2c2x3c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2 + 2, ys_input_lower_ptr + xs_upper2 + 2, ys_input_upper_ptr + xs_lower2 + 2, ys_input_upper_ptr + xs_upper2 + 2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, 
y_lerpsx); const int32x2_t x3c1x3c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower3 + 1, ys_input_lower_ptr + xs_upper3 + 1, ys_input_upper_ptr + xs_lower3 + 1, ys_input_upper_ptr + xs_upper3 + 1, ys_input_lower_ptr + xs_lower3 + 2, ys_input_lower_ptr + xs_upper3 + 2, ys_input_upper_ptr + xs_lower3 + 2, ys_input_upper_ptr + xs_upper3 + 2, xs_ilerp3, y_lerpsx); const int32x4_t x0c0x0c1x0c2x1c0 = vcombine_s32(x0c0x0c1, x0c2x1c0); const int32x4_t x1c1x1c2x2c0x2c1 = vcombine_s32(x1c1x1c2, x2c0x2c1); const int32x4_t x2c2x3c0x3c1x3c2 = vcombine_s32(x2c2x3c0, x3c1x3c2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3), x0c0x0c1x0c2x1c0); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 4), x1c1x1c2x2c0x2c1); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 8), x2c2x3c0x3c1x3c2); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64_t>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <typename T> void ResizeImageReference(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const InterpolationCache<float> xs = BuildLerpCache<float>( out_width, in_width, width_scale, channels, 0, half_pixel_centers); const InterpolationCache<float> ys = BuildLerpCache<float>( out_height, in_height, height_scale, 1, 0, half_pixel_centers); const int64_t in_row_size = in_width * channels; const int64_t in_batch_num_values = in_height * in_row_size; const int64_t out_row_size = out_width * channels; const T* input_b_ptr = images.data(); T* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64_t y = 0; y < out_height; ++y) { const T* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const T* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const float ys_lerp = ys.lerp[y]; for (int64_t x = 0; x < out_width; ++x) { const int64_t xs_lower = xs.lower[x]; const int64_t xs_upper = xs.upper[x]; const float xs_lerp = xs.lerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerpReference<T>( top_left, top_right, bottom_left, bottom_right, xs_lerp, ys_lerp, in_min, in_max); output_y_ptr[x * channels + c] = val; } } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeImage(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } template <> void ResizeImage<qint32>(typename TTypes<qint32, 4>::ConstTensor images, const int batch_size, const int64_t in_height, 
const int64_t in_width, const int64_t out_height, const int64_t out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<qint32, 4>::Tensor* output) { constexpr int RESOLUTION = 30; constexpr int SIMD_STEP = 4; CHECK_NOTNULL(output); const InterpolationCache<int32> xs = BuildLerpCache<int32>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int32> ys = BuildLerpCache<int32>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64_t in_row_size = in_width * channels; const int64_t in_batch_num_values = in_height * in_row_size; const int64_t out_row_size = out_width * channels; const qint32* input_b_ptr = images.data(); qint32* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64_t y = 0; y < out_height; ++y) { const qint32* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const qint32* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32_t ys_ilerp = ys.ilerp[y]; int64_t x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64_t>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <> void ResizeImage<quint8>(typename TTypes<quint8, 4>::ConstTensor images, const int batch_size, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<quint8, 4>::Tensor* output) { constexpr int RESOLUTION = 7; constexpr int SIMD_STEP = 8; CHECK_NOTNULL(output); const InterpolationCache<int16> xs = BuildLerpCache<int16>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int16> ys = BuildLerpCache<int16>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64_t in_row_size = in_width * channels; const int64_t in_batch_num_values = in_height * in_row_size; const int64_t out_row_size = out_width * channels; const quint8* input_b_ptr = images.data(); quint8* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64_t y = 0; y < out_height; ++y) { const quint8* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const quint8* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32_t ys_ilerp = ys.ilerp[y]; int64_t x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, quint8, 
int16, int16>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeBilinear(const typename TTypes<T, 4>::ConstTensor& images, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const int batch_size = images.dimension(0); const int64_t in_height = images.dimension(1); const int64_t in_width = images.dimension(2); const int channels = images.dimension(3); const int64_t out_height = output->dimension(1); const int64_t out_width = output->dimension(2); if (out_height == in_height && out_width == in_width) { *output = images.template cast<T>(); return; } if (USE_REFERENCE) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } else { ResizeImage<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } } } template <class T> class QuantizedResizeBilinearOp : public OpKernel { public: explicit QuantizedResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_)); OP_REQUIRES_OK( context, context->GetAttr("half_pixel_centers", &half_pixel_centers_)); } void Compute(OpKernelContext* context) override { const auto& in_min_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), errors::InvalidArgument("min must be a scalar")); const float in_min = in_min_tensor.flat<float>()(0); const auto& in_max_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), errors::InvalidArgument("max must be a scalar")); const float in_max = in_max_tensor.flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; } private: bool align_corners_; bool half_pixel_centers_; QuantizedResizeBilinearOp<T>(const QuantizedResizeBilinearOp<T>&) = delete; void operator=(const QuantizedResizeBilinearOp<T>&) = delete; }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedResizeBilinear") \ .Device(DEVICE_CPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ QuantizedResizeBilinearOp<type>) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); REGISTER_CPU_KERNEL(float); }
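For orientation, here is a minimal, self-contained sketch (not the kernel code above, and not a TensorFlow API) of the fixed-point bilinear interpolation idea the quantized paths rely on: the x/y interpolation weights are fractions scaled by 2^RESOLUTION, the lerp is carried out in integers, and the result is rounded back down by RESOLUTION bits. The function name and the example values are hypothetical.

// Illustrative sketch only -- a hypothetical helper, not the TensorFlow
// implementation above. The weights x_lerp/y_lerp are fractions scaled by
// 2^RESOLUTION.
#include <cstdint>
#include <iostream>

template <int RESOLUTION>
int64_t LerpFixedPoint(int64_t top_left, int64_t top_right,
                       int64_t bottom_left, int64_t bottom_right,
                       int64_t x_lerp, int64_t y_lerp) {
  // Horizontal lerp on the top and bottom rows; results are scaled by
  // 2^RESOLUTION.
  const int64_t top =
      (top_left << RESOLUTION) + (top_right - top_left) * x_lerp;
  const int64_t bottom =
      (bottom_left << RESOLUTION) + (bottom_right - bottom_left) * x_lerp;
  // Vertical lerp between the two intermediate values.
  const int64_t out = top + ((bottom - top) >> RESOLUTION) * y_lerp;
  // Round to nearest and drop the fractional bits again.
  return (out + (int64_t{1} << (RESOLUTION - 1))) >> RESOLUTION;
}

int main() {
  constexpr int kResolution = 7;  // same number of fractional bits as the quint8 path
  const int64_t half = int64_t{1} << (kResolution - 1);  // weight of 0.5
  // Midpoint of the 2x2 patch {0, 10; 20, 30} is (0+10+20+30)/4 = 15.
  std::cout << LerpFixedPoint<kResolution>(0, 10, 20, 30, half, half) << "\n";
  return 0;
}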
#define EIGEN_USE_THREADS #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/core/common_runtime/gradients.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { constexpr const float RESIZE_VAL_TOLERANCE = 1.0e-8; template <typename T> Tensor BuildTensor(const int batch_size, const int height, const int width, const int channels, const float ratio, const float min, const float max) { Tensor tensor(DataTypeToEnum<T>::value, TensorShape({batch_size, height, width, channels})); for (int64_t i = 0; i < tensor.NumElements(); ++i) { tensor.flat<T>()(i) = FloatToQuantized<T>(static_cast<float>(i) / ratio, min, max); } return tensor; } template <> Tensor BuildTensor<float>(const int batch_size, const int height, const int width, const int channels, const float ratio, const float min, const float max) { Tensor tensor(DT_FLOAT, TensorShape({batch_size, height, width, channels})); for (int64_t i = 0; i < tensor.NumElements(); ++i) { tensor.flat<float>()(i) = static_cast<float>(i) / ratio; } return tensor; } float CalculateResizeScale(int64_t in_size, int64_t out_size, bool align_corners) { return (align_corners && out_size > 1) ? (in_size - 1) / static_cast<float>(out_size - 1) : in_size / static_cast<float>(out_size); } inline std::tuple<int64_t, int64_t, float> GetReferenceWeight( const bool half_pixel_centers, const int64_t out_size, const int64_t in_size, const int step, const int index, const float scale) { const float in = half_pixel_centers ? 
(static_cast<float>(index) + 0.5f) * scale - 0.5f : index * scale; const float in_f = std::floor(in); const int64_t lower = std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0)); const int64_t upper = std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1); return std::make_tuple(lower * step, upper * step, in - in_f); } template <typename T> T ComputeLerpReference(const T in_top_left, const T in_top_right, const T in_bottom_left, const T in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top_left = QuantizedToFloat<T>(in_top_left, min, max); const float top_right = QuantizedToFloat<T>(in_top_right, min, max); const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max); const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; const float out = top + (bottom - top) * y_lerp; return FloatToQuantized<T>(out, min, max); } template <> float ComputeLerpReference<float>(const float in_top_left, const float in_top_right, const float in_bottom_left, const float in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top = in_top_left + (in_top_right - in_top_left) * x_lerp; const float bottom = in_bottom_left + (in_bottom_right - in_bottom_left) * x_lerp; return top + (bottom - top) * y_lerp; } template <typename T> T CalcReferenceResizedVal(const T* image_data, const bool half_pixel_centers, const int batch_size, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int channels, const float height_scale, const float width_scale, const float min, const float max, const int b, const int64_t x, const int64_t y, const int c) { const std::tuple<int64_t, int64_t, float> x_weight = GetReferenceWeight( half_pixel_centers, out_width, in_width, channels, x, width_scale); const std::tuple<int64_t, int64_t, float> y_weight = GetReferenceWeight( half_pixel_centers, out_height, in_height, 1, y, height_scale); const int64_t in_row_size = in_width * channels; const int64_t in_batch_num_values = in_height * in_row_size; const int y_lower_index = b * in_batch_num_values + std::get<0>(y_weight) * in_row_size; const int y_upper_index = b * in_batch_num_values + std::get<1>(y_weight) * in_row_size; const int64_t xs_lower = std::get<0>(x_weight); const int64_t xs_upper = std::get<1>(x_weight); const float xs_lerp = std::get<2>(x_weight); const float ys_lerp = std::get<2>(y_weight); const float top_left = image_data[y_lower_index + xs_lower + c]; const float top_right = image_data[y_lower_index + xs_upper + c]; const float bottom_left = image_data[y_upper_index + xs_lower + c]; const float bottom_right = image_data[y_upper_index + xs_upper + c]; const float val = ComputeLerpReference<T>(top_left, top_right, bottom_left, bottom_right, xs_lerp, ys_lerp, min, max); return val; } template <typename T> void CheckTensorValue(const T* in_data, const T* out_data, const int batch_size, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int channels, const bool align_corners, const bool half_pixel_centers, const float min, const float max, const float tolerance, const bool relative) { const int64_t out_row_size = out_width * channels; const float height_scale = CalculateResizeScale(in_height, out_height, align_corners); const float width_scale = 
CalculateResizeScale(in_width, out_width, align_corners); for (int b = 0; b < batch_size; ++b) { for (int64_t y = 0; y < out_height; ++y) { for (int64_t x = 0; x < out_width; ++x) { for (int c = 0; c < channels; ++c) { const T ref_qval = CalcReferenceResizedVal<T>( in_data, half_pixel_centers, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, min, max, b, x, y, c); const T qval = out_data[(b * out_height + y) * out_row_size + x * channels + c]; const float ref_val = QuantizedToFloat<T>(ref_qval, min, max); const float val = QuantizedToFloat<T>(qval, min, max); if (!relative) { const int q_tolerance = std::round(tolerance); EXPECT_TRUE(std::abs(static_cast<int32>(ref_qval) - static_cast<int32>(qval)) <= q_tolerance) << "ref = " << ref_val << ", val = " << val << ", " << b << ", " << y << ", " << x << ", " << c << ", qval = " << qval << ", ref qval = " << ref_qval << ", " << q_tolerance; } else { const float rel_tolerance = std::max(ref_val, 1.0f) * tolerance; EXPECT_NEAR(ref_val, val, rel_tolerance) << "ref = " << ref_val << ", val = " << val << ", " << b << ", " << y << ", " << x << ", " << c << ", ref qval = " << qval; } } } } } } void TestResizeBilinear(const Tensor& image_tensor, const DataType dt, const Input::Initializer& new_size, const bool show_time, const int64_t iterations, const float min, const float max, const bool half_pixel_centers, std::vector<Tensor>* outputs) { Scope root = Scope::NewRootScope(); Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), dt); Output size = ops::Const<int32>(root.WithOpName("size"), new_size); Output in_min = ops::Const<float>(root.WithOpName("min"), min); Output in_max = ops::Const<float>(root.WithOpName("max"), max); ops::QuantizedResizeBilinear qrb = ops::QuantizedResizeBilinear( root.WithOpName("qrb"), placeholder, size, in_min, in_max, ops::QuantizedResizeBilinear::HalfPixelCenters(half_pixel_centers)); TF_EXPECT_OK(root.status()); ClientSession session(root); int64_t total_duration = 0; outputs->clear(); for (int i = 0; i < iterations; ++i) { const int64_t start_time = Env::Default()->NowMicros(); TF_EXPECT_OK(session.Run({{placeholder, image_tensor}}, {qrb.resized_images, qrb.out_min, qrb.out_max}, outputs)); const int64_t end_time = Env::Default()->NowMicros(); total_duration += end_time - start_time; } const int64_t one_run_duration = total_duration / iterations; const int64_t num_ops = outputs->at(0).NumElements(); const double million_ops_per_second = (iterations * num_ops) / static_cast<double>(total_duration); if (show_time) { LOG(INFO) << "Time resize bilinear: " << TensorShape(image_tensor.shape()).DebugString() << ": iterations=" << iterations << ", MOps/s=" << million_ops_per_second << ", one_run_duration=" << one_run_duration << ", total_duration=" << total_duration; } } } void TestResizeBilinearOneDim() { constexpr float TOLERANCE = 1.0e-5; constexpr int IN_WIDTH = 128; constexpr int OUT_WIDTH = 256; constexpr float MIN = 0.0f; constexpr float MAX = 256.0f; constexpr float SCALE = static_cast<float>(IN_WIDTH) / OUT_WIDTH; Tensor image_quantized_tensor(DT_QINT32, TensorShape({1, 1, IN_WIDTH, 1})); for (int64_t i = 0; i < image_quantized_tensor.NumElements(); ++i) { image_quantized_tensor.flat<qint32>()(i) = FloatToQuantized<qint32>(static_cast<float>(i), MIN, MAX); } std::vector<Tensor> outputs; TestResizeBilinear(image_quantized_tensor, DT_QINT32, {1, OUT_WIDTH}, false, 1, MIN, MAX, false, &outputs); ASSERT_EQ(3, outputs.size()); ASSERT_EQ(OUT_WIDTH, 
outputs.at(0).NumElements()); ASSERT_EQ(4, outputs.at(0).shape().dims()); ASSERT_EQ(OUT_WIDTH, outputs.at(0).shape().dim_size(2)); for (int64_t i = 0; i < outputs.at(0).NumElements(); ++i) { const float resized_image_val = QuantizedToFloat<qint32>(outputs.at(0).flat<qint32>()(i), MIN, MAX); float expected_val = 0.0f; if (i == 0 || i == outputs.at(0).NumElements() - 1 || i % 2 == 0) { expected_val = QuantizedToFloat<qint32>( image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX); } else { const float image_val0 = QuantizedToFloat<qint32>( image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX); const float image_val1 = QuantizedToFloat<qint32>( image_quantized_tensor.flat<qint32>()(i / 2 + 1), MIN, MAX); expected_val = (image_val0 + image_val1) * SCALE; } VLOG(1) << "(" << i << ") " << expected_val << ", " << resized_image_val; EXPECT_NEAR(expected_val, resized_image_val, RESIZE_VAL_TOLERANCE) << expected_val << ", " << resized_image_val; } CheckTensorValue<qint32>(image_quantized_tensor.flat<qint32>().data(), outputs.at(0).flat<qint32>().data(), 1, IN_WIDTH, 1, OUT_WIDTH, 1, 1, false, false, MIN, MAX, TOLERANCE, true); } template <typename T> void RunTestResizeBilinearTwoDims(int batch_size, int in_height, int in_width, int out_height, int out_width, int channels, float tolerance, bool relative, const bool half_pixel_centers) { constexpr float RATIO = 100.0f; const float min = 0.0f; const float max = batch_size * in_height * in_width * channels / RATIO; const Tensor image_quantized_tensor = BuildTensor<T>( batch_size, in_height, in_width, channels, RATIO, min, max); std::vector<Tensor> outputs; TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value, {out_height, out_width}, false, 1, min, max, half_pixel_centers, &outputs); CheckTensorValue<T>( image_quantized_tensor.flat<T>().data(), outputs.at(0).flat<T>().data(), batch_size, in_height, in_width, out_height, out_width, channels, false, half_pixel_centers, min, max, tolerance, relative); } template <typename T> void RunBenchmarkResizeBilinearTwoDims(int batch_size, int in_height, int in_width, int out_height, int out_width, int channels, int iteration, const bool half_pixel_centers) { constexpr float RATIO = 100.0f; const float min = 0.0f; const float max = batch_size * in_height * in_width * channels / RATIO; const Tensor image_quantized_tensor = BuildTensor<T>( batch_size, in_height, in_width, channels, RATIO, min, max); std::vector<Tensor> outputs; TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value, {out_height, out_width}, true, iteration, min, max, false, &outputs); } template <typename T> void TestResizeBilinearTwoDimsType(const float tolerance, const bool relative, const bool half_pixel_centers) { RunTestResizeBilinearTwoDims<T>(1, 1, 1, 1, 1, 1, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 1, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 1, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 1, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 2, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 2, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 2, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 256, 
256, 128, 128, 2, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 1, 16, 1, 32, 3, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 3, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, tolerance, relative, half_pixel_centers); RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 3, tolerance, relative, half_pixel_centers); } void TestResizeBilinearTwoDims() { for (const bool half_pixel_centers : {false, true}) { TestResizeBilinearTwoDimsType<quint8>(1.0f, false, half_pixel_centers); TestResizeBilinearTwoDimsType<qint32>(1.0e-5, true, half_pixel_centers); TestResizeBilinearTwoDimsType<float>(1.0e-5, true, half_pixel_centers); } } template <typename T> void RunBenchmarkResizeBilinearTwoDimsType() { constexpr int ITER = 100; RunBenchmarkResizeBilinearTwoDims<T>(1, 1, 1, 2, 2, 1, ITER, false); RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, ITER, false); RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, ITER, false); RunBenchmarkResizeBilinearTwoDims<T>(1, 64, 64, 128, 128, 2, ITER, false); RunBenchmarkResizeBilinearTwoDims<T>(1, 32, 32, 64, 64, 16, ITER, false); } void RunBenchmarkResizeBilinearTwoDims() { LOG(INFO) << "Benchmark quint8"; RunBenchmarkResizeBilinearTwoDimsType<quint8>(); LOG(INFO) << "Benchmark qint32"; RunBenchmarkResizeBilinearTwoDimsType<qint32>(); LOG(INFO) << "Benchmark float"; RunBenchmarkResizeBilinearTwoDimsType<float>(); } } #define RUN_TEST(t) \ TEST(QuantizationResizeBilinearTest, t) { tensorflow::t(); } RUN_TEST(TestResizeBilinearOneDim); RUN_TEST(TestResizeBilinearTwoDims); #if defined(__ANDROID__) RUN_TEST(RunBenchmarkResizeBilinearTwoDims); #endif int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
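As a quick illustration of the scale convention the test depends on, the following tiny standalone program exercises the same formula: with align_corners the corner pixels of the input and output grids coincide, so the scale is (in-1)/(out-1); otherwise it is in/out. The helper mirrors CalculateResizeScale from the test above; the main() driver and printed values are added for illustration only.

// Illustrative sketch only; driver code is hypothetical.
#include <cstdint>
#include <iostream>

float CalculateResizeScale(int64_t in_size, int64_t out_size,
                           bool align_corners) {
  return (align_corners && out_size > 1)
             ? (in_size - 1) / static_cast<float>(out_size - 1)
             : in_size / static_cast<float>(out_size);
}

int main() {
  std::cout << CalculateResizeScale(128, 256, false) << "\n";  // 0.5
  std::cout << CalculateResizeScale(128, 256, true) << "\n";   // 127/255, about 0.498
  return 0;
}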
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_resize_bilinear_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_resize_bilinear_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6e0a30ac-5324-4c52-88ed-dcda49ff08f4
cpp
tensorflow/tensorflow
collective_ops
tensorflow/core/ops/collective_ops.cc
third_party/xla/xla/tests/collective_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { REGISTER_OP("CollectiveReduce") .Input("input: T") .Output("data: T") .Attr("T: {bfloat16, float, float16, float64, int32, int64}") .Attr("group_size: int") .Attr("group_key: int") .Attr("instance_key: int") .Attr("merge_op: {'Min', 'Max', 'Mul', 'Add'}") .Attr("final_op: {'Id', 'Div'}") .Attr("subdiv_offsets: list(int)") .Attr("wait_for: list(int) = []") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("CollectiveGather") .Input("input: T") .Output("data: T") .Attr("T: {float, float16, float64, int32, int64}") .Attr("group_size: int") .Attr("group_key: int") .Attr("instance_key: int") .Attr("shape: shape") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); shape_inference::ShapeHandle in_subshape; TF_RETURN_IF_ERROR(c->Subshape(c->input(0), 1, &in_subshape)); auto input_first_dim_value = c->Value(c->Dim(c->input(0), 0)); shape_inference::ShapeHandle output_first_dim_as_shape; if (input_first_dim_value == shape_inference::InferenceContext::kUnknownDim) { output_first_dim_as_shape = c->Vector(shape_inference::InferenceContext::kUnknownDim); } else { int group_size; TF_CHECK_OK(c->GetAttr("group_size", &group_size)); std::vector<shape_inference::DimensionHandle> output_first_dim; output_first_dim.push_back( c->MakeDim(group_size * input_first_dim_value)); output_first_dim_as_shape = c->MakeShape(output_first_dim); } shape_inference::ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(output_first_dim_as_shape, in_subshape, &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("CollectiveBcastSend") .Input("input: T") .Output("data: T") .Attr("T: {bool, float, float16, float64, int32, int64}") .Attr("group_size: int") .Attr("group_key: int") .Attr("instance_key: int") .Attr("shape: shape") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::ExplicitShape); REGISTER_OP("CollectiveBcastRecv") .Output("data: T") .Attr("T: {bool, float, float16, float64, int32, int64}") .Attr("group_size: int") .Attr("group_key: int") .Attr("instance_key: int") .Attr("shape: shape") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::ExplicitShape); REGISTER_OP("CollectiveAssignGroupV2") .Input("group_assignment: int32") .Input("device_index: int32") .Input("base_key: int32") .Output("group_size: int32") .Output("group_key: int32") .SetDoNotOptimize() .SetIsDistributedCommunication() .SetShapeFn([](shape_inference::InferenceContext* c) { c->set_output(0, c->Scalar()); c->set_output(1, c->Scalar()); return absl::OkStatus(); }); REGISTER_OP("CollectiveReduceV2") .Input("input: T") .Output("data: T") .Attr("T: {bfloat16, float, float16, float64, int32, int64}") .Input("group_size: int32") .Input("group_key: int32") .Input("instance_key: int32") .Input("ordering_token: Nordering_token * resource") .Attr("merge_op: {'Min', 'Max', 'Mul', 'Add'}") 
.Attr("final_op: {'Id', 'Div'}") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .Attr("is_stateless: bool = false") .Attr("Nordering_token: int >= 0 = 0") .Attr("max_subdivs_per_device: int = -1") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("CollectiveReduceScatterV2") .Input("input: T") .Output("data: T") .Attr("T: {bfloat16, float, float16, float64, int32, int64}") .Input("group_size: int32") .Input("group_key: int32") .Input("instance_key: int32") .Input("ordering_token: Nordering_token * resource") .Attr("merge_op: {'Min', 'Max', 'Mul', 'Add'}") .Attr("final_op: {'Id', 'Div'}") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .Attr("is_stateless: bool = false") .Attr("Nordering_token: int >= 0 = 0") .Attr("max_subdivs_per_device: int = -1") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); shape_inference::ShapeHandle out; TF_RETURN_IF_ERROR( c->ReplaceDim(c->input(0), 0, c->UnknownDim(), &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("CollectiveGatherV2") .Input("input: T") .Output("data: T") .Attr("T: {float, float16, float64, int32, int64}") .Input("group_size: int32") .Input("group_key: int32") .Input("instance_key: int32") .Input("ordering_token: Nordering_token * resource") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .Attr("is_stateless: bool = false") .Attr("Nordering_token: int >= 0 = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); shape_inference::ShapeHandle out; TF_RETURN_IF_ERROR( c->ReplaceDim(c->input(0), 0, c->UnknownDim(), &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("CollectiveBcastSendV2") .Input("input: T") .Output("data: T") .Attr("T: {bool, float, float16, float64, int32, int64}") .Input("group_size: int32") .Input("group_key: int32") .Input("instance_key: int32") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("CollectiveBcastRecvV2") .Output("data: T") .Attr("T: {bool, float, float16, float64, int32, int64}") .Input("group_size: int32") .Input("group_key: int32") .Input("instance_key: int32") .Input("shape: Tshape") .Attr("Tshape: {int32, int64} = DT_INT32") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(3, &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("CollectiveInitializeCommunicator") .Input("group_key: int32") .Input("rank: int32") .Input("group_size: int32") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .Output("communicator: resource") .SetDoNotOptimize() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("CollectiveReduceV3") .Input("input: T") .Input("communicator: resource") .Input("group_assignment: int32") .Output("data: T") .Attr("T: {bfloat16, float, float16, float64, 
int32, int64}") .Attr("reduction: {'Min', 'Max', 'Mul', 'Add'}") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("CollectiveAllToAllV2") .Input("input: T") .Output("data: T") .Attr("T: {bfloat16, float, float16, float64, int32, int64}") .Input("group_size: int32") .Input("group_key: int32") .Input("instance_key: int32") .Input("ordering_token: Nordering_token * resource") .Attr("communication_hint: string = 'auto'") .Attr("timeout_seconds: float = 0") .Attr("is_stateless: bool = false") .Attr("Nordering_token: int >= 0 = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("CollectiveAllToAllV3") .Input("input: T") .Input("communicator: resource") .Input("group_assignment: int32") .Output("data: T") .Attr("T: {bfloat16, float, float16, float64, int32, int64}") .Attr("timeout_seconds: float = 0") .SetIsStateful() .SetIsDistributedCommunication() .SetShapeFn(shape_inference::UnchangedShape); }
#include <array> #include <cstdint> #include <limits> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/str_replace.h" #include "absl/types/span.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/primitive_util.h" #include "xla/service/computation_placer.h" #include "xla/service/executable.h" #include "xla/service/hlo_module_config.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/test_macros.h" #include "xla/tests/test_utils.h" #include "xla/tests/verified_hlo_module.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/blocking_counter.h" #include "tsl/platform/env.h" #include "tsl/platform/threadpool.h" namespace xla { namespace { class CollectiveOpsTest : public HloTestBase { public: CollectiveOpsTest() { VLOG(1) << "Running with " << num_devices() << " devices"; } protected: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.add_xla_disable_hlo_passes( "gpu-convert-async-collectives-to-sync"); return debug_options; } std::unique_ptr<HloModule> MakeCrsModule( const Shape& shape, std::vector<std::vector<int64_t>> replica_groups, const HloModuleConfig& config, std::string op = "add", std::string datatype = "f32") { std::string hlo_template = R"( HloModule test apply_op { x = DATATYPE[] parameter(0) y = DATATYPE[] parameter(1) ROOT apply_op = DATATYPE[] OP(x, y) } ENTRY test_computation { p = SHAPE parameter(0) p2 = SHAPE reshape(p) crs = SHAPE all-reduce(p2), replica_groups=REPLICA_GROUPS, to_apply=apply_op copy = SHAPE copy(crs) ROOT out = SHAPE reshape(copy) } )"; std::vector<std::string> replica_group_strs; replica_group_strs.reserve(replica_groups.size()); for (const auto& g : replica_groups) { replica_group_strs.push_back( absl::StrFormat("{%s}", absl::StrJoin(g, ","))); } std::string shape_str = shape.ToString(false); if (shape_str == "f32[1]") { hlo_template = absl::StrReplaceAll( hlo_template, {{"DATATYPE[SHAPE] reshape(p)", "DATATYPE[] reshape(p)"}, {"DATATYPE[SHAPE] all-reduce", "DATATYPE[] all-reduce"}, {"DATATYPE[SHAPE] copy", "DATATYPE[] copy"}}); } std::string parameterized_hlo = absl::StrReplaceAll( hlo_template, {{"SHAPE", shape_str}, {"REPLICA_GROUPS", absl::StrFormat("{%s}", absl::StrJoin(replica_group_strs, ", "))}, {"OP", op}, {"DATATYPE", datatype}}); return ParseAndReturnVerifiedModule(parameterized_hlo, config).value(); } template <typename LiteralType> void TestTwoReplicasOneOperand(std::string op, Literal input_value, Literal expected_value) { const int kNumReplicas = 2; std::string dtype = primitive_util::LowercasePrimitiveTypeName( primitive_util::NativeToPrimitiveType<LiteralType>()); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); auto module = MakeCrsModule( input_value.shape(), {}, config, op, dtype); TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> results, ExecuteReplicated(std::move(module), {&input_value}, kNumReplicas, true, true)); for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) { EXPECT_TRUE(LiteralTestUtil::NearOrEqual( expected_value, results[replica_idx], ErrorSpec{1e-5, 1e-5})); } } template <typename LiteralType> void TestAllOpsForReduce() { auto cast = [&](int value) { return static_cast<LiteralType>(value); }; auto to_literal = [&](absl::Span<const LiteralType> values) { return LiteralUtil::CreateR1<LiteralType>(values); }; Literal input_value = to_literal({cast(1), cast(2), cast(3)}); 
TestTwoReplicasOneOperand<LiteralType>( "add", input_value.Clone(), to_literal({cast(2), cast(4), cast(6)})); TestTwoReplicasOneOperand<LiteralType>( "multiply", input_value.Clone(), to_literal({cast(1), cast(4), cast(9)})); TestTwoReplicasOneOperand<LiteralType>( "maximum", input_value.Clone(), to_literal({cast(1), cast(2), cast(3)})); TestTwoReplicasOneOperand<LiteralType>( "minimum", input_value.Clone(), to_literal({cast(1), cast(2), cast(3)})); if constexpr (std::numeric_limits<LiteralType>::is_signed) { input_value = to_literal({cast(-1), cast(-2), cast(-3)}); TestTwoReplicasOneOperand<LiteralType>( "add", input_value.Clone(), to_literal({cast(-2), cast(-4), cast(-6)})); TestTwoReplicasOneOperand<LiteralType>( "multiply", input_value.Clone(), to_literal({cast(1), cast(4), cast(9)})); TestTwoReplicasOneOperand<LiteralType>( "maximum", input_value.Clone(), to_literal({cast(-1), cast(-2), cast(-3)})); TestTwoReplicasOneOperand<LiteralType>( "minimum", input_value.Clone(), to_literal({cast(-1), cast(-2), cast(-3)})); } } }; std::vector<std::vector<int64_t>> PowerSetOfIota(int64_t n) { std::vector<std::vector<int64_t>> power_set; for (int64_t i = 1; i < (1 << n); ++i) { power_set.emplace_back(); for (int64_t j = 0; j < n; ++j) { if (i & (1 << j)) { power_set.back().push_back(j); } } } return power_set; } DeviceAssignment MakeDeviceAssn(std::vector<int64_t> devices) { DeviceAssignment assn(devices.size(), 1); for (int64_t i = 0; i < devices.size(); ++i) { assn(i, 0) = devices[i]; } return assn; } template <typename T> static Eigen::half ToHalf(T value) { return static_cast<Eigen::half>(value); } XLA_TEST_F(CollectiveOpsTest, AllReduce_sum_float32_2D) { TestTwoReplicasOneOperand<float>( "add", LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}}), LiteralUtil::CreateR2<float>({{2, 4}, {6, 8}})); } XLA_TEST_F(CollectiveOpsTest, AllReduceSingleOutput_float32) { TestTwoReplicasOneOperand<float>( "add", LiteralUtil::CreateR1<float>({1}), LiteralUtil::CreateR1<float>({2})); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_int8) { TestAllOpsForReduce<int8_t>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_uint8) { TestAllOpsForReduce<uint8_t>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_uint32) { TestAllOpsForReduce<uint32_t>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_int32) { TestAllOpsForReduce<int32_t>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_int64) { TestAllOpsForReduce<int64_t>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_uint64) { TestAllOpsForReduce<uint64_t>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_float32) { TestAllOpsForReduce<float>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_double) { TestAllOpsForReduce<double>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_half) { TestAllOpsForReduce<Eigen::half>(); } XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_bfloat16) { TestAllOpsForReduce<bfloat16>(); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllReduce_sum_complex64)) { TestTwoReplicasOneOperand<complex64>( "add", LiteralUtil::CreateR1<complex64>({{1, 2}, {3, 4}}), LiteralUtil::CreateR1<complex64>({{2, 4}, {6, 8}})); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllReduce_sum_complex128)) { TestTwoReplicasOneOperand<complex128>( "add", LiteralUtil::CreateR1<complex128>({{1, 2}, {3, 4}}), LiteralUtil::CreateR1<complex128>({{2, 4}, {6, 8}})); } XLA_TEST_F(CollectiveOpsTest, AllReduceAnd_Pred) { 
TestTwoReplicasOneOperand<bool>( "and", LiteralUtil::CreateR1<bool>({true, false}), LiteralUtil::CreateR1<bool>({true, false})); const char* hlo_module = R"( HloModule test apply_op { x = pred[] parameter(0) y = pred[] parameter(1) ROOT apply_op = pred[] and(x, y) } ENTRY test_computation { id = u32[] replica-id() c = u32[] constant(0) p = pred[] compare(id, c), direction=EQ p2 = pred[1] reshape(p) crs = pred[1] all-reduce(p2), replica_groups={}, to_apply=apply_op copy = pred[1] copy(crs) ROOT out = pred[1] reshape(copy) } )"; HloModuleConfig config = GetModuleConfigForTest(2); auto module = ParseAndReturnVerifiedModule(hlo_module, config).value(); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, 2, true, true)); for (int replica_idx = 0; replica_idx < 2; replica_idx++) { EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<bool>({false}), results[replica_idx])); } } XLA_TEST_F(CollectiveOpsTest, AllReduceOr_Pred) { TestTwoReplicasOneOperand<bool>( "or", LiteralUtil::CreateR1<bool>({true, false}), LiteralUtil::CreateR1<bool>({true, false})); const char* hlo_module = R"( HloModule test apply_op { x = pred[] parameter(0) y = pred[] parameter(1) ROOT apply_op = pred[] or(x, y) } ENTRY test_computation { id = u32[] replica-id() c = u32[] constant(0) p = pred[] compare(id, c), direction=EQ p2 = pred[1] reshape(p) crs = pred[1] all-reduce(p2), replica_groups={}, to_apply=apply_op copy = pred[1] copy(crs) ROOT out = pred[1] reshape(copy) } )"; HloModuleConfig config = GetModuleConfigForTest(2); auto module = ParseAndReturnVerifiedModule(hlo_module, config).value(); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, 2, true, true)); for (int replica_idx = 0; replica_idx < 2; replica_idx++) { EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<bool>({true}), results[replica_idx])); } } XLA_TEST_F(CollectiveOpsTest, AllReduce_AllCombinations) { const int64_t kNumElems = 1024; for (std::vector<int64_t> devices : PowerSetOfIota(num_devices())) { SCOPED_TRACE(absl::StrFormat("Running on devices {%s}", absl::StrJoin(devices, ", "))); DeviceAssignment device_assn = MakeDeviceAssn(devices); HloModuleConfig config = GetModuleConfigForTest(devices.size()); config.set_static_device_assignment(device_assn); std::vector<float> input_vec(kNumElems); absl::c_iota(input_vec, 0); auto input_literal = LiteralUtil::CreateR1<float>(input_vec); auto module = MakeCrsModule(input_literal.shape(), {}, config); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), {&input_literal}, devices.size(), &device_assn, true, true)); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_GPU(AllReduce_ManyConcurrentAllReduces)) { const int64_t kNumElems = 1024; const int64_t kNumThreads = 200; const int64_t kRunsPerThread = 10; std::vector<float> input_vec(kNumElems); absl::c_iota(input_vec, 0); auto input_literal = LiteralUtil::CreateR1<float>(input_vec); HloModuleConfig config = GetModuleConfigForTest(2); auto executable = test_runner_ .CreateExecutable(MakeCrsModule(input_literal.shape(), {}, config), true) .value(); std::vector<int64_t> devices = {0, 1}; auto device_assn = MakeDeviceAssn(devices); HloRunner::ReplicatedExecuteOptions opts; opts.num_replicas = devices.size(); opts.use_threads = true; opts.arguments.push_back(&input_literal); tsl::BlockingCounter done(kNumThreads * kRunsPerThread); tsl::thread::ThreadPool pool(tsl::Env::Default(), 
TestName(), kNumThreads); for (int64_t i = 0; i < kNumThreads * kRunsPerThread; ++i) { pool.Schedule([&] { TF_ASSERT_OK( test_runner_.ExecuteReplicated(executable.get(), opts, &device_assn) .status()); done.DecrementCount(); }); } done.Wait(); } XLA_TEST_F(CollectiveOpsTest, AllReduce_CombinableAllReduces) { std::string hlo_string = R"( HloModule test apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY test_computation { p0 = f32[5] parameter(0) p1 = f32[5] parameter(1) crs0 = f32[5] all-reduce(p0), replica_groups={}, to_apply=apply_op crs1 = f32[5] all-reduce(p1), replica_groups={}, to_apply=apply_op ROOT out = (f32[5], f32[5]) tuple(f32[5] crs0, f32[5] crs1) } )"; static constexpr int kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string, config)); std::vector<float> input0_vec = {1., 2., 3., 4., 5.}; auto input0_literal = LiteralUtil::CreateR1<float>(input0_vec); std::vector<float> input1_vec = {7., 3., 4., 1., 2.}; auto input1_literal = LiteralUtil::CreateR1<float>(input1_vec); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), {&input0_literal, &input1_literal}, kNumReplicas, true, true)); std::vector<float> expected0_vec = {2., 4., 6., 8., 10.}; auto expected0_literal = LiteralUtil::CreateR1<float>(expected0_vec); std::vector<float> expected1_vec = {14., 6., 8., 2., 4.}; auto expected1_literal = LiteralUtil::CreateR1<float>(expected1_vec); for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) { auto rs = results[replica_idx].DecomposeTuple(); EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected0_literal, rs[0], ErrorSpec{1e-5, 1e-5})); EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected1_literal, rs[1], ErrorSpec{1e-5, 1e-5})); } } XLA_TEST_F(CollectiveOpsTest, AllReduce_ThreeReplicaGroups) { const int64_t kNumElems = 137; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); std::vector<float> input_vec(kNumElems); absl::c_iota(input_vec, 0); auto input_literal = LiteralUtil::CreateR1<float>(input_vec); auto module = MakeCrsModule( input_literal.shape(), {{0}, {1, 2}, {3}}, config); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), {&input_literal}, 4, true, true)); ASSERT_EQ(results.size(), 4); std::vector<float> input_vec_doubled; input_vec_doubled.reserve(input_vec.size()); for (float n : input_vec) { input_vec_doubled.push_back(n * 2); } auto input_literal_doubled = LiteralUtil::CreateR1<float>(input_vec_doubled); EXPECT_TRUE(LiteralTestUtil::Equal(input_literal, results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(input_literal_doubled, results[1])); EXPECT_TRUE(LiteralTestUtil::Equal(input_literal_doubled, results[2])); EXPECT_TRUE(LiteralTestUtil::Equal(input_literal, results[3])); } XLA_TEST_F(CollectiveOpsTest, AllReduce_Degenerate) { const char* const kModuleStr = R"( HloModule test apply_op { x = u32[] parameter(0) y = u32[] parameter(1) ROOT apply_op = u32[] add(x, y) } ENTRY test_computation { id = u32[] replica-id() ROOT crs = u32[] all-reduce(id), replica_groups={{0},{1},{2},{3}}, to_apply=apply_op } )"; static constexpr int kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, 
ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (int i = 0; i < kNumReplicas; ++i) { LiteralTestUtil::ExpectR0Equal<uint32_t>(i, results[i]); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllReduce)) { const absl::string_view kModuleStr = R"( HloModule test apply_op { x = u32[] parameter(0) y = u32[] parameter(1) ROOT apply_op = u32[] add(x, y) } ENTRY test_computation { id = u32[] replica-id() start = u32[] all-reduce-start(id), to_apply=apply_op, backend_config="{\"is_sync\":false}" ROOT done = u32[] all-reduce-done(start) } )"; HloModuleConfig config = GetModuleConfigForTest(num_devices()); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, num_devices(), true, false)); ASSERT_EQ(results.size(), num_devices()); uint32_t expected = num_devices() * (num_devices() - 1) / 2; for (int i = 0; i < num_devices(); ++i) { LiteralTestUtil::ExpectR0Equal<uint32_t>(expected, results[i]); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllReduceTwoOperands)) { const absl::string_view kModuleStr = R"( HloModule test apply_op { x = u32[] parameter(0) y = u32[] parameter(1) ROOT apply_op = u32[] add(x, y) } ENTRY test_computation { id = u32[] replica-id() id2 = u32[] multiply(id, id) start = (u32[], u32[]) all-reduce-start(id, id2), to_apply=apply_op, backend_config="{\"is_sync\":false}" ROOT done = (u32[], u32[]) all-reduce-done(start) } )"; HloModuleConfig config = GetModuleConfigForTest(num_devices()); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, num_devices(), true, false)); ASSERT_EQ(results.size(), num_devices()); uint32_t expected0 = num_devices() * (num_devices() - 1) / 2; uint32_t expected1 = num_devices() * (num_devices() - 1) * (2 * num_devices() - 1) / 6; for (int i = 0; i < num_devices(); ++i) { std::vector<Literal> replica_results = results[i].DecomposeTuple(); LiteralTestUtil::ExpectR0Equal<uint32_t>(expected0, replica_results[0]); LiteralTestUtil::ExpectR0Equal<uint32_t>(expected1, replica_results[1]); } } XLA_TEST_F(CollectiveOpsTest, ReplicaId) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() ROOT out = u32[] copy(id) } )"; HloModuleConfig config = GetModuleConfigForTest(num_devices()); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, num_devices(), true, true)); ASSERT_EQ(results.size(), num_devices()); for (uint32_t i = 0; i < num_devices(); ++i) { EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR0(i), results[i])); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(CollectiveBroadcast_TwoGPUs)) { const char* const kModuleStr = R"( HloModule test collective_broadcast { p0 = u32[2] parameter(0) ROOT result = u32[2] collective-broadcast(p0), replica_groups={{1, 0}} } ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), 
dimensions={} cb = ((u32[2]), u32[2]) async-start(u32[2] %p), calls=collective_broadcast ROOT res = u32[2] async-done(cb), calls=collective_broadcast } )"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[1])); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(CollectiveBroadcast_Simple)) { const char* const kModuleStr = R"( HloModule test collective_broadcast { p0 = u32[2] parameter(0) ROOT result = u32[2] collective-broadcast(p0), replica_groups={{1, 0, 2, 3}} } ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} cb = ((u32[2]), u32[2]) async-start(u32[2] %p), calls=collective_broadcast ROOT res = u32[2] async-done(cb), calls=collective_broadcast } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[1])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[2])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[3])); } XLA_TEST_F(CollectiveOpsTest, CollectivePermute_TwoGPUs) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} permute = u32[2] collective-permute(p), source_target_pairs={{1,0}, {0,1}} ROOT copy = u32[2] copy(permute) } )"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}), results[1])); } XLA_TEST_F(CollectiveOpsTest, CollectivePermute_Simple) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} permute = u32[2] collective-permute(p), source_target_pairs={{1,0}, {0,1}, {2,2}} ROOT copy = u32[2] copy(permute) } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig 
config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}), results[1])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}), results[2])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({0, 0}), results[3])); } XLA_TEST_F(CollectiveOpsTest, CollectivePermute_Degenerate) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} permute = u32[2] collective-permute(p), source_target_pairs={{0,0}, {1,1}, {2,2}, {3,3}} ROOT copy = u32[2] copy(permute) } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[1])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}), results[2])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({13, 13}), results[3])); } XLA_TEST_F(CollectiveOpsTest, CollectivePermute_NotDegenerate) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} permute = u32[2] collective-permute(p), source_target_pairs={{0,0}, {1,1}, {2,2}} ROOT copy = u32[2] copy(permute) } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[1])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}), results[2])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({0, 0}), results[3])); } XLA_TEST_F(CollectiveOpsTest, CollectivePermute_Rotate) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} permute = u32[2] collective-permute(p), source_target_pairs={{0,1}, {1,2}, {2,3}, {3,0}} ROOT copy = u32[2] copy(permute) } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = 
GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({13, 13}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}), results[1])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[2])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}), results[3])); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncCollectivePermute)) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { replica = u32[] replica-id() ten = u32[] constant(10) sum = u32[] add(replica, ten) p = u32[2] broadcast(sum), dimensions={} start = (u32[2], u32[2]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}, backend_config="{\"is_sync\":false}" ROOT done = u32[2] collective-permute-done(start) } )"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}), results[1])); } XLA_TEST_F(CollectiveOpsTest, AllToAll_EmptyReplicaGroups) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[2] broadcast(id), dimensions={} a0 = u32[2] constant({10, 15}) b0 = u32[2] constant({20, 25}) c0 = u32[2] constant({30, 35}) d0 = u32[2] constant({40, 45}) a1 = u32[2] add(id2, a0) b1 = u32[2] add(id2, b0) c1 = u32[2] add(id2, c0) d1 = u32[2] add(id2, d0) all2all = (u32[2], u32[2], u32[2], u32[2]) all-to-all(a1, b1, c1, d1), replica_groups={} a_prime = u32[2] get-tuple-element(all2all), index=0 b_prime = u32[2] get-tuple-element(all2all), index=1 c_prime = u32[2] get-tuple-element(all2all), index=2 d_prime = u32[2] get-tuple-element(all2all), index=3 ROOT out = u32[8] concatenate(a_prime, b_prime, c_prime, d_prime), dimensions={0} } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16, 12, 17, 13, 18}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({20, 25, 21, 26, 22, 27, 23, 28}, results[1]); LiteralTestUtil::ExpectR1Equal<uint32_t>({30, 35, 31, 36, 32, 37, 33, 38}, results[2]); LiteralTestUtil::ExpectR1Equal<uint32_t>({40, 45, 41, 46, 42, 47, 43, 48}, results[3]); } XLA_TEST_F(CollectiveOpsTest, AllToAll_OrderedReplicaGroups) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[2] broadcast(id), 
dimensions={} a0 = u32[2] constant({10, 15}) b0 = u32[2] constant({20, 25}) c0 = u32[2] constant({30, 35}) d0 = u32[2] constant({40, 45}) a1 = u32[2] add(id2, a0) b1 = u32[2] add(id2, b0) c1 = u32[2] add(id2, c0) d1 = u32[2] add(id2, d0) all2all = (u32[2], u32[2], u32[2], u32[2]) all-to-all(a1, b1, c1, d1), replica_groups={{3,2,1,0}} a_prime = u32[2] get-tuple-element(all2all), index=0 b_prime = u32[2] get-tuple-element(all2all), index=1 c_prime = u32[2] get-tuple-element(all2all), index=2 d_prime = u32[2] get-tuple-element(all2all), index=3 ROOT out = u32[8] concatenate(a_prime, b_prime, c_prime, d_prime), dimensions={0} } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint32_t>({43, 48, 42, 47, 41, 46, 40, 45}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({33, 38, 32, 37, 31, 36, 30, 35}, results[1]); LiteralTestUtil::ExpectR1Equal<uint32_t>({23, 28, 22, 27, 21, 26, 20, 25}, results[2]); LiteralTestUtil::ExpectR1Equal<uint32_t>({13, 18, 12, 17, 11, 16, 10, 15}, results[3]); } XLA_TEST_F(CollectiveOpsTest, AllToAll_TwoReplicaGroups) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[2] broadcast(id), dimensions={} a0 = u32[2] constant({10, 15}) b0 = u32[2] constant({20, 25}) a1 = u32[2] add(id2, a0) b1 = u32[2] add(id2, b0) all2all = (u32[2], u32[2]) all-to-all(a1, b1), replica_groups={{2,1},{3,0}} a_prime = u32[2] get-tuple-element(all2all), index=0 b_prime = u32[2] get-tuple-element(all2all), index=1 ROOT out = u32[4] concatenate(a_prime, b_prime), dimensions={0} } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint32_t>({23, 28, 20, 25}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({22, 27, 21, 26}, results[1]); LiteralTestUtil::ExpectR1Equal<uint32_t>({12, 17, 11, 16}, results[2]); LiteralTestUtil::ExpectR1Equal<uint32_t>({13, 18, 10, 15}, results[3]); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllToAll_SplitDimension)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[4, 2] broadcast(id), dimensions={} a0 = u32[4, 2] constant({{10, 15}, {20, 25}, {30, 35}, {40, 45}}) a1 = u32[4, 2] add(id2, a0) all2all = u32[4, 2] all-to-all(a1), replica_groups={{0,1,2,3}}, dimensions={0} ROOT out = u32[8] reshape(all2all) } )"; const int64_t kNumReplicas = 4; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); 
LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16, 12, 17, 13, 18}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({20, 25, 21, 26, 22, 27, 23, 28}, results[1]); LiteralTestUtil::ExpectR1Equal<uint32_t>({30, 35, 31, 36, 32, 37, 33, 38}, results[2]); LiteralTestUtil::ExpectR1Equal<uint32_t>({40, 45, 41, 46, 42, 47, 43, 48}, results[3]); } XLA_TEST_F(CollectiveOpsTest, AllGather_Dim0) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[1, 2] broadcast(id), dimensions={} a0 = u32[1, 2] constant({{10, 15}}) a1 = u32[1, 2] add(id2, a0) allgather = u32[2, 2] all-gather(a1), dimensions={0} ROOT out = u32[4] reshape(allgather) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16}, result); } } XLA_TEST_F(CollectiveOpsTest, AllGather_Dim0_UseGlobalDevices) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[1, 2] broadcast(id), dimensions={} a0 = u32[1, 2] constant({{10, 15}}) a1 = u32[1, 2] add(id2, a0) allgather = u32[2, 2] all-gather(a1), dimensions={0}, use_global_device_ids=true, channel_id=7, replica_groups={{0, 1}} ROOT out = u32[4] reshape(allgather) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16}, result); } } XLA_TEST_F(CollectiveOpsTest, AllGather_Dim1) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[2, 1] broadcast(id), dimensions={} a0 = u32[2, 1] constant({{10}, {15}}) a1 = u32[2, 1] add(id2, a0) allgather = u32[2, 2] all-gather(a1), dimensions={1} ROOT out = u32[4] reshape(allgather) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 11, 15, 16}, result); } } XLA_TEST_F(CollectiveOpsTest, AllReduce_TupleAllReduce) { if (IsMlirLoweringEnabled()) { GTEST_SKIP(); } std::string hlo_string = R"( HloModule test apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY test_computation { p0 = f32[5] parameter(0) p1 = f32[7] parameter(1) ROOT out = (f32[5], f32[7]) all-reduce(p0, p1), replica_groups={}, to_apply=apply_op } )"; static constexpr int kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, 
ParseAndReturnVerifiedModule(hlo_string, config)); std::vector<float> input0_vec = {1., 2., 3., 4., 5.}; auto input0_literal = LiteralUtil::CreateR1<float>(input0_vec); std::vector<float> input1_vec = { 7., 3., 4., 1., 2., 3., 4., }; auto input1_literal = LiteralUtil::CreateR1<float>(input1_vec); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), {&input0_literal, &input1_literal}, kNumReplicas, true, true)); std::vector<float> expected0_vec = {2., 4., 6., 8., 10.}; auto expected0_literal = LiteralUtil::CreateR1<float>(expected0_vec); std::vector<float> expected1_vec = {14., 6., 8., 2., 4., 6., 8.}; auto expected1_literal = LiteralUtil::CreateR1<float>(expected1_vec); for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) { auto rs = results[replica_idx].DecomposeTuple(); EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected0_literal, rs[0], ErrorSpec{1e-5, 1e-5})); EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected1_literal, rs[1], ErrorSpec{1e-5, 1e-5})); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllGatherMixedTypes)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() p0 = u32[2, 1] broadcast(id), dimensions={} p1 = f32[2, 1] convert(p0) allgather = (u32[2, 2], f32[2, 2]) all-gather(p0, p1), dimensions={1} ag0 = u32[2, 2] get-tuple-element(allgather), index=0 ag1 = f32[2, 2] get-tuple-element(allgather), index=1 r0 = u32[4] reshape(ag0) r1 = f32[4] reshape(ag1) ROOT out = (u32[4], f32[4]) tuple(r0, r1) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) { auto rs = results[replica_idx].DecomposeTuple(); LiteralTestUtil::ExpectR1Equal<uint32_t>({0, 1, 0, 1}, rs[0]); LiteralTestUtil::ExpectR1Near<float>({0.0, 1.0, 0.0, 1.0}, rs[1], ErrorSpec{1e-5, 1e-5}); } } XLA_TEST_F(CollectiveOpsTest, ReduceScatter) { const char* const kModuleStr = R"( HloModule test add { lhs = u32[] parameter(0) rhs = u32[] parameter(1) ROOT add = u32[] add(lhs, rhs) } ENTRY main { c0 = u32[8] constant({1, 2, 3, 4, 5, 6, 7, 8}) c1 = u32[8] constant({10, 11, 12, 13, 14, 15, 16, 17}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[8] broadcast(p), dimensions={} data = u32[8] select(pb, c0, c1) ROOT ars = u32[4] reduce-scatter(data), replica_groups={}, dimensions={0}, to_apply=add } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); LiteralTestUtil::ExpectR1Equal<uint32_t>({11, 13, 15, 17}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({19, 21, 23, 25}, results[1]); } XLA_TEST_F(CollectiveOpsTest, ReduceScatterConstrainLayout) { const char* const kModuleStr = R"( HloModule reduce-scatter %sum (a: u32[], b: u32[]) -> u32[] { %a = u32[] parameter(0) %b = u32[] parameter(1) ROOT %add = u32[] add(u32[] a, u32[] b) } ENTRY main { %param = u32[16] parameter(0) ROOT %rs = u32[8] reduce-scatter(u32[16] %param), replica_groups={}, 
constrain_layout=true, to_apply=%sum, dimensions={0} } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); std::vector<uint32_t> input_vec = { {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}; auto input_literal = LiteralUtil::CreateR1<uint32_t>(input_vec); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), {&input_literal}, kNumReplicas, true, true)); LiteralTestUtil::ExpectR1Equal<uint32_t>({2, 4, 6, 8, 10, 12, 14, 16}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({18, 20, 22, 24, 26, 28, 30, 32}, results[1]); } XLA_TEST_F(CollectiveOpsTest, ReduceScatter_Dim1) { const char* const kModuleStr = R"( HloModule test add { lhs = u32[] parameter(0) rhs = u32[] parameter(1) ROOT add = u32[] add(lhs, rhs) } ENTRY main { c0 = u32[2, 4] constant({{ 1, 2, 3, 4}, { 5, 6, 7, 8}}) c1 = u32[2, 4] constant({{10, 11, 12, 13}, {14, 15, 16, 17}}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[2, 4] broadcast(p), dimensions={} data = u32[2, 4] select(pb, c0, c1) ars = u32[2, 2] reduce-scatter(data), replica_groups={}, dimensions={1}, to_apply=add ROOT r = u32[4] reshape(ars) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); LiteralTestUtil::ExpectR1Equal<uint32_t>({11, 13, 19, 21}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({15, 17, 23, 25}, results[1]); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(ReduceScatterReassociate)) { const char* const kModuleStr = R"( HloModule m sum { a = u32[] parameter(0) b = u32[] parameter(1) ROOT add.2 = u32[] add(a, b) } ENTRY main { c0 = u32[8] constant({ 1, 2, 3, 4, 5, 6, 7, 8}) c1 = u32[8] constant({ 11, 12, 13, 14, 15, 16, 17, 18}) c2 = u32[8] constant({ 2, 3, 4, 5, 6, 7, 8, 9}) c3 = u32[8] constant({ 12, 13, 14, 15, 16, 17, 18, 19}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[8] broadcast(p), dimensions={} data0 = u32[8] select(pb, c0, c1) data1 = u32[8] select(pb, c2, c3) rs0 = u32[4] reduce-scatter(data0), replica_groups={}, dimensions={0}, to_apply=sum rs1 = u32[4] reduce-scatter(data1), replica_groups={}, dimensions={0}, to_apply=sum ROOT add = u32[4] add(rs0, rs1) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); const ErrorSpec es{1e-5, 1e-5}; LiteralTestUtil::ExpectR1Equal<uint32_t>({26, 30, 34, 38}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({42, 46, 50, 54}, results[1]); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(ReduceScatterReassociate_ReduceScatterCreator)) { const char* const kModuleStr = R"( HloModule m sum { a = u32[] parameter(0) b = u32[] parameter(1) ROOT add.2 = u32[] add(a, b) } ENTRY main { c0 = u32[8] constant({ 1, 2, 3, 4, 5, 6, 7, 8}) c1 = u32[8] constant({ 11, 12, 13, 14, 15, 16, 17, 18}) c2 = u32[8] constant({ 2, 3, 4, 5, 6, 7, 8, 9}) c3 = u32[8] 
constant({ 12, 13, 14, 15, 16, 17, 18, 19}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[8] broadcast(p), dimensions={} data0 = u32[8] select(pb, c0, c1) data1 = u32[8] select(pb, c2, c3) ar0 = u32[8] all-reduce(data0), replica_groups={}, to_apply=sum ar1 = u32[8] all-reduce(data1), replica_groups={}, to_apply=sum rid = u32[] replica-id() slice_size = u32[] constant(4) offset = u32[] multiply(rid, slice_size) ds0 = u32[4] dynamic-slice(ar0, offset), dynamic_slice_sizes={4} ds1 = u32[4] dynamic-slice(ar1, offset), dynamic_slice_sizes={4} ROOT add = u32[4] add(ds0, ds1) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); const ErrorSpec es{1e-5, 1e-5}; LiteralTestUtil::ExpectR1Equal<uint32_t>({26, 30, 34, 38}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({42, 46, 50, 54}, results[1]); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllReduceReassociate)) { const char* const kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { c0 = f32[8] constant({ 1, 2, 3, 4, 5, 6, 7, 8}) c1 = f32[8] constant({ 11, 12, 13, 14, 15, 16, 17, 18}) c2 = f32[8] constant({ 2, 3, 4, 5, 6, 7, 8, 9}) c3 = f32[8] constant({ 12, 13, 14, 15, 16, 17, 18, 19}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[8] broadcast(p), dimensions={} data0 = f32[8] select(pb, c0, c1) data1 = f32[8] select(pb, c2, c3) ar0 = f32[8] all-reduce(data0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(data1), replica_groups={}, to_apply=sum ROOT add = f32[8] add(ar0, ar1) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); const ErrorSpec es{1e-5, 1e-5}; EXPECT_TRUE(LiteralTestUtil::NearOrEqual(results[0], results[1], es)); LiteralTestUtil::ExpectR1Near<float>( {26.0, 30.0, 34.0, 38.0, 42.0, 46.0, 50.0, 54.0}, results[0], es); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllGatherBroadcastReorder_NonUniform)) { const char* const kModuleStr = R"( HloModule m ENTRY main { c0 = u32[2, 3] constant({{ 1, 2, 3}, { 4, 5, 6}}) c1 = u32[2, 3] constant({{10, 11, 12}, {13, 14, 15}}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[2, 3] broadcast(p), dimensions={} data = u32[2, 3] select(pb, c0, c1) bc = u32[2, 4, 3] broadcast(data), dimensions={0, 2} ROOT ag = u32[2, 4, 6] all-gather(bc), dimensions={2}, replica_groups={{0, 1}} } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); EXPECT_TRUE(LiteralTestUtil::Equal(results[0], results[1])); LiteralTestUtil::ExpectR3Equal<uint32_t>({{{1, 2, 3, 10, 11, 12}, {1, 2, 3, 10, 11, 12}, {1, 2, 3, 10, 11, 12}, {1, 2, 
3, 10, 11, 12}}, {{4, 5, 6, 13, 14, 15}, {4, 5, 6, 13, 14, 15}, {4, 5, 6, 13, 14, 15}, {4, 5, 6, 13, 14, 15}}}, results[0]); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllGatherBroadcastReorder_Uniform)) { const char* const kModuleStr = R"( HloModule m ENTRY main { c0 = u32[2, 3] constant({{ 1, 2, 3}, { 4, 5, 6}}) c1 = u32[2, 3] constant({{10, 11, 12}, {13, 14, 15}}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[2, 3] broadcast(p), dimensions={} data = u32[2, 3] select(pb, c0, c1) bc = u32[2, 4, 3] broadcast(data), dimensions={0, 2} ROOT ag = u32[2, 8, 3] all-gather(bc), dimensions={1}, replica_groups={{0, 1}} } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); EXPECT_TRUE(LiteralTestUtil::Equal(results[0], results[1])); LiteralTestUtil::ExpectR3Equal<uint32_t>({{{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {10, 11, 12}, {10, 11, 12}, {10, 11, 12}, {10, 11, 12}}, {{4, 5, 6}, {4, 5, 6}, {4, 5, 6}, {4, 5, 6}, {13, 14, 15}, {13, 14, 15}, {13, 14, 15}, {13, 14, 15}}}, results[0]); } XLA_TEST_F(CollectiveOpsTest, AllGather_16BitInt) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id32 = u32[] replica-id() id = u16[] convert(id32) id2 = u16[1, 2] broadcast(id), dimensions={} a0 = u16[1, 2] constant({{10, 15}}) a1 = u16[1, 2] add(id2, a0) allgather = u16[2, 2] all-gather(a1), dimensions={0} ROOT out = u16[4] reshape(allgather) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint16_t>({10, 15, 11, 16}, result); } } XLA_TEST_F(CollectiveOpsTest, AllToAll_16BitInt) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id32 = u32[] replica-id() id = u16[] convert(id32) id2 = u16[2] broadcast(id), dimensions={} a0 = u16[2] constant({10, 15}) a1 = u16[2] add(id2, a0) ROOT a2a = u16[2] all-to-all(a1), dimensions={0} } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint16_t>({10, 11}, results[0]); LiteralTestUtil::ExpectR1Equal<uint16_t>({15, 16}, results[1]); } XLA_TEST_F(CollectiveOpsTest, CollectivePermute_16BitInt) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id32 = u32[] replica-id() id = u16[] convert(id32) id2 = u16[2] broadcast(id), dimensions={} a0 = u16[2] constant({10, 15}) a1 = u16[2] add(id2, a0) ROOT cp = u16[2] collective-permute(a1), source_target_pairs={{0,1}, {1,0}} } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint16_t>({11, 16}, results[0]); LiteralTestUtil::ExpectR1Equal<uint16_t>({10, 15}, results[1]); } XLA_TEST_F(CollectiveOpsTest, AllReduce_16BitInt) { const char* const kModuleStr = R"( HloModule test sum { a = u16[] parameter(0) b = u16[] parameter(1) ROOT add.2 = u16[] add(a, b) } ENTRY test_computation { id32 = u32[] replica-id() id = u16[] convert(id32) id2 = u16[2] broadcast(id), dimensions={} a0 = u16[2] constant({10, 15}) a1 = u16[2] add(id2, a0) ROOT cp = u16[2] all-reduce(a1), replica_groups={}, to_apply=sum } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint16_t>({21, 31}, result); } } XLA_TEST_F(CollectiveOpsTest, ReduceScatter_16BitInt) { const char* const kModuleStr = R"( HloModule test sum { a = u16[] parameter(0) b = u16[] parameter(1) ROOT add.2 = u16[] add(a, b) } ENTRY test_computation { id32 = u32[] replica-id() id = u16[] convert(id32) id2 = u16[2] broadcast(id), dimensions={} a0 = u16[2] constant({10, 15}) a1 = u16[2] add(id2, a0) ROOT cp = u16[1]reduce-scatter(a1), dimensions={0}, replica_groups={}, to_apply=sum } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint16_t>({21}, results[0]); LiteralTestUtil::ExpectR1Equal<uint16_t>({31}, results[1]); } XLA_TEST_F(CollectiveOpsTest, AllReduceBFloat16Min) { const char* const kModuleStr = R"( HloModule test min { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT min.2 = bf16[] minimum(a, b) } ENTRY test_computation { id32 = u32[] replica-id() one = u32[] constant(1) id32_1 = u32[] add(id32, one) id = bf16[] convert(id32_1) id2 = bf16[2] broadcast(id), dimensions={} ROOT cp = bf16[2] all-reduce(id2), replica_groups={}, to_apply=min } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); const bfloat16 one = static_cast<bfloat16>(1.0f); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<bfloat16>({one, one}, result); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllGather)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[1, 2] broadcast(id), dimensions={} a0 = u32[1, 2] constant({{10, 15}}) a1 = u32[1, 2] add(id2, a0) ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, 
backend_config="{\"is_sync\":false}" allgather = u32[2,2] all-gather-done(ags) ROOT out = u32[4] reshape(allgather) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16}, result); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncReduceScatter)) { const char* const kModuleStr = R"( HloModule test add { lhs = u32[] parameter(0) rhs = u32[] parameter(1) ROOT add = u32[] add(lhs, rhs) } reduce_scatter { p0 = u32[8] parameter(0) ROOT result = u32[4] reduce-scatter(p0), replica_groups={}, dimensions={0}, to_apply=add } ENTRY main { c0 = u32[8] constant({1, 2, 3, 4, 5, 6, 7, 8}) c1 = u32[8] constant({10, 11, 12, 13, 14, 15, 16, 17}) zero = u32[] constant(0) id = u32[] replica-id() p = pred[] compare(id, zero), direction=EQ pb = pred[8] broadcast(p), dimensions={} data = u32[8] select(pb, c0, c1) rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter, backend_config="{\"is_sync\":false}" ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); LiteralTestUtil::ExpectR1Equal<uint32_t>({11, 13, 15, 17}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({19, 21, 23, 25}, results[1]); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllToAll)) { const char* const kModuleStr = R"( HloModule test all_to_all { p0 = u32[2] parameter(0) ROOT result = u32[2] all-to-all(p0), dimensions={0} } ENTRY test_computation { id = u32[] replica-id() id2 = u32[2] broadcast(id), dimensions={} a0 = u32[2] constant({10, 15}) a1 = u32[2] add(id2, a0) a2a-start = ((u32[2]), u32[2]) async-start(u32[2] %a1), calls=all_to_all, backend_config="{\"is_sync\":false}" ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 11}, results[0]); LiteralTestUtil::ExpectR1Equal<uint32_t>({15, 16}, results[1]); } XLA_TEST_F(CollectiveOpsTest, AllGather_Dim1UnitDimensions) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { id = u32[] replica-id() id2 = u32[1, 1, 2, 1, 2] broadcast(id), dimensions={} offset = u32[4] iota(), iota_dimension=0 offset_reshape = u32[1, 1, 2, 1, 2] reshape(offset) agi = u32[1, 1, 2, 1, 2] add(id2, offset_reshape) allgather = u32[1, 1, 4, 1, 2] all-gather(agi), dimensions={2} ROOT out = u32[8] reshape(allgather) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto 
module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<uint32_t>({0, 1, 2, 3, 1, 2, 3, 4}, result); } } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_Simple)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { %replica = u32[] replica-id() %ten = u32[] constant(10) %sum = u32[] add(%replica, %ten) %p = u32[2] broadcast(%sum), dimensions={} %after-all = token[] after-all() %recv = (u32[2], u32[], token[]) recv(%after-all), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}" } %send = (u32[2], u32[], token[]) send(%p, %after-all), channel_id=0, control-predecessors={%recv}, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}" } %recv-done = (u32[2], token[]) recv-done(%recv), channel_id=0 %recv-data = u32[2] get-tuple-element(%recv-done), index=0 %send-done = token[] send-done(%send), channel_id=0, control-predecessors={%recv} ROOT copy = u32[2] copy(%recv-data) } )"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({0, 0}), results[1])); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_TwoConcurrentChains)) { const char* const kModuleStr = R"( HloModule test, is_scheduled=true ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) replica = u32[] replica-id() a = u32[] add(c1, replica) send-data = u32[2] broadcast(a), dimensions={} after-all.0 = token[] after-all() recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_pipeline="1" } send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_pipeline="1" } after-all.1 = token[] after-all() recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}}" } send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}}" } recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="1" } recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0 recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0 compare0 = pred[] compare(replica, c0), direction=EQ compare = pred[2] broadcast(compare0), dimensions={} recv-data = u32[2] select(compare, recv-data.0, recv-data.1) send-done.0 = token[] send-done(send.0), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="1" } send-done.1 = token[] send-done(send.1), channel_id=0, frontend_attributes={ 
_xla_send_recv_pipeline="0" } c1b = u32[2] broadcast(c1), dimensions={} ROOT result = u32[2] add(c1b, recv-data) })"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({3, 3}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({2, 2}), results[1])); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_ValidationAttr1)) { const char* const kModuleStr = R"( HloModule test, is_scheduled=true ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) replica = u32[] replica-id() a = u32[] add(c1, replica) send-data = u32[2] broadcast(a), dimensions={} after-all.0 = token[] after-all() recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_validation="invalid" } send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_validation="invalid" } recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0 send-done.0 = token[] send-done(send.0), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } after-all.1 = token[] after-all() recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}}" } send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}}" } recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0 compare0 = pred[] compare(replica, c0), direction=EQ compare = pred[2] broadcast(compare0), dimensions={} recv-data = u32[2] select(compare, recv-data.0, recv-data.1) send-done.1 = token[] send-done(send.1), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } c1b = u32[2] broadcast(c1), dimensions={} ROOT result = u32[2] add(c1b, recv-data) })"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({2, 2}), results[1])); } XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_ValidationAttr2)) { const char* const kModuleStr = R"( HloModule test, is_scheduled=true cond { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(%param), index=0 ub = u32[] constant(2) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(%param), index=0 send-data = get-tuple-element(%param), index=1 after-all.0 = 
token[] after-all() recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_validation="{{0,1}}" } send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_validation="{{0,1}}" } recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0 send-done.0 = token[] send-done(send.0), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } after-all.1 = token[] after-all() recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}}" } send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1), channel_id=0, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}}" } recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0 replica = u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=EQ compare = pred[2] broadcast(compare0), dimensions={} recv-data = u32[2] select(compare, recv-data.0, recv-data.1) c1 = u32[] constant(1) new_count = u32[] add(count, c1) r = u32[2] broadcast(c1), dimensions={} s = u32[2] add(r, recv-data) send-done.1 = token[] send-done(send.1), channel_id=0, frontend_attributes={ _xla_send_recv_pipeline="0" } ROOT result = (u32[], u32[2]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) r = u32[] replica-id() init = u32[2] broadcast(r), dimensions={} while_init = (u32[], u32[2]) tuple(c0, init) while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond ROOT result = u32[2] get-tuple-element(while_result), index=1 })"; const int64_t kNumReplicas = 2; SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas); HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, false)); ASSERT_EQ(results.size(), kNumReplicas); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({2, 2}), results[0])); EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({3, 3}), results[1])); } class Fp8CollectiveOpsTest : public CollectiveOpsTest { public: Fp8CollectiveOpsTest() { replacements_[kF8E4M3DatatypePlaceholder] = IsCuda() ? "f8e4m3fn" : "f8e4m3fnuz"; replacements_[kF8E5M2DatatypePlaceholder] = IsCuda() ? 
"f8e5m2" : "f8e5m2fnuz"; } protected: bool IsCuda() { return std::holds_alternative<se::CudaComputeCapability>(Capability()); } const se::GpuComputeCapability& Capability() { return backend() .default_stream_executor() ->GetDeviceDescription() .gpu_compute_capability(); } absl::flat_hash_map<absl::string_view, absl::string_view> replacements_; private: static constexpr const char* kF8E4M3DatatypePlaceholder{"<<F8E4M3>>"}; static constexpr const char* kF8E5M2DatatypePlaceholder{"<<F8E5M2>>"}; }; XLA_TEST_F(Fp8CollectiveOpsTest, DISABLED_ON_CPU(AllGather_8BitFloat)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { a0 = <<F8E4M3>>[1,2] constant({{1,2}}) allgather = <<F8E4M3>>[2, 2] all-gather(a0), dimensions={0} p = <<F8E4M3>>[4] reshape(allgather) ROOT out = f32[4] convert(p) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( absl::StrReplaceAll(kModuleStr, replacements_), config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); for (const Literal& result : results) { LiteralTestUtil::ExpectR1Equal<float>({1, 2, 1, 2}, result); } } XLA_TEST_F(Fp8CollectiveOpsTest, DISABLED_ON_CPU(AllToAll_8BitFloat)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { a0 = <<F8E4M3>>[2] constant({1,2}) a2a = <<F8E4M3>>[2] all-to-all(a0), dimensions={0} ROOT out = f32[2] convert(a2a) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( absl::StrReplaceAll(kModuleStr, replacements_), config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<float>({1, 1}, results[0]); LiteralTestUtil::ExpectR1Equal<float>({2, 2}, results[1]); } XLA_TEST_F(Fp8CollectiveOpsTest, DISABLED_ON_CPU(CollectivePermute_8BitFloat)) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { a0 = <<F8E5M2>>[2] constant({1,2}) a1 = <<F8E5M2>>[2] collective-permute(a0), source_target_pairs={{0,1}, {1,0}} ROOT out = f32[2] convert(a1) } )"; const int64_t kNumReplicas = 2; HloModuleConfig config = GetModuleConfigForTest(kNumReplicas); TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( absl::StrReplaceAll(kModuleStr, replacements_), config)); TF_ASSERT_OK_AND_ASSIGN( std::vector<Literal> results, ExecuteReplicated(std::move(module), absl::Span<Literal* const>{}, kNumReplicas, true, true)); ASSERT_EQ(results.size(), kNumReplicas); LiteralTestUtil::ExpectR1Equal<float>({1, 2}, results[0]); LiteralTestUtil::ExpectR1Equal<float>({1, 2}, results[1]); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/collective_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/collective_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
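The AsyncAllReduce and AsyncAllReduceTwoOperands tests earlier in this test file compute their expected outputs in closed form: when every replica contributes its replica ID, the all-reduced result is num_devices*(num_devices-1)/2, and when it also contributes the squared ID the second result is num_devices*(num_devices-1)*(2*num_devices-1)/6. A minimal standalone check of that arithmetic, separate from the test file:

#include <cassert>
#include <cstdint>

// Sum of replica IDs 0..n-1 and of their squares, matching the `expected`
// values computed in AsyncAllReduce and AsyncAllReduceTwoOperands.
uint32_t SumOfIds(uint32_t n) { return n * (n - 1) / 2; }
uint32_t SumOfSquaredIds(uint32_t n) { return n * (n - 1) * (2 * n - 1) / 6; }

int main() {
  // With 4 replicas: 0+1+2+3 = 6 and 0+1+4+9 = 14.
  assert(SumOfIds(4) == 6);
  assert(SumOfSquaredIds(4) == 14);
  return 0;
}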
0aa2f5d2-966e-42b9-a207-44881d727aae
cpp
tensorflow/tensorflow
tensor_flag_utils
tensorflow/core/kernels/tensor_flag_utils.cc
tensorflow/core/kernels/tensor_flag_utils_test.cc
#include "tensorflow/core/kernels/tensor_flag_utils.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/tensor_shape.h" namespace tensorflow { namespace tensor_flag_utils { Status ValidateSparseMatrixShardingConfig(const Tensor& config) { if (TensorShapeUtils::IsScalar(config.shape())) { const float scalar_config = config.template scalar<float>()(); if (0 < scalar_config && scalar_config <= 1.0) { return absl::OkStatus(); } return Status( absl::StatusCode::kInvalidArgument, absl::StrCat("Expected config to be in range (0, 1] but instead found ", scalar_config)); } if (!TensorShapeUtils::IsMatrix(config.shape())) { return Status(absl::StatusCode::kInvalidArgument, absl::StrCat("Expected config to be either scalar or matrix " "but instead found tensor of rank ", config.dims())); } if (config.dim_size(1) != 3) { return Status( absl::StatusCode::kInvalidArgument, absl::StrCat( "Expected config matrix to have dim(1) = 3 but instead found ", config.dim_size(1))); } auto config_matrix = config.matrix<float>(); for (int i = 0; i < config.dim_size(0); ++i) { if (0 > config_matrix(i, 0)) { return errors::InvalidArgument( "First column of fraction_rows_per_thread_config " "should " "have non-negative values but found ", config_matrix(i, 0), " in row ", i); } if (0 > config_matrix(i, 1)) { return errors::InvalidArgument( "Second column of fraction_rows_per_thread_config " "should " "have non-negative values but found ", config_matrix(i, 1), " in row ", i); } if (!(0 < config_matrix(i, 2) && config_matrix(i, 2) <= 1)) { return errors::InvalidArgument( "Last column of fraction_rows_per_thread_config should " "have values in the range (0, 1] but found ", config_matrix(i, 2), " in row ", i); } } return absl::OkStatus(); } template <typename MatrixType, typename K> MatrixType FindConfigValueForKey( const typename TTypes<MatrixType>::ConstMatrix& config_mat, const std::pair<K, K>& key) { const int last_row_index = config_mat.dimension(0) - 1; for (int i = 0; i < last_row_index; ++i) { if (key.first >= config_mat(i, 0) && key.second >= config_mat(i, 1)) { return config_mat(i, 2); } } return config_mat(last_row_index, 2); } Status ValidateScalarQuantityShardingConfig(const Tensor& config) { if (TensorShapeUtils::IsScalar(config.shape())) { const float scalar_config = config.template scalar<float>()(); if (0 < scalar_config && scalar_config <= 1.0) { return absl::OkStatus(); } return Status( absl::StatusCode::kInvalidArgument, absl::StrCat("Expected config to be in range (0, 1] but instead found ", scalar_config)); } if (!TensorShapeUtils::IsMatrix(config.shape())) { return Status(absl::StatusCode::kInvalidArgument, absl::StrCat("Expected config to be either scalar or matrix " "but instead found tensor of rank ", config.dims())); } if (config.dim_size(1) != 2) { return Status( absl::StatusCode::kInvalidArgument, absl::StrCat( "Expected config matrix to have dim(1) = 2 but instead found ", config.dim_size(1))); } auto config_matrix = config.matrix<float>(); for (int i = 0; i < config.dim_size(0); ++i) { if (0 > config_matrix(i, 0)) { return errors::InvalidArgument( "First column of fraction_rows_per_thread_config " "should " "have non-negative values but found ", config_matrix(i, 0), " in row ", i); } if (!(0 < config_matrix(i, 1) && config_matrix(i, 1) <= 1)) { return errors::InvalidArgument( "Last column of fraction_rows_per_thread_config should " "have values in the range (0, 1] but found ", config_matrix(i, 1), " in row ", i); } } return absl::OkStatus(); } template <typename 
MatrixType, typename K> MatrixType FindConfigValueForKey( const typename TTypes<MatrixType>::ConstMatrix& config_mat, const K key) { const int last_row_index = config_mat.dimension(0) - 1; for (int i = 0; i < last_row_index; ++i) { if (key >= config_mat(i, 0)) { return config_mat(i, 1); } } return config_mat(last_row_index, 1); } template <typename Tindices> Tindices GetLinearBucket(const Tindices value, const Tindices bucket_size) { const Tindices next_multiple_of_bucket_size = (value + bucket_size - 1) / bucket_size * bucket_size; return next_multiple_of_bucket_size - (bucket_size - 1); } template <typename Tindices> Tindices GetPowerBucket(const Tindices value, const Tindices bucket_size) { if (bucket_size == 1) { return 1; } return std::pow(bucket_size, std::floor(std::log(bucket_size * (value - 1)) / std::log(bucket_size)) - 1) + 1; } #define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \ template float FindConfigValueForKey<float, TypeIndex>( \ const TTypes<float>::ConstMatrix& config_mat, \ const std::pair<TypeIndex, TypeIndex>& key); \ template float FindConfigValueForKey<float, TypeIndex>( \ const TTypes<float>::ConstMatrix& config_mat, const TypeIndex key); \ template int64 FindConfigValueForKey<int64, TypeIndex>( \ const TTypes<int64_t>::ConstMatrix& config_mat, const TypeIndex key); REGISTER_SPARSE_UTIL_FUNCTIONS(int32); REGISTER_SPARSE_UTIL_FUNCTIONS(int64); REGISTER_SPARSE_UTIL_FUNCTIONS(uint8); REGISTER_SPARSE_UTIL_FUNCTIONS(uint16); REGISTER_SPARSE_UTIL_FUNCTIONS(uint32); REGISTER_SPARSE_UTIL_FUNCTIONS(uint64); template int32 GetLinearBucket(const int32 value, const int32 bucket_size); template int64 GetLinearBucket(const int64 value, const int64 bucket_size); template int32 GetPowerBucket(const int32 value, const int32 bucket_size); template int64 GetPowerBucket(const int64 value, const int64 bucket_size); } }
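The helpers in the Code field above are small enough to illustrate directly. FindConfigValueForKey walks the config matrix top-down and returns the value from the first row whose thresholds the key meets, falling back to the last row (the unit test below exercises this: with rows {60,50->0.41}, {30,20->0.1} and fallback {0,0->0.7}, the key {70,40} picks 0.1). GetLinearBucket maps a value to the first element of its fixed-width bucket. A minimal sketch of that bucket formula with hand-checked values; LinearBucket here is an illustrative stand-in for the exported template, not the library function itself.

#include <cassert>
#include <cstdint>

// Mirrors tensor_flag_utils::GetLinearBucket: round `value` up to the next
// multiple of `bucket_size`, then step back to the bucket's first element.
int64_t LinearBucket(int64_t value, int64_t bucket_size) {
  const int64_t next_multiple =
      (value + bucket_size - 1) / bucket_size * bucket_size;
  return next_multiple - (bucket_size - 1);
}

int main() {
  // Buckets of width 3 start at 1, 4, 7, 10, ...
  assert(LinearBucket(1, 3) == 1);
  assert(LinearBucket(3, 3) == 1);
  assert(LinearBucket(7, 3) == 7);
  assert(LinearBucket(9, 3) == 7);
  assert(LinearBucket(10, 3) == 10);
  return 0;
}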
#include "tensorflow/core/kernels/tensor_flag_utils.h" #include <vector> #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/platform/test.h" namespace { using ::int64_t; using tensorflow::DataType; using tensorflow::int32; using tensorflow::Tensor; using tensorflow::TTypes; using tensorflow::error::INVALID_ARGUMENT; using tensorflow::tensor_flag_utils::FindConfigValueForKey; using tensorflow::tensor_flag_utils::GetLinearBucket; using tensorflow::tensor_flag_utils::GetPowerBucket; using tensorflow::tensor_flag_utils::ValidateScalarQuantityShardingConfig; using tensorflow::tensor_flag_utils::ValidateSparseMatrixShardingConfig; TEST(SparseUtilsTest, ValidateSparseMatrixShardingConfig) { { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 0.7; EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 1.0; EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {1, 1}); int indx = 0; for (const float v : {60.0}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {1, 2}); int indx = 0; for (const float v : { 60.0, 50.0, }) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {1, 3}); int indx = 0; for (const float v : {30.0, 20.0, 1.0}) { t.flat<float>()(indx++) = v; } EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {2, 3}); int indx = 0; for (const float v : {60.0, 50.0, 0.41, 30.0, 20.0, 0.7}) { t.flat<float>()(indx++) = v; } EXPECT_TRUE(ValidateSparseMatrixShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {2, 3}); int indx = 0; for (const float v : {60.0, 40.0, 0.41, 30.0, 20.0, 10.7}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {2, 3}); int indx = 0; for (const float v : {60.0, 40.0, 0.41, 30.0, 20.0, -0.7}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {2, 3}); int indx = 0; for (const float v : {60.0, -40.0, 0.41, 30.0, 20.0, 0.7}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = -0.5; EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 0; EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 1.2; EXPECT_EQ(INVALID_ARGUMENT, ValidateSparseMatrixShardingConfig(t).code()); } } TEST(SparseUtilsTest, ValidateScalarQuantityShardingConfig) { { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 0.7; EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 1.0; EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 1.2; EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {1, 1}); int indx = 0; for (const float v : {60.0}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor 
t(DataType::DT_FLOAT, {1, 2}); int indx = 0; for (const float v : { 60.0, 50.0, }) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {1, 3}); int indx = 0; for (const float v : {30.0, 20.0, 1.0}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {2, 2}); int indx = 0; for (const float v : {60.0, 0.41, 30.0, 0.7}) { t.flat<float>()(indx++) = v; } EXPECT_TRUE(ValidateScalarQuantityShardingConfig(t).ok()); } { Tensor t(DataType::DT_FLOAT, {2, 2}); int indx = 0; for (const float v : {60.0, 0.41, 30.0, 10.7}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {2, 2}); int indx = 0; for (const float v : {60.0, 0.41, 30.0, -0.7}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {2, 2}); int indx = 0; for (const float v : {-40.0, 0.41, 20.0, 0.7}) { t.flat<float>()(indx++) = v; } EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = -0.5; EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 0; EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } { Tensor t(DataType::DT_FLOAT, {}); t.scalar<float>()() = 1.2; EXPECT_EQ(INVALID_ARGUMENT, ValidateScalarQuantityShardingConfig(t).code()); } } TEST(SparseUtils, FindConfigValueForKey) { { float data[] = {60.0, 50.0, 0.41, 30.0, 20.0, 0.1, 0, 0, 0.7}; TTypes<float>::ConstMatrix config_mat(data, 3, 3); auto val = FindConfigValueForKey<float, int32>(config_mat, {70, 40}); EXPECT_FLOAT_EQ(0.1, val); val = FindConfigValueForKey<float, int32>(config_mat, {60, 50}); EXPECT_FLOAT_EQ(0.41, val); val = FindConfigValueForKey<float, int32>(config_mat, {60, 60}); EXPECT_FLOAT_EQ(0.41, val); val = FindConfigValueForKey<float, int32>(config_mat, {60, 40}); EXPECT_FLOAT_EQ(0.1, val); val = FindConfigValueForKey<float, int32>(config_mat, {50, 60}); EXPECT_FLOAT_EQ(0.1, val); val = FindConfigValueForKey<float, int32>(config_mat, {20, 30}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int32>(config_mat, {30, 10}); EXPECT_FLOAT_EQ(0.7, val); } { float data[] = {0, 0, 0.7}; TTypes<float>::ConstMatrix config_mat(data, 1, 3); auto val = FindConfigValueForKey<float, int64_t>(config_mat, {70, 40}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 50}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 60}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int64_t>(config_mat, {60, 40}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int64_t>(config_mat, {50, 60}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int64_t>(config_mat, {20, 30}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int64_t>(config_mat, {30, 10}); EXPECT_FLOAT_EQ(0.7, val); } { float data[] = {60.0, 50.0, 0.41, 0, 0, 0.7}; TTypes<float>::ConstMatrix config_mat(data, 2, 3); auto val = FindConfigValueForKey<float, int32>(config_mat, {70, 40}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int32>(config_mat, {60, 50}); EXPECT_FLOAT_EQ(0.41, val); val = FindConfigValueForKey<float, 
int32>(config_mat, {60, 60}); EXPECT_FLOAT_EQ(0.41, val); val = FindConfigValueForKey<float, int32>(config_mat, {60, 40}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int32>(config_mat, {50, 60}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int32>(config_mat, {20, 30}); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int32>(config_mat, {30, 10}); EXPECT_FLOAT_EQ(0.7, val); } { float data[] = {60.0, 0.41, 50.0, 0.14, 0, 0.7}; TTypes<float>::ConstMatrix config_mat(data, 3, 2); auto val = FindConfigValueForKey<float, int32>(config_mat, 70); EXPECT_FLOAT_EQ(0.41, val); val = FindConfigValueForKey<float, int32>(config_mat, 60); EXPECT_FLOAT_EQ(0.41, val); val = FindConfigValueForKey<float, int32>(config_mat, 55); EXPECT_FLOAT_EQ(0.14, val); val = FindConfigValueForKey<float, int32>(config_mat, 50); EXPECT_FLOAT_EQ(0.14, val); val = FindConfigValueForKey<float, int32>(config_mat, 20); EXPECT_FLOAT_EQ(0.7, val); val = FindConfigValueForKey<float, int32>(config_mat, 30); EXPECT_FLOAT_EQ(0.7, val); } } TEST(SparseUtils, GetLinearBucket) { EXPECT_EQ(11, GetLinearBucket(11, 5)); EXPECT_EQ(11, GetLinearBucket(12, 5)); EXPECT_EQ(1, GetLinearBucket(int64_t{4}, int64_t{5})); } TEST(SparseUtils, GetPowerBucket) { EXPECT_EQ(6, GetPowerBucket(11, 5)); EXPECT_EQ(6, GetPowerBucket(12, 5)); EXPECT_EQ(1332, GetPowerBucket(1335, 11)); EXPECT_EQ(5, GetPowerBucket(int64_t{5}, int64_t{4})); EXPECT_EQ(1, GetPowerBucket(int64_t{4}, int64_t{1})); } }
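Illustrative note: the FindConfigValueForKey tests above all exercise the same rule, "first row whose thresholds are both met wins; the last row is the default". A standalone sketch of that rule against a plain row container (the helper name and container type are mine, chosen so the snippet runs without TensorFlow headers):

#include <array>
#include <iostream>
#include <utility>
#include <vector>

// Each row is {row_threshold, col_threshold, value}. The first row whose two
// thresholds are both met by the key supplies the value; the final row is the
// fallback, matching the 3-column matrices built in the tests above.
float LookupConfig(const std::vector<std::array<float, 3>>& config,
                   std::pair<int, int> key) {
  for (std::size_t i = 0; i + 1 < config.size(); ++i) {
    if (key.first >= config[i][0] && key.second >= config[i][1]) {
      return config[i][2];
    }
  }
  return config.back()[2];
}

int main() {
  const std::vector<std::array<float, 3>> config = {
      {60.f, 50.f, 0.41f}, {30.f, 20.f, 0.1f}, {0.f, 0.f, 0.7f}};
  std::cout << LookupConfig(config, {70, 40}) << "\n";  // 0.1
  std::cout << LookupConfig(config, {60, 50}) << "\n";  // 0.41
  std::cout << LookupConfig(config, {20, 30}) << "\n";  // 0.7
}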
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_flag_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_flag_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6a0d1eee-2a26-44cb-8754-0448e35469d2
cpp
tensorflow/tensorflow
requantization_range_op
tensorflow/core/kernels/requantization_range_op.cc
tensorflow/core/kernels/requantization_range_op_test.cc
#define EIGEN_USE_THREADS #include <math.h> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/type_traits.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; void CalculateUsedRange(const Tensor& input, qint32* used_min_quantized, qint32* used_max_quantized) { auto input_array = input.flat<qint32>(); Eigen::Tensor<qint32, 0, Eigen::RowMajor> min = input_array.minimum(); Eigen::Tensor<qint32, 0, Eigen::RowMajor> max = input_array.maximum(); *used_min_quantized = min(); *used_max_quantized = max(); } class RequantizationRangeOp : public OpKernel { public: explicit RequantizationRangeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& input = ctx->input(0); OP_REQUIRES(ctx, ctx->input(1).NumElements() > 0, errors::InvalidArgument("Input min must not be empty.")); OP_REQUIRES(ctx, ctx->input(2).NumElements() > 0, errors::InvalidArgument("Input max must not be empty.")); const float input_min_float = ctx->input(1).flat<float>()(0); const float input_max_float = ctx->input(2).flat<float>()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output_min)); Tensor* output_max = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_max)); qint32 used_min_quantized; qint32 used_max_quantized; CalculateUsedRange(input, &used_min_quantized, &used_max_quantized); const float used_min_float = std::min( 0.0f, QuantizedToFloat(used_min_quantized, input_min_float, input_max_float)); const float used_max_float = QuantizedToFloat(used_max_quantized, input_min_float, input_max_float); output_min->flat<float>().setConstant(used_min_float); output_max->flat<float>().setConstant(used_max_float); } }; REGISTER_KERNEL_BUILDER(Name("RequantizationRange") .Device(DEVICE_CPU) .TypeConstraint<qint32>("Tinput"), RequantizationRangeOp); }
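Illustrative note: RequantizationRange boils down to a min/max scan over the qint32 buffer followed by an affine map back to real values through QuantizedToFloat, with the minimum clamped to at most 0. The standalone sketch below approximates that mapping; the simplified formula omits the rounding adjustments the real helper applies, and the numbers follow the HandCrafted unit test (values of ±2^23 in a [-256, 256] range map to roughly ±1).

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Simplified stand-in for QuantizedToFloat on qint32: place the int32 value
// inside [range_min, range_max] by scaling its offset from INT32_MIN.
float SimpleQuantizedToFloat(int32_t q, float range_min, float range_max) {
  const double lowest = std::numeric_limits<int32_t>::lowest();
  const double steps = 4294967296.0;  // 2^32 quantization steps for int32.
  const double scale = (range_max - range_min) / steps;
  return static_cast<float>(range_min +
                            (static_cast<double>(q) - lowest) * scale);
}

int main() {
  const std::vector<int32_t> values = {-(1 << 23), 0, (1 << 23)};
  const auto [mn, mx] = std::minmax_element(values.begin(), values.end());
  // The kernel clamps the reported minimum to at most 0.0f.
  const float used_min =
      std::min(0.0f, SimpleQuantizedToFloat(*mn, -256.0f, 256.0f));
  const float used_max = SimpleQuantizedToFloat(*mx, -256.0f, 256.0f);
  std::cout << used_min << " " << used_max << "\n";  // roughly -1 and 1
}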
#include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { void CalculateUsedRange(const Tensor& input, qint32* actual_min_quantized, qint32* actual_max_quantized); class RequantizationRangeTest : public OpsTestBase { protected: }; TEST_F(RequantizationRangeTest, HandCrafted) { TF_ASSERT_OK(NodeDefBuilder("requantization_range", "RequantizationRange") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("Tinput", DataTypeToEnum<qint32>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const int value_count = 3; AddInputFromArray<qint32>(TensorShape({value_count}), {-(1 << 23), 0, (1 << 23)}); AddInputFromArray<float>(TensorShape({1}), {-256.0f}); AddInputFromArray<float>(TensorShape({1}), {256.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_min(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_min, {-1.0f}); test::ExpectTensorEqual<float>(expected_min, *GetOutput(0)); Tensor expected_max(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected_max, {1.0f}); test::ExpectTensorEqual<float>(expected_max, *GetOutput(1)); } static void BM_RequantizationRange(::testing::benchmark::State& state) { const int size = state.range(0); Tensor quantized_tensor(DT_QINT32, TensorShape({1, size})); test::FillFn<qint32>(&quantized_tensor, [](int n) { return qint32(n); }); qint32 actual_min; qint32 actual_max; for (auto s : state) { CalculateUsedRange(quantized_tensor, &actual_min, &actual_max); } state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * size); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * size * 4); } BENCHMARK(BM_RequantizationRange) ->UseRealTime() ->Arg(100) ->Arg(1000) ->Arg(10000) ->Arg(100000) ->Arg(1000000) ->Arg(10000000) ->Arg(100000000); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/requantization_range_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/requantization_range_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0699d932-314d-46cb-9ab3-3f3a267a4ea0
cpp
tensorflow/tensorflow
quantized_mul_op
tensorflow/core/kernels/quantized_mul_op.cc
tensorflow/core/kernels/quantized_mul_op_test.cc
#define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { namespace { template <class T, class Toutput> void ScalarMultiply(OpKernelContext* context, const T* full_input, int32_t full_input_offset, int64_t num_elements, T scalar_input, int32_t scalar_input_offset, Toutput* output) { const int32_t scalar_minus_offset = static_cast<int32>(scalar_input) - scalar_input_offset; for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #ifdef USE_NEON template <> void ScalarMultiply<quint8, qint32>(OpKernelContext* context, const quint8* full_input, int32 full_input_offset, int64 num_elements, quint8 scalar_input, int32 scalar_input_offset, qint32* output) { const int16 scalar_minus_offset = static_cast<int16>(scalar_input) - scalar_input_offset; const int16x4_t scalar_minus_offset_16x4 = vmov_n_s16(scalar_minus_offset); const uint8x8_t full_input_offset_8x8 = vmov_n_u8(full_input_offset); int i; for (i = 0; i < (num_elements - 15); i += 16) { const uint8* full_input_ptr = &(full_input->value) + i; const uint8x16_t full_input_8x16 = vld1q_u8(full_input_ptr); const uint8x8_t full_input_high_8x8 = vget_high_u8(full_input_8x16); const uint8x8_t full_input_low_8x8 = vget_low_u8(full_input_8x16); const int16x8_t full_input_minus_offset_high_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_high_8x8, full_input_offset_8x8)); const int16x8_t full_input_minus_offset_low_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_low_8x8, full_input_offset_8x8)); const int16x4_t x_high_high_16x4 = vget_high_s16(full_input_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(full_input_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(full_input_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(full_input_minus_offset_low_16x8); const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, scalar_minus_offset_16x4); int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #endif template <class T, class Toutput> void VectorMultiply(OpKernelContext* context, const T* x_data, int32_t offset_x, const T* y_data, int32_t offset_y, int64_t num_elements, Toutput* output) { for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #ifdef USE_NEON template <> void VectorMultiply<quint8, qint32>(OpKernelContext* context, const quint8* x_data, int32 offset_x, const quint8* y_data, int32 offset_y, int64 num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(offset_x); const 
uint8x8_t offset_y_8x8 = vmov_n_u8(offset_y); int i; for (i = 0; i < (num_elements - 15); i += 16) { const uint8* x_data_ptr = &(x_data->value) + i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(y_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #endif template <class T, class Toutput> void VectorTensorMultiply(const T* vector_data, int32_t vector_offset, int64_t vector_num_elements, const T* tensor_data, int32_t tensor_offset, int64_t tensor_num_elements, Toutput* output) { for (int i = 0; i < tensor_num_elements; ++i) { const int64_t vector_i = i % vector_num_elements; output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } #ifdef USE_NEON template <> void VectorTensorMultiply<quint8, qint32>( const quint8* vector_data, int32 vector_offset, int64 vector_num_elements, const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(vector_offset); const uint8x8_t offset_y_8x8 = vmov_n_u8(tensor_offset); CHECK_EQ(0, tensor_num_elements % vector_num_elements); for (int base_i = 0; base_i < tensor_num_elements; base_i += vector_num_elements) { int i = base_i; const int end_i = base_i + vector_num_elements; int vector_i; for (vector_i = 0; vector_i < (vector_num_elements - 15); vector_i += 16, i += 16) { const uint8* x_data_ptr = &(vector_data->value) + vector_i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(tensor_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const 
uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < end_i; ++i, ++vector_i) { output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } } #endif } template <class T, class Toutput> class QuantizedMulOp : public OpKernel { public: explicit QuantizedMulOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); auto& min_x_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), errors::InvalidArgument("min_x must be a scalar")); const float min_x = min_x_tensor.flat<float>()(0); auto& max_x_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), errors::InvalidArgument("max_x must be a scalar")); const float max_x = max_x_tensor.flat<float>()(0); auto& min_y_tensor = context->input(4); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), errors::InvalidArgument("min_y must be a scalar")); const float min_y = min_y_tensor.flat<float>()(0); auto& max_y_tensor = context->input(5); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), errors::InvalidArgument("max_y must be a scalar")); const float max_y = max_y_tensor.flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. 
", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32_t offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32_t offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64_t vector_num_elements; int32_t vector_offset; const T* tensor_data; int64_t tensor_num_elements; int32_t tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedMul") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedMulOp<quint8, qint32>); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace ops { namespace { void TestMul(const std::vector<int64_t>& x_shape, const std::vector<float>& x_values, float x_min_value, float x_max_value, const std::vector<int64_t>& y_shape, const std::vector<float>& y_values, float y_min_value, float y_max_value, const std::vector<int64_t>& expected_shape, const std::vector<float>& expected_values, double tolerance) { Scope root = Scope::NewRootScope(); Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape)); test::FillValues<float>(&x_float_tensor, x_values); Tensor x_quantized_tensor(DT_QUINT8, x_float_tensor.shape()); FloatTensorToQuantizedInPlace<quint8>(x_float_tensor, x_min_value, x_max_value, &x_quantized_tensor); Output x = Const(root.WithOpName("x"), Input::Initializer(x_quantized_tensor)); Output x_min = Const(root.WithOpName("x_min"), x_min_value); Output x_max = Const(root.WithOpName("x_max"), x_max_value); Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape)); test::FillValues<float>(&y_float_tensor, y_values); Tensor y_quantized_tensor(DT_QUINT8, y_float_tensor.shape()); FloatTensorToQuantizedInPlace<quint8>(y_float_tensor, y_min_value, y_max_value, &y_quantized_tensor); Output y = Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor)); Output y_min = Const(root.WithOpName("y_min"), y_min_value); Output y_max = Const(root.WithOpName("y_max"), y_max_value); QuantizedMul mul = QuantizedMul(root.WithOpName("mul"), x, y, x_min, x_max, y_min, y_max); TF_EXPECT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {mul.z, mul.min_z, mul.max_z}, &outputs)); const Tensor& z_quantized = outputs[0]; const float z_min = outputs[1].flat<float>()(0); const float z_max = outputs[2].flat<float>()(0); Tensor z_float = QuantizedTensorToFloat<qint32>(z_quantized, z_min, z_max); Tensor expected_z_float(DT_FLOAT, TensorShape(expected_shape)); test::FillValues<float>(&expected_z_float, expected_values); test::ExpectTensorNear<float>(expected_z_float, z_float, tolerance); } void TestMulShape(const std::vector<int64_t>& x_shape, const std::vector<int64_t>& y_shape) { const size_t x_num_elements = TensorShape(x_shape).num_elements(); std::vector<float> x_values(x_num_elements); for (int i = 0; i < x_num_elements; ++i) { x_values[i] = i % 256; } const float x_min_value = 0.0f; const float x_max_value = 256.0f; const size_t y_num_elements = TensorShape(y_shape).num_elements(); std::vector<float> y_values(y_num_elements); for (int i = 0; i < y_num_elements; ++i) { y_values[i] = ((i + 23) % 123) - 50; } const float y_min_value = -150.0f; const float y_max_value = 150.0f; Scope root = Scope::NewRootScope(); Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape)); test::FillValues<float>(&x_float_tensor, x_values); Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor)); Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape)); 
test::FillValues<float>(&y_float_tensor, y_values); Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor)); Mul mul = Mul(root.WithOpName("mul"), x, y); TF_EXPECT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {mul.z}, &outputs)); const Tensor& expected_values_tensor = outputs[0]; const float* expected_values_data = expected_values_tensor.flat<float>().data(); std::vector<float> expected_values( expected_values_data, expected_values_data + expected_values_tensor.NumElements()); std::vector<int64_t> expected_shape; for (const int64_t dim : expected_values_tensor.shape().dim_sizes()) { expected_shape.push_back(dim); } TestMul(x_shape, x_values, x_min_value, x_max_value, y_shape, y_values, y_min_value, y_max_value, expected_shape, expected_values, 256.0); } void TimeMul(const std::vector<int64_t>& x_shape, const std::vector<int64_t>& y_shape, int64_t iterations) { TestMulShape(x_shape, y_shape); Scope root = Scope::NewRootScope(); Tensor x_quantized_tensor(DT_QUINT8, TensorShape(x_shape)); Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_QUINT8); Output x_min = Const(root.WithOpName("x_min"), 0.0f); Output x_max = Const(root.WithOpName("x_max"), 1.0f); Tensor y_quantized_tensor(DT_QUINT8, TensorShape(y_shape)); Output y = Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor)); Output y_min = Const(root.WithOpName("y_min"), 0.0f); Output y_max = Const(root.WithOpName("y_max"), 1.0f); QuantizedMul mul = QuantizedMul(root.WithOpName("mul"), placeholder, y, x_min, x_max, y_min, y_max); TF_EXPECT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; int64_t total_duration = 0; for (int i = 0; i < iterations; ++i) { const int64_t start_time = Env::Default()->NowMicros(); TF_EXPECT_OK(session.Run({{placeholder, x_quantized_tensor}}, {mul.z, mul.min_z, mul.max_z}, &outputs)); const int64_t end_time = Env::Default()->NowMicros(); total_duration += end_time - start_time; } const int64_t one_run_duration = total_duration / iterations; const int64_t num_ops = outputs[0].NumElements(); const double million_ops_per_second = (iterations * num_ops) / static_cast<double>(total_duration); LOG(INFO) << "TimeMul: " << TensorShape(x_shape).DebugString() << " * " << TensorShape(y_shape).DebugString() << ": iterations=" << iterations << ", MOps/s=" << million_ops_per_second << ", one_run_duration=" << one_run_duration << ", total_duration=" << total_duration; } void TestManualScalar() { TestMul( {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {1}, {10.0f}, -100.0f, 100.0f, {10}, {10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f}, 3.0f); TestMul( {1}, {10.0f}, -100.0f, 100.0f, {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {10}, {10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f}, 3.0f); } void TestScalar() { TestMulShape({100}, {1}); TestMulShape({1}, {100}); } void TestManualVector() { TestMul({10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {10}, {1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f, 64.0f, 81.0f, 100.0f}, 3.0f); } void TestVector() { TestMulShape({100}, {100}); } void TestManualVectorTimesTensor() { TestMul( {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {2, 10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 
6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 0.0f, 20.0f, {2, 10}, {1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f, 64.0f, 81.0f, 100.0f, 11.0f, 24.0f, 39.0f, 56.0f, 75.0f, 96.0f, 119.0f, 144.0f, 171.0f, 200.0f}, 3.0f); TestMul({2, 10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 0.0f, 20.0f, {10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {2, 10}, {1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f, 64.0f, 81.0f, 100.0f, 11.0f, 24.0f, 39.0f, 56.0f, 75.0f, 96.0f, 119.0f, 144.0f, 171.0f, 200.0f}, 3.0f); TestMul( {5, 2}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f, 10.0f, {2, 5, 2}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f}, 0.0f, 20.0f, {2, 5, 2}, {1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f, 64.0f, 81.0f, 100.0f, 11.0f, 24.0f, 39.0f, 56.0f, 75.0f, 96.0f, 119.0f, 144.0f, 171.0f, 200.0f}, 3.0f); } void TestVectorTimesTensor() { TestMulShape({100}, {2, 100}); TestMulShape({2, 100}, {100}); TestMulShape({5, 2}, {2, 5, 2}); } void BenchmarkTensorScalar() { TimeMul({200}, {1}, 10000); TimeMul({10000}, {1}, 1000); TimeMul({1000000}, {1}, 100); TimeMul({10000000}, {1}, 100); } void BenchmarkVector() { TimeMul({200}, {200}, 10000); TimeMul({10000}, {10000}, 1000); TimeMul({1000000}, {1000000}, 100); TimeMul({10000000}, {10000000}, 100); } void BenchmarkVectorTimesTensor() { TimeMul({10, 20}, {20}, 10000); TimeMul({10, 1000}, {1000}, 1000); TimeMul({1000, 1000}, {1000}, 100); TimeMul({10000, 1000}, {1000}, 100); TimeMul({100, 100}, {100}, 1000); TimeMul({10000, 100}, {100}, 100); TimeMul({100000, 100}, {100}, 100); } } } } #define RUN_TEST(t) \ TEST(QuantizedAddOpTest, t) { tensorflow::ops::t(); } RUN_TEST(TestManualScalar); RUN_TEST(TestManualVector); RUN_TEST(TestManualVectorTimesTensor); RUN_TEST(TestScalar); RUN_TEST(TestVector); RUN_TEST(TestVectorTimesTensor); #if defined(__ANDROID__) RUN_TEST(BenchmarkTensorScalar); RUN_TEST(BenchmarkVector); RUN_TEST(BenchmarkVectorTimesTensor); #endif int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
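Illustrative note: every code path in the quantized multiply above (scalar, vector, vector-times-tensor, and the NEON specializations) performs the same arithmetic: subtract each input's zero-point offset, multiply in int32. The standalone sketch below shows that core step and how the product maps back to real values via the product of the two per-step sizes; the ranges, offsets, and quantized sample values are hypothetical, and the closing min_z/max_z computation is one natural way to derive the output range the tests consume, not necessarily the exact arithmetic of QuantizationRangeForMultiplication.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

int main() {
  // Hypothetical ranges: x in [0, 10] with offset 0, y in [-100, 100] with
  // offset 128 (the quantized position of 0.0 in that range).
  const float step_x = 10.0f / 255.0f;
  const float step_y = 200.0f / 255.0f;
  const int32_t offset_x = 0;
  const int32_t offset_y = 128;

  const std::vector<uint8_t> x = {26, 51, 77};     // roughly 1.0, 2.0, 3.0
  const std::vector<uint8_t> y = {141, 141, 141};  // roughly 10.0 each

  for (std::size_t i = 0; i < x.size(); ++i) {
    // Offset-subtract multiply, exactly as in VectorMultiply above.
    const int32_t z = (static_cast<int32_t>(x[i]) - offset_x) *
                      (static_cast<int32_t>(y[i]) - offset_y);
    // One step of the product is worth step_x * step_y real units, so the
    // dequantized result is z scaled by that product.
    std::cout << z * step_x * step_y << " ";  // ~10.4 ~20.4 ~30.8 (8-bit is lossy)
  }
  std::cout << "\n";

  // A natural output range for the qint32 product scales the int32 limits by
  // the same per-step product; this is the role of the min_z/max_z outputs
  // that TestMul reads back before dequantizing.
  const float step_z = step_x * step_y;
  std::cout << step_z * std::numeric_limits<int32_t>::lowest() << " "
            << step_z * std::numeric_limits<int32_t>::max() << "\n";
}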
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_mul_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_mul_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5108ee6e-3837-4fc5-9985-7bf9f46d0f35
cpp
tensorflow/tensorflow
scan_ops
tensorflow/compiler/tf2xla/kernels/scan_ops.cc
tensorflow/core/kernels/scan_ops_test.cc
#include <array> #include <utility> #include <vector> #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/builder/xla_computation.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { constexpr std::array<DataType, 6> kScanOpTypes = { {DT_HALF, DT_BFLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}}; enum class Reducer { kProduct, kSum, kLogSumExp }; class ScanOp : public XlaOpKernel { public: ScanOp(OpKernelConstruction* ctx, Reducer reducer) : XlaOpKernel(ctx), reducer_(reducer) { OP_REQUIRES_OK(ctx, ctx->GetAttr("reverse", &reverse_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("exclusive", &exclusive_)); } void Compile(XlaOpKernelContext* ctx) override { const TensorShape input_shape = ctx->InputShape(0); const TensorShape tensor_axis_shape = ctx->InputShape(1); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tensor_axis_shape), errors::InvalidArgument("ScanOp: axis must be a scalar, not ", tensor_axis_shape.DebugString())); int64_t axis; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(1, &axis)); if (axis < 0) { axis += input_shape.dims(); } OP_REQUIRES( ctx, FastBoundsCheck(axis, input_shape.dims()), errors::InvalidArgument("ScanOp: Expected scan axis in the range [", -input_shape.dims(), ", ", input_shape.dims(), "), but got ", axis)); DataType dtype = XlaHelpers::SumAccumulationType(ctx->input_type(0)); if (input_shape.num_elements() == 0) { ctx->SetOutput(0, ctx->Input(0)); return; } xla::XlaBuilder* builder = ctx->builder(); std::vector<int64_t> window_strides(input_shape.dims(), 1); std::vector<int64_t> window_dims(input_shape.dims(), 1); window_dims[axis] = input_shape.dim_size(axis); std::vector<std::pair<int64_t, int64_t>> padding(input_shape.dims(), {0, 0}); padding[axis].first = input_shape.dim_size(axis) - 1; if (exclusive_) { ++padding[axis].first; } if (reverse_) { std::swap(padding[axis].first, padding[axis].second); } xla::XlaOp init; const xla::XlaComputation* reducer; switch (reducer_) { case Reducer::kSum: init = XlaHelpers::Zero(builder, dtype); reducer = ctx->GetOrCreateAdd(dtype); break; case Reducer::kProduct: init = XlaHelpers::One(builder, dtype); reducer = ctx->GetOrCreateMul(dtype); break; case Reducer::kLogSumExp: init = XlaHelpers::FloatLiteral(builder, dtype, -INFINITY); reducer = ctx->GetOrCreateLogAddExp(dtype); break; } auto output = xla::ReduceWindowWithGeneralPadding( XlaHelpers::ConvertElementType(ctx->Input(0), dtype), init, *reducer, window_dims, window_strides, {}, {}, padding); output = XlaHelpers::ConvertElementType(output, ctx->input_type(0)); if (exclusive_) { if (reverse_) { output = xla::SliceInDim(output, 1, input_shape.dim_size(axis) + 1, 1, axis); } else { output = xla::SliceInDim(output, 0, input_shape.dim_size(axis), 1, axis); } } ctx->SetOutput(0, output); } private: const Reducer reducer_; bool reverse_; bool exclusive_; }; class CumsumOp : public ScanOp { public: explicit CumsumOp(OpKernelConstruction* ctx) : ScanOp(ctx, Reducer::kSum) {} }; REGISTER_XLA_OP(Name("Cumsum") .TypeConstraint("T", kScanOpTypes) .CompileTimeConstantInput("axis"), CumsumOp); class CumprodOp : public ScanOp { public: explicit 
CumprodOp(OpKernelConstruction* ctx) : ScanOp(ctx, Reducer::kProduct) {} }; REGISTER_XLA_OP(Name("Cumprod") .TypeConstraint("T", kScanOpTypes) .CompileTimeConstantInput("axis"), CumprodOp); class CumulativeLogsumexpOp : public ScanOp { public: explicit CumulativeLogsumexpOp(OpKernelConstruction* ctx) : ScanOp(ctx, Reducer::kLogSumExp) {} }; REGISTER_XLA_OP(Name("CumulativeLogsumexp") .TypeConstraint("T", kScanOpTypes) .CompileTimeConstantInput("axis"), CumulativeLogsumexpOp); } }
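Illustrative note: the XLA kernel expresses cumsum/cumprod as a ReduceWindow whose window spans the whole scan axis, with asymmetric padding controlling how many earlier (or later) elements each output position sees, plus a slice for the exclusive case. A standalone sketch of that padding logic on a 1-D array, with plain loops standing in for ReduceWindowWithGeneralPadding (names are mine):

#include <iostream>
#include <utility>
#include <vector>

// Inclusive/exclusive, forward/reverse cumulative sum done the way the XLA
// kernel does it: pad one side of the axis, slide a window as long as the
// axis with stride 1, and sum whatever falls inside the unpadded input.
std::vector<float> CumsumViaWindow(const std::vector<float>& x,
                                   bool exclusive, bool reverse) {
  const int n = static_cast<int>(x.size());
  int pad_lo = n - 1 + (exclusive ? 1 : 0);  // padding before the axis
  int pad_hi = 0;
  if (reverse) std::swap(pad_lo, pad_hi);
  std::vector<float> out;
  // Window i covers padded positions [i, i + n); only real elements count,
  // which is equivalent to padding with the reducer's identity (0 for sum).
  for (int i = 0; i < n + (exclusive ? 1 : 0); ++i) {
    float sum = 0.0f;
    for (int w = 0; w < n; ++w) {
      const int src = i + w - pad_lo;
      if (src >= 0 && src < n) sum += x[src];
    }
    out.push_back(sum);
  }
  // The exclusive case produces one extra window; the kernel slices it off.
  if (exclusive) {
    if (reverse) out.erase(out.begin());
    else out.pop_back();
  }
  return out;
}

int main() {
  const std::vector<float> x = {1, 2, 3, 4};
  for (float v : CumsumViaWindow(x, false, false)) std::cout << v << " ";  // 1 3 6 10
  std::cout << "\n";
  for (float v : CumsumViaWindow(x, true, false)) std::cout << v << " ";   // 0 1 3 6
  std::cout << "\n";
  for (float v : CumsumViaWindow(x, false, true)) std::cout << v << " ";   // 10 9 7 4
  std::cout << "\n";
}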
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { template <typename T> static Graph* LargeOneDCumsum(int num_x, bool reverse = false) { auto* g = new Graph(OpRegistry::Global()); Tensor data(DataTypeToEnum<T>::value, TensorShape({num_x})); data.flat<T>().setRandom(); Tensor axes(DT_INT32, TensorShape({})); axes.flat<int32>()(0) = 0; test::graph::Cumsum(g, test::graph::Constant(g, data), test::graph::Constant(g, axes)); return g; } static Graph* ColCumsum(int num_x, int num_y, bool reverse = false) { auto* g = new Graph(OpRegistry::Global()); Tensor data(DT_FLOAT, TensorShape({num_x, num_y})); data.flat<float>().setRandom(); Tensor axes(DT_INT32, TensorShape({})); axes.flat<int32>()(0) = 0; test::graph::Cumsum(g, test::graph::Constant(g, data), test::graph::Constant(g, axes)); return g; } static Graph* RowCumsum(int num_x, int num_y, bool reverse = false) { auto* g = new Graph(OpRegistry::Global()); Tensor data(DT_FLOAT, TensorShape({num_x, num_y})); data.flat<float>().setRandom(); Tensor axes(DT_INT32, TensorShape({})); axes.flat<int32>()(0) = 1; test::graph::Cumsum(g, test::graph::Constant(g, data), test::graph::Constant(g, axes)); return g; } static Graph* ThreeDYCumsum(int num_y, int num_z, bool reverse = false) { auto* g = new Graph(OpRegistry::Global()); Tensor data(DT_FLOAT, TensorShape({32, num_y, num_z})); data.flat<float>().setRandom(); Tensor axes(DT_INT32, TensorShape({})); axes.flat<int32>()(0) = 1; test::graph::Cumsum(g, test::graph::Constant(g, data), test::graph::Constant(g, axes)); return g; } template <typename T> static void LargeOneDimensional(::testing::benchmark::State& state, const string& device, int num_x, bool reverse = false) { test::Benchmark(device, LargeOneDCumsum<T>(num_x, reverse), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x * sizeof(T)); } static void DoRowCumsum(::testing::benchmark::State& state, const string& device, int num_x, int num_y, bool reverse = false) { test::Benchmark(device, RowCumsum(num_x, num_y, reverse), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x * num_y); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x * num_y * sizeof(float)); } static void DoColCumsum(::testing::benchmark::State& state, const string& device, int num_x, int num_y, bool reverse = false) { test::Benchmark(device, ColCumsum(num_x, num_y, reverse), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x * num_y); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x * num_y * sizeof(float)); } static void Do3DYCumsum(::testing::benchmark::State& state, const string& device, int num_x, int num_y, bool reverse = false) { test::Benchmark(device, ThreeDYCumsum(num_x, num_y, reverse), false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x * num_y); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x * num_y * sizeof(float)); } static void BM_OneDCumsumGPU(::testing::benchmark::State& state) { const int num_x = state.range(0); LargeOneDimensional<float>(state, "gpu", num_x); } BENCHMARK(BM_OneDCumsumGPU)->Range(1, 1 << 21); static void 
BM_OneDCumsumGPUHalf(::testing::benchmark::State& state) { const int num_x = state.range(0); LargeOneDimensional<Eigen::half>(state, "gpu", num_x); } BENCHMARK(BM_OneDCumsumGPUHalf)->Range(1, 1 << 21); static void BM_Sum2DRowCumsumGPU(::testing::benchmark::State& state) { const int num_x = state.range(0); const int num_y = state.range(1); DoRowCumsum(state, "gpu", num_x, num_y); } BENCHMARK(BM_Sum2DRowCumsumGPU)->RangePair(1, 8192, 1, 8192); static void BM_Sum2DColumnCumsumGPU(::testing::benchmark::State& state) { const int num_x = state.range(0); const int num_y = state.range(1); DoColCumsum(state, "gpu", num_x, num_y); } BENCHMARK(BM_Sum2DColumnCumsumGPU)->RangePair(1, 8192, 1, 8192); static void BM_Sum3DYCumsumGPU(::testing::benchmark::State& state) { const int num_x = state.range(0); const int num_y = state.range(1); Do3DYCumsum(state, "gpu", num_x, num_y); } BENCHMARK(BM_Sum3DYCumsumGPU)->RangePair(64, 4096, 64, 4096); static void BM_OneDCumsumGPU_reverse(::testing::benchmark::State& state) { const int num_x = state.range(0); LargeOneDimensional<float>(state, "gpu", num_x, true); } BENCHMARK(BM_OneDCumsumGPU_reverse)->Range(1, 1 << 21); static void BM_Sum2DRowCumsumGPU_reverse(::testing::benchmark::State& state) { const int num_x = state.range(0); const int num_y = state.range(1); DoRowCumsum(state, "gpu", num_x, num_y, true); } BENCHMARK(BM_Sum2DRowCumsumGPU_reverse)->RangePair(1, 8192, 1, 8192); static void BM_Sum2DColumnCumsumGPU_reverse( ::testing::benchmark::State& state) { const int num_x = state.range(0); const int num_y = state.range(1); DoColCumsum(state, "gpu", num_x, num_y, true); } BENCHMARK(BM_Sum2DColumnCumsumGPU_reverse)->RangePair(1, 8192, 1, 8192); static void BM_Sum3DYCumsumGPU_reverse(::testing::benchmark::State& state) { const int num_x = state.range(0); const int num_y = state.range(1); Do3DYCumsum(state, "gpu", num_x, num_y, true); } BENCHMARK(BM_Sum3DYCumsumGPU_reverse)->RangePair(32, 2048, 32, 2048); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/scan_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scan_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ed923afa-a952-4b03-96af-6414be706916
cpp
tensorflow/tensorflow
clustering_ops
tensorflow/core/ops/clustering_ops.cc
tensorflow/core/kernels/clustering_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" namespace tensorflow { REGISTER_OP("KmeansPlusPlusInitialization") .Input("points: float32") .Input("num_to_sample: int64") .Input("seed: int64") .Input("num_retries_per_sample: int64") .Output("samples: float32") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("KMC2ChainInitialization") .Input("distances: float32") .Input("seed: int64") .Output("index: int64") .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("NearestNeighbors") .Input("points: float32") .Input("centers: float32") .Input("k: int64") .Output("nearest_center_indices: int64") .Output("nearest_center_distances: float32") .SetShapeFn(shape_inference::UnknownShape); }
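Illustrative note: the registrations above only declare inputs, outputs, and shape functions. To make the NearestNeighbors interface concrete, here is a standalone brute-force sketch of what its declared signature computes for k = 1: for each point, the index of the closest center and the distance to it. The names are mine and the squared-Euclidean distance convention is an assumption, not taken from the kernel.

#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

// Brute-force nearest-center search for k = 1. Points and centers are rows
// of `dim` floats stored contiguously, mirroring the [num, dim] float
// matrices the op registration declares as inputs.
void NearestCenters(const std::vector<float>& points,
                    const std::vector<float>& centers, std::size_t dim,
                    std::vector<std::size_t>* indices,
                    std::vector<float>* distances) {
  const std::size_t num_points = points.size() / dim;
  const std::size_t num_centers = centers.size() / dim;
  for (std::size_t p = 0; p < num_points; ++p) {
    std::size_t best = 0;
    float best_dist = std::numeric_limits<float>::max();
    for (std::size_t c = 0; c < num_centers; ++c) {
      float d = 0.0f;
      for (std::size_t i = 0; i < dim; ++i) {
        const float diff = points[p * dim + i] - centers[c * dim + i];
        d += diff * diff;
      }
      if (d < best_dist) {
        best_dist = d;
        best = c;
      }
    }
    indices->push_back(best);
    distances->push_back(best_dist);
  }
}

int main() {
  const std::vector<float> points = {0.f, 0.f, 5.f, 5.f};   // two 2-D points
  const std::vector<float> centers = {1.f, 1.f, 4.f, 4.f};  // two 2-D centers
  std::vector<std::size_t> idx;
  std::vector<float> dist;
  NearestCenters(points, centers, 2, &idx, &dist);
  for (std::size_t i = 0; i < idx.size(); ++i) {
    std::cout << idx[i] << " " << dist[i] << "\n";  // "0 2" then "1 2"
  }
}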
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { constexpr int k100Dim = 100; constexpr int k10Points = 10; constexpr int k100Points = 100; constexpr int k1kPoints = 1000; constexpr int k10kPoints = 10000; constexpr int k1MPoints = 1000000; constexpr int k2Centers = 2; constexpr int k5Centers = 5; constexpr int k10Centers = 10; constexpr int k20Centers = 20; constexpr int k50Centers = 50; constexpr int k100Centers = 100; constexpr int k200Centers = 200; constexpr int k500Centers = 500; constexpr int k1kCenters = 1000; constexpr int k10kCenters = 10000; constexpr int k0RetriesPerSample = 0; constexpr int k3RetriesPerSample = 3; Graph* SetUpKmeansPlusPlusInitialization(int num_dims, int num_points, int num_to_sample, int retries_per_sample) { Graph* g = new Graph(OpRegistry::Global()); Tensor points(DT_FLOAT, TensorShape({num_points, num_dims})); Tensor sample_size(DT_INT64, TensorShape({})); Tensor seed(DT_INT64, TensorShape({})); Tensor num_retries_per_sample(DT_INT64, TensorShape({})); points.flat<float>().setRandom(); sample_size.flat<int64_t>().setConstant(num_to_sample); seed.flat<int64_t>().setConstant(12345); num_retries_per_sample.flat<int64_t>().setConstant(retries_per_sample); TF_CHECK_OK(NodeBuilder("kmeans_plus_plus_initialization_op", "KmeansPlusPlusInitialization") .Input(test::graph::Constant(g, points)) .Input(test::graph::Constant(g, sample_size)) .Input(test::graph::Constant(g, seed)) .Input(test::graph::Constant(g, num_retries_per_sample)) .Finalize(g, nullptr )); return g; } template <int num_points, int num_to_sample, int num_dims, int retries_per_sample> void BM_KmeansPlusPlusInitialization(::testing::benchmark::State& state) { Graph* g = SetUpKmeansPlusPlusInitialization( num_dims, num_points, num_to_sample, retries_per_sample); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_points * num_dims * num_to_sample); } #define BENCHMARK_KMEANS_PLUS_PLUS(p, c, d, r) \ void BM_KmeansPlusPlusInitialization_##p##_##c##_##d##_##r( \ ::testing::benchmark::State& state) { \ BM_KmeansPlusPlusInitialization<p, c, d, r>(state); \ } \ BENCHMARK(BM_KmeansPlusPlusInitialization_##p##_##c##_##d##_##r) \ ->UseRealTime(); #define RUN_BM_KmeansPlusPlusInitialization(retries) \ BENCHMARK_KMEANS_PLUS_PLUS(k10Points, k2Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k10Points, k5Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k10Points, k10Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k10Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k20Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k50Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k100Points, k100Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k100Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k200Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k500Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1kPoints, k1kCenters, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k100Centers, k100Dim, retries); \ 
BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k200Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k500Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k10kPoints, k1kCenters, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k100Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k200Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k500Centers, k100Dim, retries); \ BENCHMARK_KMEANS_PLUS_PLUS(k1MPoints, k1kCenters, k100Dim, retries) RUN_BM_KmeansPlusPlusInitialization(k0RetriesPerSample); RUN_BM_KmeansPlusPlusInitialization(k3RetriesPerSample); #undef RUN_BM_KmeansPlusPlusInitialization #undef BENCHMARK_KMEANS_PLUS_PLUS Graph* SetUpKMC2Initialization(int num_points) { Graph* g = new Graph(OpRegistry::Global()); Tensor distances(DT_FLOAT, TensorShape({num_points})); Tensor seed(DT_INT64, TensorShape({})); distances.flat<float>().setRandom(); seed.flat<int64_t>().setConstant(12345); TF_CHECK_OK( NodeBuilder("KMC2ChainInitializationOp", "KMC2ChainInitialization") .Input(test::graph::Constant(g, distances)) .Input(test::graph::Constant(g, seed)) .Finalize(g, nullptr )); return g; } template <int num_points, int num_to_sample, int num_dims> void BM_KMC2Initialization(::testing::benchmark::State& state) { Graph* g = SetUpKMC2Initialization(num_points); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_points * num_dims * num_to_sample); } #define BENCHMARK_KMC2(p, c, d) \ void BM_KMC2Initialization_##p##_##c##_##d( \ ::testing::benchmark::State& state) { \ BM_KMC2Initialization<p, c, d>(state); \ } \ BENCHMARK(BM_KMC2Initialization_##p##_##c##_##d)->UseRealTime(); #define RUN_BM_KMC2Initialization \ BENCHMARK_KMC2(k10Points, k2Centers, k100Dim); \ BENCHMARK_KMC2(k10Points, k5Centers, k100Dim); \ BENCHMARK_KMC2(k10Points, k10Centers, k100Dim); \ BENCHMARK_KMC2(k100Points, k10Centers, k100Dim); \ BENCHMARK_KMC2(k100Points, k20Centers, k100Dim); \ BENCHMARK_KMC2(k100Points, k50Centers, k100Dim); \ BENCHMARK_KMC2(k100Points, k100Centers, k100Dim); \ BENCHMARK_KMC2(k1kPoints, k100Centers, k100Dim); \ BENCHMARK_KMC2(k1kPoints, k200Centers, k100Dim); \ BENCHMARK_KMC2(k1kPoints, k500Centers, k100Dim); \ BENCHMARK_KMC2(k1kPoints, k1kCenters, k100Dim); \ BENCHMARK_KMC2(k10kPoints, k100Centers, k100Dim); \ BENCHMARK_KMC2(k10kPoints, k200Centers, k100Dim); \ BENCHMARK_KMC2(k10kPoints, k500Centers, k100Dim); \ BENCHMARK_KMC2(k10kPoints, k1kCenters, k100Dim); \ BENCHMARK_KMC2(k1MPoints, k100Centers, k100Dim); \ BENCHMARK_KMC2(k1MPoints, k200Centers, k100Dim); \ BENCHMARK_KMC2(k1MPoints, k500Centers, k100Dim); \ BENCHMARK_KMC2(k1MPoints, k1kCenters, k100Dim) RUN_BM_KMC2Initialization; #undef RUN_BM_KMC2Initialization #undef BENCHMARK_KMC2 Graph* SetUpNearestNeighbors(int num_dims, int num_points, int num_centers, int k) { Graph* g = new Graph(OpRegistry::Global()); Tensor points(DT_FLOAT, TensorShape({num_points, num_dims})); Tensor centers(DT_FLOAT, TensorShape({num_centers, num_dims})); Tensor top(DT_INT64, TensorShape({})); points.flat<float>().setRandom(); centers.flat<float>().setRandom(); top.flat<int64_t>().setConstant(k); TF_CHECK_OK(NodeBuilder("nearest_centers_op", "NearestNeighbors") .Input(test::graph::Constant(g, points)) .Input(test::graph::Constant(g, centers)) .Input(test::graph::Constant(g, top)) .Finalize(g, nullptr )); return g; } template <int num_dims, int num_points, int num_centers, int k> void BM_NearestNeighbors(::testing::benchmark::State& state) { Graph* 
g = SetUpNearestNeighbors(num_dims, num_points, num_centers, k); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_points * num_dims * num_centers); } constexpr int kTop1 = 1; constexpr int kTop2 = 2; constexpr int kTop5 = 5; constexpr int kTop10 = 10; #define BENCHMARK_NEAREST_NEIGHBORS(d, p, c, k) \ void BM_NearestNeighbors##d##_##p##_##c##_##k( \ ::testing::benchmark::State& state) { \ BM_NearestNeighbors<d, p, c, k>(state); \ } \ BENCHMARK(BM_NearestNeighbors##d##_##p##_##c##_##k)->UseRealTime(); #define RUN_BM_NearestNeighbors(k) \ BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1kPoints, k100Centers, k); \ BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1kPoints, k1kCenters, k); \ BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1kPoints, k10kCenters, k); \ BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1MPoints, k100Centers, k); \ BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1MPoints, k1kCenters, k); \ BENCHMARK_NEAREST_NEIGHBORS(k100Dim, k1MPoints, k10kCenters, k) RUN_BM_NearestNeighbors(kTop1); RUN_BM_NearestNeighbors(kTop2); RUN_BM_NearestNeighbors(kTop5); RUN_BM_NearestNeighbors(kTop10); #undef RUN_BM_NearestNeighbors #undef BENCHMARK_NEAREST_NEIGHBORS } }
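Illustrative note: the benchmarks above drive KmeansPlusPlusInitialization without showing what it computes. Below is a standalone sketch of the standard D^2 ("kmeans++") seeding that the op is named after: the first center is drawn uniformly, each later center with probability proportional to its squared distance to the nearest center chosen so far. This is the textbook algorithm, not code taken from the TensorFlow kernel, and all names here are mine.

#include <cstddef>
#include <iostream>
#include <limits>
#include <random>
#include <vector>

std::vector<std::size_t> KmeansPlusPlusSeed(
    const std::vector<std::vector<float>>& points, std::size_t num_to_sample,
    std::mt19937* rng) {
  std::vector<std::size_t> chosen;
  std::uniform_int_distribution<std::size_t> uniform(0, points.size() - 1);
  chosen.push_back(uniform(*rng));  // first center: uniform at random
  std::vector<float> d2(points.size());
  while (chosen.size() < num_to_sample) {
    // Squared distance from each point to its nearest already-chosen center.
    for (std::size_t p = 0; p < points.size(); ++p) {
      float best = std::numeric_limits<float>::max();
      for (std::size_t c : chosen) {
        float d = 0.0f;
        for (std::size_t i = 0; i < points[p].size(); ++i) {
          const float diff = points[p][i] - points[c][i];
          d += diff * diff;
        }
        if (d < best) best = d;
      }
      d2[p] = best;
    }
    // Sample the next center proportionally to those squared distances.
    std::discrete_distribution<std::size_t> pick(d2.begin(), d2.end());
    chosen.push_back(pick(*rng));
  }
  return chosen;
}

int main() {
  std::mt19937 rng(12345);
  const std::vector<std::vector<float>> points = {
      {0.f, 0.f}, {0.f, 1.f}, {10.f, 10.f}, {10.f, 11.f}, {20.f, 0.f}};
  for (std::size_t c : KmeansPlusPlusSeed(points, 3, &rng)) {
    std::cout << c << " ";  // three well-spread point indices
  }
  std::cout << "\n";
}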
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/clustering_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/clustering_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
744292de-128b-4b43-b735-94d626f0875e
cpp
tensorflow/tensorflow
quantized_bias_add_op
tensorflow/core/kernels/quantized_bias_add_op.cc
tensorflow/core/kernels/quantized_bias_add_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <class T1, class T2, class T3> class QuantizedBiasAddOp : public OpKernel { public: explicit QuantizedBiasAddOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& bias = context->input(1); const Tensor& min_input = context->input(2); const Tensor& max_input = context->input(3); const Tensor& min_bias = context->input(4); const Tensor& max_bias = context->input(5); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min_input.shape()), errors::InvalidArgument("`min_input` must be rank 0 but is rank ", min_input.dims())); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_input.shape()), errors::InvalidArgument("`max_input` must be rank 0 but is rank ", max_input.dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_bias.shape()), errors::InvalidArgument( "`min_bias` must be rank 0 but is rank ", min_bias.dims())); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_bias.shape()), errors::InvalidArgument( "`max_bias` must be rank 0 but is rank ", max_bias.dims())); const float input_min = min_input.flat<float>()(0); const float input_max = max_input.flat<float>()(0); const float bias_min = min_bias.flat<float>()(0); const float bias_max = max_bias.flat<float>()(0); OP_REQUIRES(context, TensorShapeUtils::IsMatrixOrHigher(input.shape()), errors::InvalidArgument("Input tensor must be at least 2D: ", input.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(bias.shape()), errors::InvalidArgument("Biases must be 1D: ", bias.shape().DebugString())); const auto last_dim = input.shape().dims() - 1; OP_REQUIRES( context, bias.shape().dim_size(0) == input.shape().dim_size(last_dim), errors::InvalidArgument( "Must provide as many biases as the last dimension " "of the input tensor: ", bias.shape().DebugString(), " vs. 
", input.shape().DebugString())); OP_REQUIRES(context, bias.NumElements() > 0, errors::InvalidArgument("Must provide at least 1 bias")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); float total_min; float total_max; if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<T3, qint32>()) { auto input_ui8_array = input.flat<quint8>(); auto bias_ui8_array = bias.flat<quint8>(); GetOutputMinAndMaxForQuantizedAdd(input_min, input_max, bias_min, bias_max, &total_min, &total_max); meta::QuantizedBiasAdd(context, input_ui8_array.data(), input_ui8_array.size(), bias_ui8_array.data(), bias_ui8_array.size(), input_min, input_max, bias_min, bias_max, total_min, total_max, output->flat<qint32>().data()); } else { QuantizedAddUsingEigen<T1, T2, T3>( context->template eigen_device<CPUDevice>(), input, input_min, input_max, bias, bias_min, bias_max, output, &total_min, &total_max); } Tensor* output_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); output_min->flat<float>()(0) = total_min; Tensor* output_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_max->flat<float>()(0) = total_max; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedBiasAdd") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("out_type"), QuantizedBiasAddOp<quint8, quint8, qint32>); REGISTER_KERNEL_BUILDER(Name("QuantizedBiasAdd") .Device(DEVICE_CPU) .TypeConstraint<qint8>("T1") .TypeConstraint<qint8>("T2") .TypeConstraint<qint32>("out_type"), QuantizedBiasAddOp<qint8, qint8, qint32>); }
#define EIGEN_USE_THREADS #include <functional> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class QuantizedBiasAddTest : public OpsTestBase { protected: }; TEST_F(QuantizedBiasAddTest, Small) { TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = 0.0f; const float input_max = 60.0f; const int input_height = 2; const int input_width = 3; Tensor input_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>(&input_float, {10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); const float bias_min = 0.0f; const float bias_max = 3.0f; const int bias_width = 3; Tensor bias_float(DT_FLOAT, {bias_width}); test::FillValues<float>(&bias_float, {1.0f, 2.0f, 3.0f}); Tensor bias_quantized = FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max); Tensor expected_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>(&expected_float, {11.0f, 22.0f, 33.0f, 41.0f, 52.0f, 63.0f}); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<quint8>(bias_quantized.shape(), bias_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {input_min}); AddInputFromArray<float>(TensorShape({}), {input_max}); AddInputFromArray<float>(TensorShape({}), {bias_min}); AddInputFromArray<float>(TensorShape({}), {bias_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.2); } TEST_F(QuantizedBiasAddTest, RealData) { TF_ASSERT_OK(NodeDefBuilder("quantized_bias_add_op", "QuantizedBiasAdd") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("out_type", DataTypeToEnum<qint32>::v()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = -2164.25f; const float input_max = 2006.27f; const int input_height = 1; const int input_width = 64; Tensor input_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>( &input_float, {-1014.12, -157.382, -810.17, 1435.28, 1016.37, 219.684, -316.054, -2164.25, 2006.27, -547.444, 857.376, 404.376, 9.72115, 332.588, 194.385, -286.57, 26.062, 23.1125, 110.436, 247.055, -127.683, -376.275, -124.81, -846.826, -77.1507, 305.581, -202.747, 12.9528, 9.64886, 872.686, 
40.9069, 197.816, 44.16, -306.768, -1457.52, -368.939, -1049.42, -486.353, 1745.87, 95.7695, 395.773, -254.333, -404.27, 787.16, -2.44114, 199.37, -1024.08, 784.901, 235.055, -42.7295, 241.498, -245.365, 470.763, 186.159, 186.579, -220.163, 1304.58, 386.272, -358.853, -755.996, 360.109, -866.007, 55.2828, -508.801}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); const float bias_min = -0.739539f; const float bias_max = 0.641057f; const int bias_width = 64; Tensor bias_float(DT_FLOAT, {bias_width}); test::FillValues<float>( &bias_float, {-0.294619, -0.0670519, 0.261507, -0.126274, 0.127229, -0.176945, -0.251223, 0.231086, 0.453694, 0.415666, -0.288733, 0.508717, 0.211551, 0.0435907, -0.582383, -0.308779, 0.0696883, -0.438122, 0.114, 0.433964, 0.109883, 0.284931, -0.149661, 0.108657, 0.458333, -0.130231, -0.35805, -0.123206, -0.437968, 0.0282411, 0.628818, -0.0522173, -0.0233403, 0.124863, 0.217165, 0.262294, -0.171005, -0.254693, -0.200433, -0.287354, 0.488166, -0.0354688, -0.118091, -0.590444, 0.491537, -0.739539, 0.083117, 0.282482, 0.275269, -0.36574, 0.107476, 0.0511428, -0.136887, -0.0149852, -0.259694, 0.641057, 0.264054, -0.295126, -0.0218791, 0.361211, 0.012448, 0.0709718, -0.392394, -0.434215}); Tensor bias_quantized = FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max); Tensor expected_float(DT_FLOAT, {input_height, input_width}); test::FillValues<float>( &expected_float, {-1014.42, -157.449, -809.908, 1435.16, 1016.5, 219.507, -316.305, -2164.02, 2006.73, -547.028, 857.088, 404.885, 9.9327, 332.632, 193.803, -286.878, 26.1317, 22.6744, 110.55, 247.489, -127.573, -375.99, -124.959, -846.717, -76.6923, 305.451, -203.105, 12.8296, 9.21089, 872.714, 41.5357, 197.764, 44.1367, -306.643, -1457.3, -368.677, -1049.6, -486.608, 1745.67, 95.4821, 396.261, -254.368, -404.388, 786.57, -1.94961, 198.63, -1024.0, 785.183, 235.33, -43.0953, 241.605, -245.314, 470.627, 186.144, 186.319, -219.522, 1304.84, 385.977, -358.874, -755.635, 360.122, -865.936, 54.8904, -509.235}); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<quint8>(bias_quantized.shape(), bias_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({}), {input_min}); AddInputFromArray<float>(TensorShape({}), {input_max}); AddInputFromArray<float>(TensorShape({}), {bias_min}); AddInputFromArray<float>(TensorShape({}), {bias_max}); TF_ASSERT_OK(RunOpKernel()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 20.0); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_bias_add_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_bias_add_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
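Aside (not part of the dataset record above): the tests in this record compare against float references with a tolerance (0.2 for the Small case, 20.0 for RealData) because 8-bit quantization only approximates the float inputs; with a linear mapping each step spans (max - min) / 255 of the range, about 0.235 for the 0..60 input. The sketch below assumes the common affine mapping q = round((v - min) / range * 255); TensorFlow's FloatTensorToQuantized / QuantizedTensorToFloat helpers may differ in details, so treat it only as an illustration of where the tolerance comes from. The Quantize / Dequantize names are invented for the example.

// Standalone sketch: linear 8-bit quantize/dequantize round trip using the
// ranges and values from the Small test above (x = 10 with range [0, 60],
// bias = 1 with range [0, 3], expected sum 11).
#include <cmath>
#include <cstdint>
#include <cstdio>

uint8_t Quantize(float v, float min, float max) {
  const float range = max - min;
  return static_cast<uint8_t>(std::lround((v - min) / range * 255.0f));
}

float Dequantize(uint8_t q, float min, float max) {
  const float range = max - min;
  return min + (static_cast<float>(q) / 255.0f) * range;
}

int main() {
  const float input_min = 0.0f, input_max = 60.0f;
  const float bias_min = 0.0f, bias_max = 3.0f;
  const float x = 10.0f, b = 1.0f;

  const float x_hat = Dequantize(Quantize(x, input_min, input_max), input_min, input_max);
  const float b_hat = Dequantize(Quantize(b, bias_min, bias_max), bias_min, bias_max);

  // The reconstructed sum lands near 11.0 but not exactly on it, which is
  // why the test compares with ExpectTensorNear and a 0.2 tolerance.
  std::printf("x=%f b=%f sum=%f error=%f\n", x_hat, b_hat, x_hat + b_hat,
              std::fabs(x_hat + b_hat - (x + b)));
  return 0;
}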
fa6d2b11-f649-4767-8a92-432331a7711b
cpp
tensorflow/tensorflow
variable_ops
tensorflow/compiler/tf2xla/kernels/variable_ops.cc
tensorflow/c/experimental/saved_model/core/ops/variable_ops_test.cc
#include <functional> #include <utility> #include "absl/status/status.h" #include "tensorflow/compiler/tf2xla/kernels/gather_op_helpers.h" #include "tensorflow/compiler/tf2xla/kernels/shape_util.h" #include "tensorflow/compiler/tf2xla/lib/scatter.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/compiler/tf2xla/xla_resource.h" #include "xla/hlo/builder/lib/slicing.h" #include "xla/hlo/builder/xla_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/resource_variable_util.h" #include "tensorflow/core/kernels/scatter_nd_util.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { Status ValidateAssignUpdateVariableOpShapes(XlaOpKernelContext* ctx) { DataType variable_dtype; TensorShape variable_shape; TensorShape value_shape = ctx->InputShape(1); TF_RETURN_IF_ERROR( ctx->GetVariableTypeAndShape(0, &variable_dtype, &variable_shape)); TF_RETURN_IF_ERROR( ValidateAssignUpdateVariableOpShapes(variable_shape, value_shape)); return absl::OkStatus(); } class VarIsInitializedOp : public XlaOpKernel { public: explicit VarIsInitializedOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { XlaResource* variable; OP_REQUIRES_OK(ctx, ctx->GetResourceInput(0, &variable)); ctx->SetOutput( 0, xla::ConstantR0<bool>(ctx->builder(), variable->initialized())); } }; REGISTER_XLA_OP(Name("VarIsInitializedOp"), VarIsInitializedOp); class VariableShapeOp : public XlaOpKernel { public: explicit VariableShapeOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("out_type", &out_dtype_)); } void Compile(XlaOpKernelContext* ctx) override { DataType variable_dtype; TensorShape shape; OP_REQUIRES_OK(ctx, ctx->GetVariableTypeAndShape(0, &variable_dtype, &shape)); Tensor shape_constant(out_dtype_, TensorShape({shape.dims()})); OP_REQUIRES_OK(ctx, TensorShapeToConstant(shape, &shape_constant)); ctx->SetConstantOutput(0, shape_constant); } private: DataType out_dtype_; }; REGISTER_XLA_OP(Name("VariableShape").CompilationOnly().IsMetadataOp(), VariableShapeOp); class ReadVariableOp : public XlaOpKernel { public: explicit ReadVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("dtype", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { xla::XlaOp handle; OP_REQUIRES_OK( ctx, ctx->ReadVariableInput(0, dtype_, nullptr, &handle)); ctx->SetOutput(0, handle); } private: DataType dtype_; }; REGISTER_XLA_OP(Name("ReadVariableOp").CompilationOnly(), ReadVariableOp); class AssignVariableOp : public XlaOpKernel { public: explicit AssignVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, ctx->input_type(1), ctx->Input(1))); } }; REGISTER_XLA_OP(Name("AssignVariableOp").CompilationOnly(), AssignVariableOp); class AssignAddVariableOp : public XlaOpKernel { public: explicit AssignAddVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->input_type(1); xla::XlaOp handle; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, 
nullptr, &handle)); OP_REQUIRES_OK(ctx, ValidateAssignUpdateVariableOpShapes(ctx)); handle = xla::Add(handle, ctx->Input(1)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle)); } }; REGISTER_XLA_OP( Name("AssignAddVariableOp").TypeConstraint("dtype", kNumericTypes), AssignAddVariableOp); class AssignSubVariableOp : public XlaOpKernel { public: explicit AssignSubVariableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->input_type(1); xla::XlaOp handle; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, nullptr, &handle)); OP_REQUIRES_OK(ctx, ValidateAssignUpdateVariableOpShapes(ctx)); handle = xla::Sub(handle, ctx->Input(1)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle)); } }; REGISTER_XLA_OP( Name("AssignSubVariableOp").TypeConstraint("dtype", kNumericTypes), AssignSubVariableOp); class ResourceGatherOp : public XlaOpKernel { public: explicit ResourceGatherOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("batch_dims", &batch_dims_)); } void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->expected_output_dtype(0); TensorShape input_shape; xla::XlaOp input; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &input_shape, &input)); xla::XlaOp gather; OP_REQUIRES_OK(ctx, XlaGatherWithBatchDimsOpImpl(ctx, input, input_shape, batch_dims_, &gather)); ctx->SetOutput(0, gather); } private: int32 batch_dims_; }; REGISTER_XLA_OP(Name("ResourceGather"), ResourceGatherOp); class ResourceScatterOp : public XlaOpKernel { public: explicit ResourceScatterOp( OpKernelConstruction* context, bool indices_are_vectors, std::function<xla::XlaOp(const xla::XlaOp&, const xla::XlaOp&, xla::XlaBuilder*)> combiner) : XlaOpKernel(context), indices_are_vectors_(indices_are_vectors), combiner_(std::move(combiner)) {} void Compile(XlaOpKernelContext* context) override { xla::XlaBuilder* builder = context->builder(); DataType dtype = context->input_type(2); TensorShape var_shape; xla::XlaOp var_value; OP_REQUIRES_OK( context, context->ReadVariableInput(0, dtype, &var_shape, &var_value)); if (indices_are_vectors_) { OP_REQUIRES_OK(context, ValidateScatterNdUpdateShape( var_shape, context->InputShape(1), context->InputShape(2))); } const xla::XlaOp indices = context->Input(1); const xla::XlaOp updates = context->Input(2); auto result = XlaScatter(var_value, updates, indices, indices_are_vectors_, false, combiner_, builder); OP_REQUIRES_OK(context, result.status()); OP_REQUIRES_OK(context, context->AssignVariable(0, dtype, result.value())); } private: const bool indices_are_vectors_; const std::function<xla::XlaOp(const xla::XlaOp&, const xla::XlaOp&, xla::XlaBuilder*)> combiner_; }; class ResourceScatterAddOp : public ResourceScatterOp { public: explicit ResourceScatterAddOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Add(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterAdd"), ResourceScatterAddOp); class ResourceScatterSubOp : public ResourceScatterOp { public: explicit ResourceScatterSubOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Sub(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterSub"), ResourceScatterSubOp); class ResourceScatterMulOp : public ResourceScatterOp { public: 
explicit ResourceScatterMulOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Mul(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterMul"), ResourceScatterMulOp); class ResourceScatterDivOp : public ResourceScatterOp { public: explicit ResourceScatterDivOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Div(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterDiv"), ResourceScatterDivOp); class ResourceScatterMinOp : public ResourceScatterOp { public: explicit ResourceScatterMinOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Min(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterMin"), ResourceScatterMinOp); class ResourceScatterMaxOp : public ResourceScatterOp { public: explicit ResourceScatterMaxOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Max(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterMax"), ResourceScatterMaxOp); class ResourceScatterUpdateOp : public ResourceScatterOp { public: explicit ResourceScatterUpdateOp(OpKernelConstruction* context) : ResourceScatterOp(context, false, {}) {} }; REGISTER_XLA_OP(Name("ResourceScatterUpdate"), ResourceScatterUpdateOp); class ResourceScatterNdUpdateOp : public ResourceScatterOp { public: explicit ResourceScatterNdUpdateOp(OpKernelConstruction* context) : ResourceScatterOp(context, true, {}) {} }; REGISTER_XLA_OP(Name("ResourceScatterNdUpdate"), ResourceScatterNdUpdateOp); class ResourceScatterNdAddOp : public ResourceScatterOp { public: explicit ResourceScatterNdAddOp(OpKernelConstruction* context) : ResourceScatterOp(context, true, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Add(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterNdAdd"), ResourceScatterNdAddOp); class ResourceScatterNdSubOp : public ResourceScatterOp { public: explicit ResourceScatterNdSubOp(OpKernelConstruction* context) : ResourceScatterOp(context, true, Combine) {} private: static xla::XlaOp Combine(const xla::XlaOp x, const xla::XlaOp y, xla::XlaBuilder* builder) { return xla::Sub(x, y); } }; REGISTER_XLA_OP(Name("ResourceScatterNdSub"), ResourceScatterNdSubOp); } }
#include "tensorflow/c/experimental/saved_model/core/ops/variable_ops.h" #include <memory> #include "tensorflow/c/eager/immediate_execution_tensor_handle.h" #include "tensorflow/c/experimental/saved_model/core/test_utils.h" #include "tensorflow/c/tensor_interface.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/eager/context.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { ImmediateTensorHandlePtr CreateScalarTensorHandle(EagerContext* context, float value) { AbstractTensorPtr tensor(context->CreateFloatScalar(value)); ImmediateTensorHandlePtr handle(context->CreateLocalHandle(tensor.get())); return handle; } class VariableOpsTest : public ::testing::Test { public: VariableOpsTest() : device_mgr_(testing::CreateTestingDeviceMgr()), ctx_(testing::CreateTestingEagerContext(device_mgr_.get())) {} EagerContext* context() { return ctx_.get(); } private: std::unique_ptr<StaticDeviceMgr> device_mgr_; EagerContextPtr ctx_; }; TEST_F(VariableOpsTest, CreateVariableSuccessful) { ImmediateTensorHandlePtr handle; TF_EXPECT_OK(internal::CreateUninitializedResourceVariable( context(), DT_FLOAT, {}, nullptr, &handle)); EXPECT_EQ(handle->DataType(), DT_RESOURCE); } TEST_F(VariableOpsTest, DestroyVariableSuccessful) { ImmediateTensorHandlePtr handle; TF_EXPECT_OK(internal::CreateUninitializedResourceVariable( context(), DT_FLOAT, {}, nullptr, &handle)); TF_EXPECT_OK(internal::DestroyResource(context(), handle.get())); } TEST_F(VariableOpsTest, AssignVariableAndReadSuccessful) { ImmediateTensorHandlePtr variable; TF_EXPECT_OK(internal::CreateUninitializedResourceVariable( context(), DT_FLOAT, {}, nullptr, &variable)); ImmediateTensorHandlePtr my_value = CreateScalarTensorHandle(context(), 42.0); TF_EXPECT_OK(internal::AssignVariable(context(), variable.get(), DT_FLOAT, my_value.get())); ImmediateTensorHandlePtr read_value_handle; TF_EXPECT_OK(internal::ReadVariable(context(), variable.get(), DT_FLOAT, &read_value_handle)); Status status; AbstractTensorPtr read_value(read_value_handle->Resolve(&status)); TF_EXPECT_OK(status); EXPECT_FLOAT_EQ(42.0, *static_cast<float*>(read_value->Data())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/variable_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/saved_model/core/ops/variable_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
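Aside (not part of the dataset record above): the unit test in this record walks a create -> assign -> read lifecycle through internal::CreateUninitializedResourceVariable, AssignVariable, ReadVariable, and DestroyResource. Purely to make that lifecycle concrete, and not as a model of TensorFlow's resource machinery, here is a toy handle-based store; the class ToyVariableStore and all of its method names are invented for this illustration.

// Standalone toy sketch mirroring the create/assign/read/destroy flow in the
// test above. Not TensorFlow code: names and types here are illustrative only.
#include <cassert>
#include <cstdio>
#include <optional>
#include <unordered_map>

class ToyVariableStore {
 public:
  // Analogue of "CreateUninitializedResourceVariable": a handle with no value.
  int CreateUninitialized() {
    const int handle = next_handle_++;
    variables_[handle] = std::nullopt;
    return handle;
  }

  // Analogue of "AssignVariable": initializes or overwrites the value.
  void Assign(int handle, float value) { variables_.at(handle) = value; }

  // Analogue of "ReadVariable": fails if the variable was never assigned.
  bool Read(int handle, float* out) const {
    const auto& slot = variables_.at(handle);
    if (!slot.has_value()) return false;
    *out = *slot;
    return true;
  }

  // Analogue of "DestroyResource": removes the handle entirely.
  void Destroy(int handle) { variables_.erase(handle); }

 private:
  int next_handle_ = 0;
  std::unordered_map<int, std::optional<float>> variables_;
};

int main() {
  ToyVariableStore store;
  const int var = store.CreateUninitialized();

  float value = 0.f;
  assert(!store.Read(var, &value));  // uninitialized: read fails

  store.Assign(var, 42.0f);          // mirrors assigning 42.0 in the test
  assert(store.Read(var, &value));
  std::printf("read back: %f\n", value);  // 42.000000

  store.Destroy(var);
  return 0;
}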
8bb040b7-ee6d-4626-b127-24058237c255
cpp
tensorflow/tensorflow
string_util
tensorflow/compiler/mlir/tensorflow/utils/string_util.cc
tensorflow/lite/string_util_test.cc
#include "tensorflow/compiler/mlir/tensorflow/utils/string_util.h" #include <ostream> #include <string> #include "llvm/Support/raw_ostream.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Operation.h" namespace tensorflow { std::string OpAsString(mlir::Operation& op) { std::string out; llvm::raw_string_ostream op_stream(out); op.print(op_stream, mlir::OpPrintingFlags() .elideLargeElementsAttrs() .assumeVerified() .skipRegions() .printGenericOpForm()); return out; } std::string AttrAsString(mlir::Attribute& attr) { std::string out; llvm::raw_string_ostream attr_stream(out); attr.print(attr_stream); return out; } std::ostream& operator<<(std::ostream& o, const LoggableOperation& op) { return o << OpAsString(op.v); } std::ostream& operator<<(std::ostream& o, const LoggableAttribute& attr) { return o << AttrAsString(attr.v); } std::ostream& operator<<(std::ostream& o, const LoggableStringRef& ref) { return o << ref.v.str(); } }
#include "tensorflow/lite/string_util.h" #include <stdint.h> #include <string> #include <gtest/gtest.h> #include "tensorflow/lite/core/c/c_api_types.h" #include "tensorflow/lite/core/interpreter.h" #include "tensorflow/lite/string_type.h" namespace tflite { TEST(StringUtil, TestStringUtil) { Interpreter interpreter; interpreter.AddTensors(3); TfLiteTensor* t0 = interpreter.tensor(0); t0->type = kTfLiteString; t0->allocation_type = kTfLiteDynamic; TfLiteTensor* t1 = interpreter.tensor(1); t1->type = kTfLiteString; t1->allocation_type = kTfLiteDynamic; union { char raw_bytes[15]; struct { int32_t num_strs; int32_t offsets[2]; char str_data[3]; } tensor_data; } data; data.tensor_data = {1, {12, 15}, {'X', 'Y', 'Z'}}; TfLiteQuantization quant; quant.type = kTfLiteNoQuantization; quant.params = nullptr; interpreter.SetTensorParametersReadOnly( 2, kTfLiteString, "", {1}, quant, data.raw_bytes, sizeof(data.raw_bytes)); TfLiteTensor* t2 = interpreter.tensor(2); ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk); char s0[] = "ABC"; string s1 = "DEFG"; char s2[] = ""; DynamicBuffer buf0; ASSERT_EQ(buf0.AddString(s0, 3), kTfLiteOk); DynamicBuffer buf1; ASSERT_EQ(buf1.AddString(s1.data(), s1.length()), kTfLiteOk); ASSERT_EQ(buf0.AddString(s2, 0), kTfLiteOk); auto new_shape = TfLiteIntArrayCreate(2); new_shape->data[0] = 2; new_shape->data[1] = 1; buf0.WriteToTensor(t0, new_shape); buf1.WriteToTensorAsVector(t1); EXPECT_EQ(t0->dims->size, 2); EXPECT_EQ(t0->dims->data[0], 2); EXPECT_EQ(t0->dims->data[1], 1); EXPECT_EQ(t1->dims->size, 1); EXPECT_EQ(t1->dims->data[0], 1); ASSERT_EQ(GetStringCount(t0), 2); StringRef str_ref; str_ref = GetString(t0, 0); ASSERT_EQ(string(str_ref.str, str_ref.len), "ABC"); str_ref = GetString(t0, 1); ASSERT_EQ(string(str_ref.str, str_ref.len), ""); ASSERT_EQ(t0->bytes, 19); ASSERT_EQ(GetStringCount(t1), 1); str_ref = GetString(t1, 0); ASSERT_EQ(string(str_ref.str, str_ref.len), "DEFG"); ASSERT_EQ(t1->bytes, 16); ASSERT_EQ(GetStringCount(t2), 1); str_ref = GetString(t2, 0); ASSERT_EQ(string(str_ref.str, str_ref.len), "XYZ"); ASSERT_EQ(t2->bytes, 15); } TEST(StringUtil, AddStringOverflow32Length) { const size_t max_size = 100; DynamicBuffer buf{max_size}; std::string big_string(max_size + 1, 'A'); ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}), kTfLiteError); } TEST(StringUtil, AddStringToFullBufferOverflow32Length) { const size_t max_size = 100; DynamicBuffer buf{max_size}; std::string big_string((max_size / 2) + 1, 'A'); ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}), kTfLiteOk); EXPECT_EQ(buf.AddString({big_string.data(), big_string.length()}), kTfLiteError); } TEST(StringUtil, TruncatesCharDataToLen) { Interpreter interpreter; interpreter.AddTensors(1); TfLiteTensor* t0 = interpreter.tensor(0); t0->type = kTfLiteString; t0->allocation_type = kTfLiteDynamic; DynamicBuffer buf; char fake_big[] = "ABCADASDA"; ASSERT_EQ(buf.AddString({fake_big, 3}), kTfLiteOk); buf.WriteToTensorAsVector(t0); StringRef added_string = GetString(t0, 0); EXPECT_EQ(added_string.len, 3); EXPECT_EQ(string(added_string.str, 3), "ABC"); } TEST(StringUtil, TestAddJoinedStringCharSeparator) { Interpreter interpreter; interpreter.AddTensors(1); TfLiteTensor* t0 = interpreter.tensor(0); t0->type = kTfLiteString; t0->allocation_type = kTfLiteDynamic; char s0[] = ""; char s1[] = "ABC"; char s2[] = "DEFG"; char s3[] = ""; char s4[] = "XYZ"; DynamicBuffer buf; buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, ' '); buf.WriteToTensorAsVector(t0); 
ASSERT_EQ(GetStringCount(t0), 1); StringRef str_ref; str_ref = GetString(t0, 0); ASSERT_EQ(string(str_ref.str, str_ref.len), " ABC DEFG XYZ"); ASSERT_EQ(t0->bytes, 26); } TEST(StringUtil, TestAddJoinedStringStringRefSeparator) { Interpreter interpreter; interpreter.AddTensors(1); TfLiteTensor* t0 = interpreter.tensor(0); t0->type = kTfLiteString; t0->allocation_type = kTfLiteDynamic; char s[] = " - "; char s0[] = ""; char s1[] = "ABC"; char s2[] = "DEFG"; char s3[] = ""; char s4[] = "XYZ"; DynamicBuffer buf; buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, {s, 3}); buf.WriteToTensorAsVector(t0); ASSERT_EQ(GetStringCount(t0), 1); StringRef str_ref; str_ref = GetString(t0, 0); ASSERT_EQ(string(str_ref.str, str_ref.len), " - ABC - DEFG - - XYZ"); ASSERT_EQ(t0->bytes, 34); } TEST(StringUtil, TestEmptyList) { Interpreter interpreter; interpreter.AddTensors(1); TfLiteTensor* t0 = interpreter.tensor(0); t0->type = kTfLiteString; t0->allocation_type = kTfLiteDynamic; DynamicBuffer buf; buf.WriteToTensorAsVector(t0); ASSERT_EQ(GetStringCount(t0), 0); ASSERT_EQ(t0->bytes, 8); } TEST(StringUtil, TestShapes) { Interpreter interpreter; interpreter.AddTensors(1); TfLiteTensor* t0 = interpreter.tensor(0); t0->type = kTfLiteString; t0->allocation_type = kTfLiteDynamic; t0->dims = TfLiteIntArrayCreate(2); t0->dims->data[0] = 2; t0->dims->data[1] = 1; DynamicBuffer buf; buf.AddString("ABC", 3); buf.AddString("X", 1); buf.WriteToTensor(t0, nullptr); ASSERT_EQ(t0->dims->size, 2); EXPECT_EQ(t0->dims->data[0], 2); EXPECT_EQ(t0->dims->data[1], 1); auto new_shape = TfLiteIntArrayCreate(2); new_shape->data[0] = 1; new_shape->data[1] = 2; buf.WriteToTensor(t0, new_shape); ASSERT_EQ(t0->dims->size, 2); EXPECT_EQ(t0->dims->data[0], 1); EXPECT_EQ(t0->dims->data[1], 2); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/string_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/string_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
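Aside (not part of the dataset record above): the TestStringUtil case in this record hand-crafts the serialized layout of a TFLite string tensor: a 32-bit string count, then count + 1 32-bit offsets measured from the start of the buffer, then the concatenated bytes. That is why one string "XYZ" becomes {1, {12, 15}, 'X','Y','Z'} in exactly 15 bytes, and why two strings "ABC" and "" occupy 19 bytes. The encoder below exists only to make that byte math explicit; EncodeStrings is an invented name and this is not tflite::DynamicBuffer.

// Standalone sketch of the layout the test above builds by hand:
// [int32 count][int32 offsets[count + 1]][string bytes], with offsets
// measured from the start of the buffer and offsets[count] = total size.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

std::vector<char> EncodeStrings(const std::vector<std::string>& strings) {
  const int32_t count = static_cast<int32_t>(strings.size());
  const int32_t header_bytes = static_cast<int32_t>(sizeof(int32_t)) * (count + 2);
  std::vector<int32_t> offsets(count + 1);
  int32_t cursor = header_bytes;
  for (int32_t i = 0; i < count; ++i) {
    offsets[i] = cursor;
    cursor += static_cast<int32_t>(strings[i].size());
  }
  offsets[count] = cursor;

  std::vector<char> buffer(cursor);
  std::memcpy(buffer.data(), &count, sizeof(count));
  std::memcpy(buffer.data() + sizeof(count), offsets.data(),
              sizeof(int32_t) * offsets.size());
  for (int32_t i = 0; i < count; ++i) {
    std::memcpy(buffer.data() + offsets[i], strings[i].data(), strings[i].size());
  }
  return buffer;
}

int main() {
  // One string "XYZ": header is 4 + 2 * 4 = 12 bytes, so the offsets are
  // {12, 15} and the whole buffer is 15 bytes -- the raw_bytes in the test.
  const std::vector<char> buffer = EncodeStrings({"XYZ"});
  std::printf("total bytes: %zu\n", buffer.size());  // 15
  return 0;
}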
c351440d-fff6-4367-a193-998c9dc9bbaf
cpp
tensorflow/tensorflow
control_flow_ops
tensorflow/core/ops/control_flow_ops.cc
tensorflow/core/ops/control_flow_ops_test.cc
#include <vector> #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::InferenceContext; using shape_inference::ShapeHandle; namespace { Status SwitchShape(InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); ShapeHandle out = c->input(0); c->set_output(0, out); c->set_output(1, out); auto* handle_data = c->input_handle_shapes_and_types(0); if (handle_data != nullptr) { c->set_output_handle_shapes_and_types(0, *handle_data); c->set_output_handle_shapes_and_types(1, *handle_data); } return absl::OkStatus(); } Status SwitchNShape(InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); ShapeHandle out = c->input(0); int num_outs; TF_RETURN_IF_ERROR(c->GetAttr("num_outs", &num_outs)); for (int i = 0; i < num_outs; i++) { c->set_output(i, out); } auto* handle_data = c->input_handle_shapes_and_types(0); if (handle_data != nullptr) { for (int i = 0; i < num_outs; i++) { c->set_output_handle_shapes_and_types(i, *handle_data); } } return absl::OkStatus(); } } REGISTER_OP("Switch") .Input("data: T") .Input("pred: bool") .Output("output_false: T") .Output("output_true: T") .Attr("T: type") .SetForwardTypeFn(full_type::ReplicateInput(0, 2)) .SetShapeFn(SwitchShape); REGISTER_OP("RefSwitch") .Input("data: Ref(T)") .Input("pred: bool") .Output("output_false: Ref(T)") .Output("output_true: Ref(T)") .Attr("T: type") .SetAllowsUninitializedInput() .SetShapeFn(SwitchShape); REGISTER_OP("_SwitchN") .Input("data: T") .Input("output_index: int32") .Output("outputs: num_outs * T") .Attr("num_outs: int >= 1") .Attr("T: type") .SetShapeFn(SwitchNShape); REGISTER_OP("RefSelect") .Input("index: int32") .Input("inputs: Ref(N * T)") .Output("output: Ref(T)") .Attr("T: type") .Attr("N: int >= 1") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused)); ShapeHandle first_input = c->input(1); if (!c->FullyDefined(first_input)) { c->set_output(0, c->UnknownShape()); return absl::OkStatus(); } for (int i = 2; i < c->num_inputs(); ++i) { ShapeHandle input = c->input(i); if (!c->FullyDefined(input) || !c->Merge(first_input, input, &unused).ok()) { c->set_output(0, c->UnknownShape()); return absl::OkStatus(); } } c->set_output(0, first_input); return absl::OkStatus(); }); namespace { Status MergeShape(InferenceContext* c) { ShapeHandle out = c->input(0); if (!c->RankKnown(out)) { out = c->UnknownShape(); } else { int32_t rank = c->Rank(out); for (int i = 1; i < c->num_inputs(); ++i) { ShapeHandle input = c->input(i); if (!c->RankKnown(input) || c->Rank(input) != rank) { out = c->UnknownShape(); break; } for (int d = 0; d < rank; ++d) { if (c->Value(c->Dim(input, d)) != c->Value(c->Dim(out, d))) { TF_RETURN_IF_ERROR(c->ReplaceDim(out, d, c->UnknownDim(), &out)); } } } } c->set_output(0, out); c->set_output(1, c->Scalar()); return absl::OkStatus(); } TypeInferenceFn MergeTypeFn() { std::vector<TypeInferenceFn> func_list{full_type::Merge(), full_type::Tensor(TFT_INT32)}; return full_type::Tuple(func_list); } } REGISTER_OP("Merge") .Input("inputs: N * T") .Output("output: T") .Output("value_index: int32") .Attr("T: type") .Attr("N: int >= 1") .SetForwardTypeFn(MergeTypeFn()) .SetShapeFn(MergeShape); REGISTER_OP("RefMerge") .Input("inputs: Ref(N * T)") .Output("output: Ref(T)") .Output("value_index: int32") .Attr("T: type") .Attr("N: int 
>= 1") .SetShapeFn(MergeShape); REGISTER_OP("Enter") .Input("data: T") .Output("output: T") .Attr("T: type") .Attr("frame_name: string") .Attr("is_constant: bool = false") .Attr("parallel_iterations: int = 10") .SetForwardTypeFn(full_type::ReplicateInput()) .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->UnknownShape()); auto* handle_data = c->input_handle_shapes_and_types(0); if (handle_data != nullptr) { c->set_output_handle_shapes_and_types(0, *handle_data); } bool is_constant; TF_RETURN_IF_ERROR(c->GetAttr("is_constant", &is_constant)); if (is_constant) { c->set_output(0, c->input(0)); } return absl::OkStatus(); }); REGISTER_OP("RefEnter") .Input("data: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .Attr("frame_name: string") .Attr("is_constant: bool = false") .Attr("parallel_iterations: int = 10") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("Exit") .Input("data: T") .Output("output: T") .Attr("T: type") .SetForwardTypeFn(full_type::ReplicateInput()) .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("RefExit") .Input("data: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("NextIteration") .Input("data: T") .Output("output: T") .Attr("T: type") .SetForwardTypeFn(full_type::ReplicateInput()) .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("RefNextIteration") .Input("data: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("LoopCond") .Input("input: bool") .Output("output: bool") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRank(c, 0); }); REGISTER_OP("ControlTrigger").SetShapeFn(shape_inference::NoOutputs); REGISTER_OP("Abort") .Attr("error_msg: string = ''") .Attr("exit_without_error: bool = false") .SetShapeFn(shape_inference::NoOutputs); }
#include <memory> #include "tensorflow/core/common_runtime/type_inference.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(ControlFlowOpsTest, Merge_ShapeFn) { ShapeInferenceTestOp op("Merge"); int n = 3; std::vector<NodeDefBuilder::NodeOut> src_list; src_list.reserve(n); for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT); TF_ASSERT_OK(NodeDefBuilder("test", "Merge") .Input(src_list) .Attr("N", n) .Finalize(&op.node_def)); INFER_OK(op, "?;?;?", "?;[]"); INFER_OK(op, "[2,1];?;[2,1]", "?;[]"); INFER_OK(op, "[2,1];[2,1];?", "?;[]"); INFER_OK(op, "[2,1];[2,1];[3,1,2]", "?;[]"); INFER_OK(op, "[2,1];[2,1];[3,1]", "[?,d0_1];[]"); INFER_OK(op, "[2,1];[2,2];[3,1]", "[?,?];[]"); INFER_OK(op, "[2,1];[2,1];[2,1]", "in0;[]"); } TEST(ControlFlowOpsTest, SwitchN_ShapeFn) { ShapeInferenceTestOp op("_SwitchN"); int n = 5; TF_ASSERT_OK(NodeDefBuilder("test", "_SwitchN") .Input({"d", 0, DT_FLOAT}) .Input({"bi", 0, DT_INT32}) .Attr("num_outs", n) .Finalize(&op.node_def)); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[1]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?]"); INFER_OK(op, "?;?", "in0;in0;in0;in0;in0"); INFER_OK(op, "[2,?];?", "in0;in0;in0;in0;in0"); INFER_OK(op, "[2,?];[]", "in0;in0;in0;in0;in0"); INFER_OK(op, "[2,3];[]", "in0;in0;in0;in0;in0"); } TEST(ControlFlowOpsTest, RefSelect_ShapeFn) { ShapeInferenceTestOp op("RefSelect"); int n = 3; std::vector<NodeDefBuilder::NodeOut> src_list; src_list.reserve(n); for (int i = 0; i < n; ++i) src_list.emplace_back("a", 1, DT_FLOAT_REF); TF_ASSERT_OK(NodeDefBuilder("test", "RefSelect") .Input("index", 0, DT_INT32) .Input(src_list) .Attr("N", n) .Finalize(&op.node_def)); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];?;?;?"); INFER_OK(op, "?;?;?;?", "?"); INFER_OK(op, "[];?;?;?", "?"); INFER_OK(op, "[];[1,2,3];?;?", "?"); INFER_OK(op, "[];[1,2,3];[1,2,?];[1,2,3]", "?"); INFER_OK(op, "[];[1,2,3];[1,2];[1,2,3]", "?"); INFER_OK(op, "[];[1,2,3];[1,2,4];[1,2,3]", "?"); INFER_OK(op, "[];[1,2,3];[1,2,3];[1,2,3]", "in1"); } static Status type_inference(Graph& graph) { GraphOptimizationPassOptions opt_options; std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global())); graph_ptr->Copy(graph); opt_options.graph = &graph_ptr; opt_options.flib_def = graph.mutable_flib_def(); TypeInferencePass pass; return pass.Run(opt_options); } REGISTER_OP("ControlFlowOpsTest>ConstTypeCtor") .Output("output: dtype") .Attr("value: tensor") .Attr("dtype: type") .SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype")) .SetShapeFn(shape_inference::UnknownShape); TEST(ControlFlowOpsTest, Merge_TypeInfrnc) { Graph graph(OpRegistry::Global()); Node* input_tensor_op1; TensorProto tensor_proto1; TF_EXPECT_OK( NodeBuilder("input_tensor_op1", "ControlFlowOpsTest>ConstTypeCtor") .Attr("value", tensor_proto1) .Attr("dtype", DT_FLOAT) .Finalize(&graph, &input_tensor_op1)); Node* input_tensor_op2; TensorProto tensor_proto2; TF_EXPECT_OK( NodeBuilder("input_tensor_op2", "ControlFlowOpsTest>ConstTypeCtor") .Attr("value", tensor_proto2) 
.Attr("dtype", DT_FLOAT) .Finalize(&graph, &input_tensor_op2)); Node* shape_op; TF_EXPECT_OK(NodeBuilder("merge_op", "Merge") .Input({input_tensor_op1, input_tensor_op2}) .Attr("T", DT_FLOAT) .Finalize(&graph, &shape_op)); TF_EXPECT_OK(type_inference(graph)); FullTypeDef expected_shape_op_t; protobuf::TextFormat::Parser parser; CHECK(parser.ParseFromString( R"pb(type_id: TFT_PRODUCT args { type_id: TFT_TENSOR args { type_id: TFT_FLOAT } } args { type_id: TFT_TENSOR args { type_id: TFT_INT32 } })pb", &expected_shape_op_t)); EXPECT_TRUE(full_type::IsEqual(shape_op->def().experimental_type(), expected_shape_op_t)) << "fulltype is\n" << shape_op->def().experimental_type().DebugString() << "\nexpected\n" << expected_shape_op_t.DebugString(); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/control_flow_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/control_flow_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
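Aside (not part of the dataset record above): the Merge_ShapeFn expectations in this record follow one rule, visible in MergeShape: if any input has unknown or mismatched rank, the merged shape is unknown; otherwise each dimension is kept only where all inputs agree, and disagreeing dimensions become unknown. The sketch below restates that rule with plain vectors (-1 for an unknown dimension, an empty optional for an unknown shape); MergeShapes is an invented name and this is not the shape_inference API.

// Standalone sketch of the Merge shape rule tested above.
#include <cstdio>
#include <optional>
#include <vector>

using Shape = std::optional<std::vector<long long>>;  // nullopt = unknown shape

Shape MergeShapes(const std::vector<Shape>& inputs) {
  if (inputs.empty() || !inputs[0].has_value()) return std::nullopt;
  std::vector<long long> out = *inputs[0];
  for (size_t i = 1; i < inputs.size(); ++i) {
    if (!inputs[i].has_value() || inputs[i]->size() != out.size()) {
      return std::nullopt;  // unknown or mismatched rank
    }
    for (size_t d = 0; d < out.size(); ++d) {
      if ((*inputs[i])[d] != out[d]) out[d] = -1;  // disagreeing dim -> unknown
    }
  }
  return out;
}

int main() {
  // Mirrors INFER_OK(op, "[2,1];[2,1];[3,1]", "[?,d0_1];[]"): the first dims
  // disagree (2 vs 3), the second dims agree, so the result is [?, 1].
  std::vector<Shape> inputs;
  inputs.push_back(std::vector<long long>{2, 1});
  inputs.push_back(std::vector<long long>{2, 1});
  inputs.push_back(std::vector<long long>{3, 1});
  Shape merged = MergeShapes(inputs);
  if (merged.has_value()) {
    for (long long d : *merged) std::printf("%lld ", d);  // prints: -1 1
    std::printf("\n");
  }
  return 0;
}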
ea1e268f-3784-4aec-8ec6-1cd57ff2d4cd
cpp
tensorflow/tensorflow
ragged_fill_empty_rows_op
tensorflow/core/kernels/ragged_fill_empty_rows_op.cc
tensorflow/core/kernels/ragged_fill_empty_rows_op_test.cc
#define EIGEN_USE_THREADS #include <algorithm> #include <numeric> #include <unordered_map> #include <utility> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/fill_empty_rows_functor.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; namespace { template <typename Device, typename T, typename Tindex> void RaggedFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { if (!done) { done = [] {}; } const int kValueRowidsInput = 0; const int kValuesInput = 1; const int kNRowsInput = 2; const int kDefaultValueInput = 3; const Tensor& value_rowids_t = context->input(kValueRowidsInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& nrows_t = context->input(kNRowsInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsScalar(nrows_t.shape()), errors::InvalidArgument("nrows must be a scalar, saw: ", nrows_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(value_rowids_t.shape()), errors::InvalidArgument("value_rowids must be a vector, saw: ", value_rowids_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, value_rowids_t.dim_size(0) == values_t.dim_size(0), errors::InvalidArgument( "The length of `values` (", values_t.dim_size(0), ") must match the first dimension of `value_rowids` (", value_rowids_t.dim_size(0), ")."), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); using FunctorType = functor::FillEmptyRows<Device, T, Tindex, true>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, value_rowids_t, values_t, nrows_t, done), done); } } template <typename Device, typename T, typename Tindex> class RaggedFillEmptyRowsOp : public OpKernel { public: explicit RaggedFillEmptyRowsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { RaggedFillEmptyRowsOpImpl<Device, T, Tindex>(context); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("RaggedFillEmptyRows") \ .Device(DEVICE_##D) \ .HostMemory("nrows") \ .TypeConstraint<T>("T"), \ RaggedFillEmptyRowsOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_ALL_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T, typename Tindex> class RaggedFillEmptyRowsGPUOp : public AsyncOpKernel { public: explicit RaggedFillEmptyRowsGPUOp(OpKernelConstruction* context) : AsyncOpKernel(context) {} void ComputeAsync(OpKernelContext* context, DoneCallback done) override { RaggedFillEmptyRowsOpImpl<GPUDevice, T, Tindex>(context, done); } }; #define REGISTER_KERNELS(T, Tindex) \ 
REGISTER_KERNEL_BUILDER(Name("RaggedFillEmptyRows") \ .Device(DEVICE_GPU) \ .HostMemory("nrows") \ .TypeConstraint<T>("T"), \ RaggedFillEmptyRowsGPUOp<T, Tindex>) #define REGISTER_KERNELS_TINDEX(T) REGISTER_KERNELS(T, int64) TF_CALL_POD_TYPES(REGISTER_KERNELS_TINDEX) #undef REGISTER_KERNELS_TINDEX #undef REGISTER_KERNELS #endif template <typename Device, typename T, typename Tindex> class RaggedFillEmptyRowsGradOp : public OpKernel { public: explicit RaggedFillEmptyRowsGradOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()), errors::InvalidArgument("grad_values must be a vector, saw: ", grad_values_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<Tindex>(); const auto grad_values = grad_values_t->vec<T>(); const Tindex N = reverse_index_map_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); auto d_default_value = d_default_value_t->scalar<T>(); OP_REQUIRES_OK(context, functor::FillEmptyRowsGrad<Device, T, Tindex>()( context, reverse_index_map, grad_values, d_values, d_default_value)); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("RaggedFillEmptyRowsGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T"), \ RaggedFillEmptyRowsGradOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T, int64) TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #endif #undef REGISTER_KERNELS }
#include <gtest/gtest.h> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class RaggedFillEmptyRowsOpTest : public ::tensorflow::OpsTestBase { protected: const int kValueRowidsOutput = 0; const int kValuesOutput = 1; const int kEmptyRowIndicatorOutput = 2; const int kReverseIndexMapOutput = 3; template <typename T> void BuildFillEmptyRowsGraph() { const auto& dtype = DataTypeToEnum<T>::v(); const auto& dtype_int64 = DataTypeToEnum<int64_t>::v(); TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedFillEmptyRows") .Input(FakeInput(dtype_int64)) .Input(FakeInput(dtype)) .Input(FakeInput(dtype_int64)) .Input(FakeInput(dtype)) .Attr("T", dtype) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(RaggedFillEmptyRowsOpTest, IntValues) { BuildFillEmptyRowsGraph<int>(); AddInputFromArray<int64_t>(TensorShape({4}), {1, 2, 2, 5}); AddInputFromArray<int>(TensorShape({4}), {2, 4, 6, 8}); AddInputFromArray<int64_t>(TensorShape({}), {7}); AddInputFromArray<int>(TensorShape({}), {-1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>( *GetOutput(kValueRowidsOutput), test::AsTensor<int64_t>({0, 1, 2, 2, 3, 4, 5, 6})); test::ExpectTensorEqual<int>( *GetOutput(kValuesOutput), test::AsTensor<int>({-1, 2, 4, 6, -1, -1, 8, -1})); } TEST_F(RaggedFillEmptyRowsOpTest, FloatValues) { BuildFillEmptyRowsGraph<float>(); AddInputFromArray<int64_t>(TensorShape({4}), {1, 2, 2, 5}); AddInputFromArray<float>(TensorShape({4}), {2., 4., 6., 8.}); AddInputFromArray<int64_t>(TensorShape({}), {7}); AddInputFromArray<float>(TensorShape({}), {-1.}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>( *GetOutput(kValueRowidsOutput), test::AsTensor<int64_t>({0, 1, 2, 2, 3, 4, 5, 6})); test::ExpectTensorEqual<float>( *GetOutput(kValuesOutput), test::AsTensor<float>({-1., 2., 4., 6., -1., -1., 8., -1.})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_fill_empty_rows_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_fill_empty_rows_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
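Aside (not part of the dataset record above): the IntValues test in this record pins down what the op computes: every row in [0, nrows) with no entries in value_rowids receives a single default value, while existing entries are kept in row order. The sketch below re-derives the test's expected outputs with plain vectors; FillEmptyRows here is an invented standalone function, not the FillEmptyRows functor the kernel dispatches to.

// Standalone sketch of the fill logic checked by the tests above.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Filled {
  std::vector<int64_t> value_rowids;
  std::vector<int> values;
};

// Assumes value_rowids is sorted by row, as the ragged format requires.
Filled FillEmptyRows(const std::vector<int64_t>& value_rowids,
                     const std::vector<int>& values, int64_t nrows,
                     int default_value) {
  Filled out;
  size_t i = 0;
  for (int64_t row = 0; row < nrows; ++row) {
    bool row_has_values = false;
    while (i < value_rowids.size() && value_rowids[i] == row) {
      out.value_rowids.push_back(row);
      out.values.push_back(values[i]);
      row_has_values = true;
      ++i;
    }
    if (!row_has_values) {  // empty row: insert one default entry
      out.value_rowids.push_back(row);
      out.values.push_back(default_value);
    }
  }
  return out;
}

int main() {
  // Same inputs as the IntValues test: rowids {1,2,2,5}, values {2,4,6,8},
  // nrows 7, default -1. Expected: rowids 0,1,2,2,3,4,5,6 and
  // values -1,2,4,6,-1,-1,8,-1.
  Filled result = FillEmptyRows({1, 2, 2, 5}, {2, 4, 6, 8}, 7, -1);
  for (size_t i = 0; i < result.values.size(); ++i) {
    std::printf("(row %lld, value %d)\n",
                static_cast<long long>(result.value_rowids[i]), result.values[i]);
  }
  return 0;
}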
625b1169-0fd7-4eea-9250-58d3f1528a04
cpp
tensorflow/tensorflow
string_ngrams_op
tensorflow/core/kernels/string_ngrams_op.cc
tensorflow/core/kernels/string_ngrams_op_test.cc
#include <algorithm> #include <locale> #include <string> #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace text { namespace { template <typename SPLITS_TYPE> class StringNGramsOp : public tensorflow::OpKernel { public: explicit StringNGramsOp(tensorflow::OpKernelConstruction* context) : tensorflow::OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("separator", &separator_)); OP_REQUIRES_OK(context, context->GetAttr("ngram_widths", &ngram_widths_)); OP_REQUIRES_OK(context, context->GetAttr("left_pad", &left_pad_)); OP_REQUIRES_OK(context, context->GetAttr("right_pad", &right_pad_)); OP_REQUIRES_OK(context, context->GetAttr("pad_width", &pad_width_)); OP_REQUIRES_OK(context, context->GetAttr("preserve_short_sequences", &preserve_short_)); } int get_pad_width(const int ngram_width) const { return std::min(pad_width_ < 0 ? ngram_width - 1 : pad_width_, ngram_width - 1); } absl::StatusOr<int> get_num_ngrams(const int length, const int ngram_width) const { int64 limit = kint32max; int pad_width = get_pad_width(ngram_width); if (pad_width > limit / 2 - length) { return errors::InvalidArgument( "Pad width could lead to integer overflow, got pad_width = ", pad_width); } return std::max(0, ((length + 2 * pad_width) - ngram_width) + 1); } void Compute(tensorflow::OpKernelContext* context) override { for (int ngram_width : ngram_widths_) { OP_REQUIRES( context, ngram_width > 0, errors::InvalidArgument("ngram_widths must contain positive values")); } const tensorflow::Tensor* data; OP_REQUIRES_OK(context, context->input("data", &data)); const auto& input_data = data->flat<tstring>().data(); const tensorflow::Tensor* splits; OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat<SPLITS_TYPE>(); const int input_data_size = data->flat<tstring>().size(); const int splits_vec_size = splits_vec.size(); if (splits_vec_size > 0) { int prev_split = splits_vec(0); OP_REQUIRES(context, prev_split == 0, errors::InvalidArgument("First split value must be 0, got ", prev_split)); for (int i = 1; i < splits_vec_size; ++i) { bool valid_splits = splits_vec(i) >= prev_split; valid_splits = valid_splits && (splits_vec(i) <= input_data_size); OP_REQUIRES(context, valid_splits, errors::InvalidArgument( "Invalid split value ", splits_vec(i), ", must be in [", prev_split, ", ", input_data_size, "]")); prev_split = splits_vec(i); } OP_REQUIRES(context, prev_split == input_data_size, errors::InvalidArgument( "Last split value must be data size. 
Expected ", input_data_size, ", got ", prev_split)); } int num_batch_items = splits_vec.size() - 1; tensorflow::Tensor* ngrams_splits; OP_REQUIRES_OK( context, context->allocate_output(1, splits->shape(), &ngrams_splits)); auto ngrams_splits_data = ngrams_splits->flat<SPLITS_TYPE>().data(); if (data->flat<tstring>().size() == 0 || splits_vec.size() == 0) { tensorflow::Tensor* empty; OP_REQUIRES_OK(context, context->allocate_output(0, data->shape(), &empty)); for (int i = 0; i <= num_batch_items; ++i) { ngrams_splits_data[i] = 0; } return; } ngrams_splits_data[0] = 0; for (int i = 1; i <= num_batch_items; ++i) { int length = splits_vec(i) - splits_vec(i - 1); int num_ngrams = 0; for (int ngram_width : ngram_widths_) { auto ngrams_or = get_num_ngrams(length, ngram_width); OP_REQUIRES_OK(context, ngrams_or.status()); num_ngrams += ngrams_or.value(); } if (preserve_short_ && length > 0 && num_ngrams == 0) { num_ngrams = 1; } ngrams_splits_data[i] = ngrams_splits_data[i - 1] + num_ngrams; } tensorflow::Tensor* ngrams; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({ngrams_splits_data[num_batch_items]}), &ngrams)); auto ngrams_data = ngrams->flat<tstring>().data(); for (int i = 0; i < num_batch_items; ++i) { auto data_start = &input_data[splits_vec(i)]; int output_start_idx = ngrams_splits_data[i]; for (int ngram_width : ngram_widths_) { auto output_start = &ngrams_data[output_start_idx]; int length = splits_vec(i + 1) - splits_vec(i); auto ngrams_or = get_num_ngrams(length, ngram_width); OP_REQUIRES_OK(context, ngrams_or.status()); int num_ngrams = ngrams_or.value(); CreateNgrams(data_start, output_start, num_ngrams, ngram_width); output_start_idx += num_ngrams; } if (preserve_short_ && output_start_idx == ngrams_splits_data[i]) { int data_length = splits_vec(i + 1) - splits_vec(i); if (data_length == 0) { continue; } OP_REQUIRES( context, pad_width_ >= 0, errors::InvalidArgument("Pad width should be >= 0 when " "preserve_short_sequences is True and " "ngram_widths are not provided, got ", pad_width_)); int ngram_width = data_length + 2 * pad_width_; auto output_start = &ngrams_data[output_start_idx]; int num_ngrams = 1; CreateNgrams(data_start, output_start, num_ngrams, ngram_width); } } } void CreateNgrams(const tstring* data, tstring* output, int num_ngrams, int ngram_width) const { for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) { int pad_width = get_pad_width(ngram_width); int left_padding = std::max(0, pad_width - ngram_index); int right_padding = std::max(0, pad_width - (num_ngrams - (ngram_index + 1))); int num_tokens = ngram_width - (left_padding + right_padding); int data_start_index = left_padding > 0 ? 
0 : ngram_index - pad_width; int ngram_size = 0; ngram_size += left_padding * left_pad_.length(); for (int n = 0; n < num_tokens; ++n) { ngram_size += data[data_start_index + n].length(); } ngram_size += right_padding * right_pad_.length(); int num_separators = left_padding + right_padding + num_tokens - 1; ngram_size += num_separators * separator_.length(); tstring* ngram = &output[ngram_index]; ngram->reserve(ngram_size); for (int n = 0; n < left_padding; ++n) { ngram->append(left_pad_); ngram->append(separator_); } for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } if (num_tokens > 0) { ngram->append(data[data_start_index + num_tokens - 1]); for (int n = 0; n < right_padding; ++n) { ngram->append(separator_); ngram->append(right_pad_); } } else { for (int n = 0; n < right_padding - 1; ++n) { ngram->append(right_pad_); ngram->append(separator_); } ngram->append(right_pad_); } DCHECK_EQ(ngram_size, ngram->size()); } } string separator_; string left_pad_; string right_pad_; bool use_pad_; bool extend_pad_; bool preserve_short_; std::vector<int> ngram_widths_; int pad_width_; }; } REGISTER_KERNEL_BUILDER(Name("StringNGrams") .Device(tensorflow::DEVICE_CPU) .TypeConstraint<int32>("Tsplits"), StringNGramsOp<int32>); REGISTER_KERNEL_BUILDER(Name("StringNGrams") .Device(tensorflow::DEVICE_CPU) .TypeConstraint<int64_t>("Tsplits"), StringNGramsOp<int64_t>); } }
#include <vector> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { namespace text { using tensorflow::FakeInput; using tensorflow::NodeDefBuilder; using tensorflow::Status; using tensorflow::TensorShape; class NgramKernelTest : public tensorflow::OpsTestBase { public: void MakeOp(string separator, std::vector<int> ngram_width, string left_pad, string right_pad, int pad_width, bool preserve) { TF_ASSERT_OK(NodeDefBuilder("tested_op", "StringNGrams") .Attr("separator", separator) .Attr("ngram_widths", ngram_width) .Attr("left_pad", left_pad) .Attr("right_pad", right_pad) .Attr("pad_width", pad_width) .Attr("preserve_short_sequences", preserve) .Input(FakeInput()) .Input(FakeInput()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } void assert_string_equal(const std::vector<tstring> &expected, const Tensor &value) { Tensor expected_tensor( allocator(), DT_STRING, TensorShape({static_cast<int64_t>(expected.size())})); test::FillValues<tstring>(&expected_tensor, expected); test::ExpectTensorEqual<tstring>(expected_tensor, value); } void assert_int64_equal(const std::vector<int64_t> &expected, const Tensor &value) { Tensor expected_tensor( allocator(), DT_INT64, TensorShape({static_cast<int64_t>(expected.size())})); test::FillValues<int64_t>(&expected_tensor, expected); test::ExpectTensorEqual<int64_t>(expected_tensor, value); } }; TEST_F(NgramKernelTest, TestPaddedTrigrams) { MakeOp("|", {3}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|LP|a", "LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "d|RP|RP", "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); std::vector<int64_t> expected_splits({0, 6, 10}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedBigramsAndTrigrams) { MakeOp("|", {2, 3}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|LP|a", "LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "d|RP|RP", "LP|e", "e|f", "f|RP", "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); std::vector<int64_t> expected_splits({0, 11, 18}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedBigrams) { MakeOp("|", {2}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|e", "e|f", "f|RP"}); std::vector<int64_t> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); 
assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddingIsAtMostNGramSizeMinus1) { MakeOp("|", {2}, "LP", "RP", 4, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|e", "e|f", "f|RP"}); std::vector<int64_t> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedUnigramAndBigrams) { MakeOp("|", {1, 2}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"a", "b", "c", "d", "LP|a", "a|b", "b|c", "c|d", "d|RP", "e", "f", "LP|e", "e|f", "f|RP"}); std::vector<int64_t> expected_splits({0, 9, 14}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingPaddedNGrams) { MakeOp("|", {3}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|LP|a", "LP|a|RP", "a|RP|RP", "LP|LP|b", "LP|b|c", "b|c|d", "c|d|RP", "d|RP|RP", "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); std::vector<int64_t> expected_splits({0, 3, 8, 12}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingPaddedMultiCharNGrams) { MakeOp("|", {3}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({6}), {"aa", "bb", "cc", "dd", "ee", "ff"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|LP|aa", "LP|aa|RP", "aa|RP|RP", "LP|LP|bb", "LP|bb|cc", "bb|cc|dd", "cc|dd|RP", "dd|RP|RP", "LP|LP|ee", "LP|ee|ff", "ee|ff|RP", "ff|RP|RP"}); std::vector<int64_t> expected_splits({0, 3, 8, 12}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestMultiOverlappingPaddedNGrams) { MakeOp("|", {5}, "LP", "RP", -1, false); AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64_t>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|LP|LP|LP|a", "LP|LP|LP|a|RP", "LP|LP|a|RP|RP", "LP|a|RP|RP|RP", "a|RP|RP|RP|RP"}); std::vector<int64_t> expected_splits({0, 5}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigrams) { MakeOp("|", {3}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d"}); std::vector<int64_t> expected_splits({0, 2, 2}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithEmptySequence) { MakeOp("|", {3}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 4, 4, 
6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d"}); std::vector<int64_t> expected_splits({0, 2, 2, 2}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithPreserveShort) { MakeOp("|", {3}, "", "", 0, true); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithPreserveShortAndEmptySequence) { MakeOp("|", {3}, "", "", 0, true); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 4, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 2, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsAndQuadgramsWithPreserveShort) { MakeOp("|", {4, 3}, "", "", 0, true); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 3, 4}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigramsAndTrigrams) { MakeOp("|", {2, 3}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"a|b", "b|c", "c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigramsAndTrigramsWithPreserveShort) { MakeOp("|", {2, 3}, "", "", 0, true); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"a|b", "b|c", "c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsAndBigramsWithPreserveShort) { MakeOp("|", {3, 2}, "", "", 0, true); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"a|b|c", "b|c|d", "a|b", "b|c", "c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigrams) { MakeOp("|", {2}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); 
std::vector<tstring> expected_values({"a|b", "b|c", "c|d", "e|f"}); std::vector<int64_t> expected_splits({0, 3, 4}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGrams) { MakeOp("|", {3}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"b|c|d"}); std::vector<int64_t> expected_splits({0, 0, 1, 1}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGramsNoOutput) { MakeOp("|", {5}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64_t> expected_splits({0, 0, 0, 0}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedTrigrams) { MakeOp("|", {3}, "LP", "RP", 1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "LP|e|f", "e|f|RP"}); std::vector<int64_t> expected_splits({0, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedBigrams) { MakeOp("|", {2}, "LP", "RP", 1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|e", "e|f", "f|RP"}); std::vector<int64_t> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedBigramsAnd5grams) { MakeOp("|", {2, 5}, "LP", "RP", 1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|a|b|c|d", "a|b|c|d|RP", "LP|e", "e|f", "f|RP"}); std::vector<int64_t> expected_splits({0, 7, 10}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPadded5gramsWithPreserveShort) { MakeOp("|", {5}, "LP", "RP", 1, true); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a|b|c|d", "a|b|c|d|RP", "LP|e|f|RP"}); std::vector<int64_t> expected_splits({0, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingSinglyPaddedNGrams) { MakeOp("|", {3}, "LP", "RP", 1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( 
{"LP|a|RP", "LP|b|c", "b|c|d", "c|d|RP", "LP|e|f", "e|f|RP"}); std::vector<int64_t> expected_splits({0, 1, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingSinglyPaddedNGramsNoOutput) { MakeOp("|", {5}, "LP", "RP", 1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|b|c|d|RP"}); std::vector<int64_t> expected_splits({0, 0, 1, 1}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedUnigrams) { MakeOp("|", {1}, "LP", "RP", 1, false); AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a", "b", "c", "d", "e", "f"}); std::vector<int64_t> expected_splits({0, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestEmptyInput) { MakeOp("|", {1}, "LP", "RP", 3, false); AddInputFromArray<tstring>(TensorShape({0}), {}); AddInputFromArray<int64_t>(TensorShape({0}), {}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64_t> expected_splits({}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestNoTokens) { MakeOp("|", {3}, "L", "R", -1, false); AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 0, 1}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"L|L|R", "L|R|R", "L|L|a", "L|a|R", "a|R|R"}); std::vector<int64_t> expected_splits({0, 2, 5}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestNoTokensNoPad) { MakeOp("|", {3}, "", "", 0, false); AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 0, 1}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64_t> expected_splits({0, 0, 0}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, ShapeFn) { ShapeInferenceTestOp op("StringNGrams"); INFER_OK(op, "?;?", "[?];[?]"); INFER_OK(op, "[1];?", "[?];[?]"); INFER_OK(op, "[1];[2]", "[?];in1"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/string_ngrams_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/string_ngrams_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b762a54d-5c97-4b30-8aa7-942ba07c9b21
cpp
tensorflow/tensorflow
ops_testutil
tensorflow/core/kernels/ops_testutil.cc
tensorflow/core/kernels/ops_testutil_test.cc
#include "tensorflow/core/framework/node_properties.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define EIGEN_USE_GPU #include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h" #endif #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/control_flow.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/type_index.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/tensor_slice_reader_cache.h" namespace tensorflow { namespace test { void SetOutputAttrs(OpKernelContext::Params* params, std::vector<AllocatorAttributes>* attrs) { attrs->clear(); for (int index = 0; index < params->op_kernel->num_outputs(); index++) { AllocatorAttributes attr; const bool on_host = (params->op_kernel->output_memory_types()[index] == HOST_MEMORY); attr.set_on_host(on_host); attrs->push_back(attr); } params->output_attr_array = attrs->data(); } } OpsTestBase::OpsTestBase() : device_type_(DEVICE_CPU) { auto device = DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"); CHECK(device) << "Could not create CPU device"; thread_pool_ = std::make_unique<thread::ThreadPool>( Env::Default(), "default", 1); device_ = device.get(); device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device)); allocator_ = device_->GetAllocator(AllocatorAttributes()); flib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), FunctionDefLibrary()); pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions()); } OpsTestBase::~OpsTestBase() { for (auto& temp : tensors_) { delete temp; } for (auto& temp : managed_outputs_) { delete temp; } tensors_.clear(); managed_outputs_.clear(); context_.reset(nullptr); params_.reset(nullptr); } void OpsTestBase::SetDevice(const DeviceType& device_type, std::unique_ptr<Device> device) { CHECK(device_) << "No device provided"; device_ = device.get(); device_type_ = device_type; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (device_type == DEVICE_GPU) { managed_allocator_.reset(new GpuManagedAllocator()); allocator_ = managed_allocator_.get(); } else { managed_allocator_.reset(); allocator_ = device_->GetAllocator(AllocatorAttributes()); } #else CHECK_NE(device_type, DEVICE_GPU) << "Requesting GPU on binary compiled without GOOGLE_CUDA or " "TENSORFLOW_USE_ROCM."; allocator_ = device_->GetAllocator(AllocatorAttributes()); #endif device_mgr_ = 
std::make_unique<StaticDeviceMgr>(std::move(device)); pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(), thread_pool_.get()); } void OpsTestBase::set_node_def(const NodeDef& node_def) { node_def_.CopyFrom(node_def); } NodeDef* OpsTestBase::node_def() { return &node_def_; } Status OpsTestBase::InitOp() { return InitOpWithGraphVersion(TF_GRAPH_DEF_VERSION); } Status OpsTestBase::InitOpWithGraphVersion(int graph_def_version) { std::shared_ptr<const NodeProperties> props; TF_RETURN_IF_ERROR(NodeProperties::CreateFromNodeDef( node_def_, OpRegistry::Global(), &props)); OpKernel* kernel; TF_RETURN_IF_ERROR(CreateOpKernel( device_type_, device_, allocator(), nullptr, device_->resource_manager(), props, graph_def_version, &kernel)); kernel_.reset(kernel); input_types_ = kernel_->input_types(); return absl::OkStatus(); } static std::function<void(std::function<void()>)>* GetDefaultRunner() { static auto* const default_runner = new std::function<void(std::function<void()>)>( [](const std::function<void()>& f) { f(); }); return default_runner; } void OpsTestBase::CreateContext() { context_.reset(nullptr); for (auto& temp : managed_outputs_) { delete temp; } managed_outputs_.clear(); managed_outputs_.resize(0); params_.reset(new OpKernelContext::Params); params_->device = device_; params_->frame_iter = FrameAndIter(0, 0); params_->inputs = inputs_; params_->op_kernel = kernel_.get(); step_container_.reset(new ScopedStepContainer(0, [](const string&) {})); params_->step_container = step_container_.get(); test::SetOutputAttrs(params_.get(), &out_alloc_attrs_); params_->slice_reader_cache = &slice_reader_cache_wrapper_; params_->cancellation_manager = &default_cancellation_manager_; params_->resource_manager = device_->resource_manager(); params_->function_library = pflr_->GetFLR(device_->name()); params_->runner = GetDefaultRunner(); params_->session_metadata = &session_metadata(); context_.reset(new OpKernelContext(params_.get())); } Status OpsTestBase::RunOpKernel() { CreateContext(); device_->Compute(kernel_.get(), context_.get()); return context_->status(); } const Tensor& OpsTestBase::GetInput(int input_index) const { CHECK_LT(input_index, context_->num_inputs()); CHECK(!IsRefType(context_->input_dtype(input_index))); return context_->input(input_index); } TensorValue OpsTestBase::mutable_input(int input_index) { CHECK_LT(input_index, inputs_.size()); return inputs_[input_index]; } Tensor* OpsTestBase::GetOutput(int output_index) { CHECK_LT(output_index, context_->num_outputs()); Tensor* output = context_->mutable_output(output_index); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM if (device_type_ == DEVICE_GPU) { managed_outputs_.resize(context_->num_outputs()); if (!managed_outputs_[output_index]) { Tensor* managed_output = new Tensor(allocator(), output->dtype(), output->shape()); auto src = output->tensor_data(); auto dst = managed_output->tensor_data(); context_->eigen_gpu_device().memcpyDeviceToHost( const_cast<char*>(dst.data()), src.data(), src.size()); context_->eigen_gpu_device().synchronize(); managed_outputs_[output_index] = managed_output; } output = managed_outputs_[output_index]; } #endif return output; } Allocator* OpsTestBase::allocator() { return allocator_; } OpKernel* OpsTestBase::op_kernel() { return kernel_.get(); } const DataTypeVector& OpsTestBase::output_types() const { return kernel_->output_types(); } Tensor* OpsTestBase::AddInput(DataType dtype, const TensorShape& shape) 
{ CHECK_GT(input_types_.size(), inputs_.size()) << "Adding more inputs than types; perhaps you need to call MakeOp"; bool is_ref = IsRefType(input_types_[inputs_.size()]); Tensor* input = new Tensor(allocator(), dtype, shape); tensors_.push_back(input); if (is_ref) { CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]), dtype); inputs_.push_back({&lock_for_refs_, input}); } else { CHECK_EQ(input_types_[inputs_.size()], dtype); inputs_.push_back({nullptr, input}); } return input; } void OpsTestBase::AddResourceInputInternal(const std::string& container_name, const std::string& name, const TypeIndex& type_index) { ResourceHandle handle; handle.set_device(device_->name()); handle.set_container(container_name); handle.set_name(name); handle.set_hash_code(type_index.hash_code()); handle.set_maybe_type_name(type_index.name()); Tensor* input = new Tensor(allocator(), DT_RESOURCE, TensorShape({})); input->scalar<ResourceHandle>()() = handle; tensors_.push_back(input); inputs_.push_back({nullptr, input}); } }
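The fixture above is consumed through a fairly fixed sequence: build a NodeDef, call InitOp to create the kernel, add inputs (which route through AddInput), RunOpKernel, then inspect GetOutput. A hedged sketch of that skeleton for a float Identity op follows; it relies only on the AddInputFromArray and test::ExpectTensorEqual helpers declared in ops_testutil.h and tensor_testutil.h, and the op choice and values are arbitrary.

#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

// Illustrative only: exercises the OpsTestBase lifecycle defined above.
TEST_F(OpsTestBase, FloatIdentityRoundTrip) {
  // node_def() + InitOp() drive InitOpWithGraphVersion and CreateOpKernel.
  TF_ASSERT_OK(NodeDefBuilder("identity", "Identity")
                   .Input(FakeInput(DT_FLOAT))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // AddInputFromArray ends up in AddInput, which checks input_types_.
  AddInputFromArray<float>(TensorShape({2}), {1.0f, 2.0f});
  // RunOpKernel builds the OpKernelContext via CreateContext and computes.
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected, {1.0f, 2.0f});
  test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}

}  // namespace tensorflow

The existing ops_testutil_test.cc that follows exercises the same lifecycle with a string Identity op and a resource-variable input.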
#include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/variable_ops.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { TEST_F(OpsTestBase, ScopedStepContainer) { TF_EXPECT_OK(NodeDefBuilder("identity", "Identity") .Input(FakeInput(DT_STRING)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<tstring>(TensorShape({}), {""}); TF_EXPECT_OK(RunOpKernel()); EXPECT_TRUE(step_container_ != nullptr); } TEST_F(OpsTestBase, ResourceVariableInput) { TF_EXPECT_OK(NodeDefBuilder("identity", "Identity") .Input(FakeInput(DT_RESOURCE)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); Var* var = new Var(DT_STRING); AddResourceInput("" , "Test" , var); TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); EXPECT_EQ(output->dtype(), DT_RESOURCE); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ops_testutil.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ops_testutil_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bd75d663-745a-42b4-abd8-5e8e7c6c4e3f
cpp
tensorflow/tensorflow
summary_tensor_op
tensorflow/core/kernels/summary_tensor_op.cc
tensorflow/core/kernels/summary_tensor_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" namespace tensorflow { template <typename T> class SummaryTensorOpV2 : public OpKernel { public: explicit SummaryTensorOpV2(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* c) override { const Tensor& tag = c->input(0); OP_REQUIRES(c, TensorShapeUtils::IsScalar(tag.shape()), errors::InvalidArgument("tag must be scalar")); const Tensor& tensor = c->input(1); const Tensor& serialized_summary_metadata_tensor = c->input(2); OP_REQUIRES( c, TensorShapeUtils::IsScalar(serialized_summary_metadata_tensor.shape()), errors::InvalidArgument("serialized_summary_metadata must be scalar")); Summary s; Summary::Value* v = s.add_value(); v->set_tag(string(tag.scalar<tstring>()())); if (tensor.dtype() == DT_STRING) { tensor.AsProtoField(v->mutable_tensor()); } else { tensor.AsProtoTensorContent(v->mutable_tensor()); } ParseFromTString(serialized_summary_metadata_tensor.scalar<tstring>()(), v->mutable_metadata()); Tensor* summary_tensor = nullptr; OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor)); CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()())); } }; #define REGISTER(T) \ REGISTER_KERNEL_BUILDER( \ Name("TensorSummaryV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ SummaryTensorOpV2<T>); TF_CALL_ALL_TYPES(REGISTER) #undef REGISTER template <typename T> class SummaryTensorOp : public OpKernel { public: explicit SummaryTensorOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* c) override { const Tensor& tensor = c->input(0); Summary s; Summary::Value* v = s.add_value(); v->set_node_name(c->op_kernel().name()); if (tensor.dtype() == DT_STRING) { tensor.AsProtoField(v->mutable_tensor()); } else { tensor.AsProtoTensorContent(v->mutable_tensor()); } Tensor* summary_tensor = nullptr; OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor)); CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()())); } }; #define REGISTER(T) \ REGISTER_KERNEL_BUILDER( \ Name("TensorSummary").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ SummaryTensorOp<T>); TF_CALL_ALL_TYPES(REGISTER) #undef REGISTER }
#include <functional> #include <memory> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/histogram/histogram.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { static void EXPECT_SummaryMatches(const Summary& actual, const string& expected_str) { Summary expected; CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected)); EXPECT_EQ(expected.DebugString(), actual.DebugString()); } class SummaryTensorOpV2Test : public OpsTestBase { protected: void MakeOp() { TF_ASSERT_OK(NodeDefBuilder("myop", "TensorSummaryV2") .Input(FakeInput(DT_STRING)) .Input(FakeInput(DT_STRING)) .Input(FakeInput(DT_STRING)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SummaryTensorOpV2Test, BasicPluginData) { MakeOp(); AddInputFromArray<tstring>(TensorShape({}), {"tag_foo"}); AddInputFromArray<tstring>(TensorShape({}), {"some string tensor content"}); SummaryMetadata summary_metadata; SummaryMetadata::PluginData* plugin_data = summary_metadata.mutable_plugin_data(); plugin_data->set_plugin_name("foo"); plugin_data->set_content("content_for_plugin_foo"); AddInputFromArray<tstring>(TensorShape({}), {summary_metadata.SerializeAsString()}); TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); ASSERT_EQ(1, summary.value_size()); Tensor string_content_tensor; CHECK(string_content_tensor.FromProto(summary.value(0).tensor())); ASSERT_EQ("some string tensor content", string_content_tensor.scalar<tstring>()()); ASSERT_EQ("tag_foo", summary.value(0).tag()); ASSERT_EQ("foo", summary.value(0).metadata().plugin_data().plugin_name()); ASSERT_EQ("content_for_plugin_foo", summary.value(0).metadata().plugin_data().content()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_tensor_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_tensor_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
509e353a-35db-4466-9316-62db78131fc1
cpp
tensorflow/tensorflow
multinomial_op
tensorflow/core/kernels/multinomial_op.cc
tensorflow/core/kernels/multinomial_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/multinomial_op.h" #include <algorithm> #include <cmath> #include <memory> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/stateless_random_ops.h" #include "tensorflow/core/lib/random/random_distributions.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/util/guarded_philox_random.h" #include "tensorflow/core/util/work_sharder.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace functor { template <typename Device, typename T, typename OutputType> struct MultinomialFunctor { void operator()(OpKernelContext* ctx, const Device& d, typename TTypes<T>::ConstMatrix logits, typename TTypes<float>::Flat noises, typename TTypes<float>::Flat scores, typename TTypes<float>::Flat scratch, int batch_size, int num_classes, int num_samples, const random::PhiloxRandom& gen, typename TTypes<OutputType>::Matrix output); }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM extern template struct MultinomialFunctor<GPUDevice, Eigen::half, int32>; extern template struct MultinomialFunctor<GPUDevice, float, int32>; extern template struct MultinomialFunctor<GPUDevice, double, int32>; extern template struct MultinomialFunctor<GPUDevice, int32, int32>; extern template struct MultinomialFunctor<GPUDevice, int64_t, int32>; extern template struct MultinomialFunctor<GPUDevice, Eigen::half, int64_t>; extern template struct MultinomialFunctor<GPUDevice, float, int64_t>; extern template struct MultinomialFunctor<GPUDevice, double, int64_t>; extern template struct MultinomialFunctor<GPUDevice, int32, int64_t>; extern template struct MultinomialFunctor<GPUDevice, int64_t, int64_t>; #endif template <typename T, typename OutputType> struct MultinomialFunctor<CPUDevice, T, OutputType> { void operator()(OpKernelContext* ctx, const CPUDevice& d, typename TTypes<T>::ConstMatrix logits, typename TTypes<float>::Flat , typename TTypes<float>::Flat , typename TTypes<float>::Flat , int batch_size, int num_classes, int num_samples, const random::PhiloxRandom& gen, typename TTypes<OutputType>::Matrix output) { auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); auto DoWork = [ctx, num_samples, num_classes, &gen, &output, &logits]( int64_t start_row, int64_t limit_row) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(start_row * (num_samples + 3) / 4); random::SimplePhilox simple_philox(&gen_copy); Tensor cdf_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_DOUBLE, TensorShape({num_classes}), &cdf_tensor)); auto cdf = cdf_tensor.flat<double>(); for (int64_t b = start_row; b < limit_row; ++b) { const auto* logits_row = &logits(b, 0); T max = std::numeric_limits<T>::lowest(); for (int64_t j = 0; j < num_classes; ++j) { if (Eigen::numext::isfinite(logits_row[j])) { max = std::max(max, logits_row[j]); } } const double max_logit = static_cast<double>(max); cdf = (logits.template chip<0>(b).template cast<double>() - max_logit) .exp(); double running_total = 0; for (int64_t j = 0; j < num_classes; ++j) { if (Eigen::numext::isfinite(logits_row[j])) { running_total += cdf(j); } cdf(j) = running_total; } const double* cdf_begin = cdf.data(); const double* cdf_end = cdf.data() + num_classes; for (int64_t j = 0; j < num_samples; ++j) { const double to_find 
= simple_philox.RandDouble() * running_total; auto found_iter = std::upper_bound(cdf_begin, cdf_end, to_find); output(b, j) = std::distance(cdf_begin, found_iter); } } }; const int64_t cost = 50 * (num_samples * std::log(num_classes) / std::log(2) + num_classes); Shard(worker_threads.num_threads, worker_threads.workers, batch_size, cost, DoWork); } }; } namespace { template <typename Device, typename T, typename OutputType> class MultinomialOp : public OpKernel { public: explicit MultinomialOp(OpKernelConstruction* context) : OpKernel(context) {} void DoCompute(OpKernelContext* ctx, const Tensor& logits_t, const Tensor& num_samples_t, GuardedPhiloxRandom* generator) { OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(logits_t.shape()), errors::InvalidArgument("logits should be a matrix, got shape ", logits_t.shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(num_samples_t.shape()), errors::InvalidArgument("num_samples should be a scalar, got shape ", num_samples_t.shape().DebugString())); const int num_samples = num_samples_t.scalar<int>()(); OP_REQUIRES(ctx, num_samples >= 0, errors::InvalidArgument( "num_samples should be nonnegative, got ", num_samples)); for (int i = 0; i < 2; i++) { const int64_t dim = logits_t.dim_size(i); OP_REQUIRES(ctx, static_cast<int>(dim) == dim, errors::InvalidArgument( "logits.shape = ", logits_t.shape().DebugString(), " too large for int")); } const int batch_size = static_cast<int>(logits_t.dim_size(0)); const int num_classes = static_cast<int>(logits_t.dim_size(1)); OP_REQUIRES(ctx, num_classes > 0, errors::InvalidArgument("num_classes should be positive, got ", num_classes)); Tensor* samples_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({batch_size, num_samples}), &samples_t)); if (samples_t->NumElements() > 0) { Tensor noises, scores, scratch; if (std::is_same<Device, GPUDevice>::value) { OP_REQUIRES_OK( ctx, ctx->allocate_temp( DT_FLOAT, TensorShape({batch_size, num_samples, num_classes}), &noises)); OP_REQUIRES_OK( ctx, ctx->allocate_temp( DT_FLOAT, TensorShape({batch_size, num_samples, num_classes}), &scores)); OP_REQUIRES_OK( ctx, ctx->allocate_temp(DT_FLOAT, TensorShape({batch_size, num_samples}), &scratch)); } int num_samples_ceil_4 = (num_samples + 3) / 4 * 4; if (std::is_same<Device, CPUDevice>::value) num_samples_ceil_4 *= 2; auto rng = generator->ReserveRandomOutputs(batch_size * num_samples_ceil_4, 256); functor::MultinomialFunctor<Device, T, OutputType>()( ctx, ctx->eigen_device<Device>(), logits_t.matrix<T>(), noises.flat<float>(), scores.flat<float>(), scratch.flat<float>(), batch_size, num_classes, num_samples, rng, samples_t->matrix<OutputType>()); } } }; template <typename Device, typename T, typename OutputType> class StatefulMultinomialOp : public MultinomialOp<Device, T, OutputType> { public: explicit StatefulMultinomialOp(OpKernelConstruction* ctx) : MultinomialOp<Device, T, OutputType>(ctx) { OP_REQUIRES_OK(ctx, generator_.Init(ctx)); } void Compute(OpKernelContext* ctx) override { const Tensor& logits_t = ctx->input(0); const Tensor& num_samples_t = ctx->input(1); this->DoCompute(ctx, logits_t, num_samples_t, &generator_); } private: GuardedPhiloxRandom generator_; }; #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER(Name("Multinomial") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT32), \ StatefulMultinomialOp<CPUDevice, TYPE, int32>); \ REGISTER_KERNEL_BUILDER(Name("Multinomial") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T") \ 
.TypeConstraint("output_dtype", DT_INT64), \ StatefulMultinomialOp<CPUDevice, TYPE, int64>); TF_CALL_half(REGISTER); TF_CALL_bfloat16(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #undef REGISTER #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER(Name("Multinomial") \ .Device(DEVICE_GPU) \ .HostMemory("num_samples") \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT32), \ StatefulMultinomialOp<GPUDevice, TYPE, int32>) \ REGISTER_KERNEL_BUILDER(Name("Multinomial") \ .Device(DEVICE_GPU) \ .HostMemory("num_samples") \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT64), \ StatefulMultinomialOp<GPUDevice, TYPE, int64>) TF_CALL_half(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #undef REGISTER #endif template <typename Device, typename T, typename OutputType> class StatelessMultinomialOp : public MultinomialOp<Device, T, OutputType> { public: explicit StatelessMultinomialOp(OpKernelConstruction* ctx) : MultinomialOp<Device, T, OutputType>(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& logits_t = ctx->input(0); const Tensor& num_samples_t = ctx->input(1); const Tensor& seed_t = ctx->input(2); OP_REQUIRES(ctx, seed_t.dims() == 1 && seed_t.dim_size(0) == 2, errors::InvalidArgument("seed must have shape [2], not ", seed_t.shape().DebugString())); random::PhiloxRandom::Key key; random::PhiloxRandom::ResultType counter; OP_REQUIRES_OK(ctx, GenerateKey(seed_t, &key, &counter)); GuardedPhiloxRandom generator; generator.Init(counter, key); this->DoCompute(ctx, logits_t, num_samples_t, &generator); } private: GuardedPhiloxRandom generator_; }; #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT32), \ StatelessMultinomialOp<CPUDevice, TYPE, int32>); \ REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT64), \ StatelessMultinomialOp<CPUDevice, TYPE, int64>); TF_CALL_half(REGISTER); TF_CALL_bfloat16(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #undef REGISTER #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \ .Device(DEVICE_GPU) \ .HostMemory("num_samples") \ .HostMemory("seed") \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT32), \ StatelessMultinomialOp<GPUDevice, TYPE, int32>) \ REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \ .Device(DEVICE_GPU) \ .HostMemory("num_samples") \ .HostMemory("seed") \ .TypeConstraint<TYPE>("T") \ .TypeConstraint("output_dtype", DT_INT64), \ StatelessMultinomialOp<GPUDevice, TYPE, int64>) TF_CALL_half(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #undef REGISTER #endif } }
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { static Graph* Multinomial(int batch_size, int num_classes, int num_samples) { Graph* g = new Graph(OpRegistry::Global()); Tensor logits_t(DT_FLOAT, TensorShape({batch_size, num_classes})); Tensor num_samples_t(DT_INT32, TensorShape()); logits_t.flat<float>().setRandom(); num_samples_t.scalar<int32>().setConstant(num_samples); Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("multinomial"), "Multinomial") .Input(test::graph::Constant(g, logits_t)) .Input(test::graph::Constant(g, num_samples_t)) .Attr("T", DT_FLOAT) .Finalize(g, &ret)); return g; } #define BM_MultinomialDev(DEVICE, B, C, S) \ static void BM_Multinomial_##DEVICE##_##B##_##C##_##S( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, Multinomial(B, C, S), \ false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(B) * C * S * \ state.iterations()); \ } \ BENCHMARK(BM_Multinomial_##DEVICE##_##B##_##C##_##S); #define BM_MultinomialBCS(B, C, S) \ BM_MultinomialDev(cpu, B, C, S); \ BM_MultinomialDev(gpu, B, C, S); BM_MultinomialBCS(1, 10000, 4); BM_MultinomialBCS(1, 10000, 128); BM_MultinomialBCS(1, 10000, 10000); BM_MultinomialBCS(1, 100000, 4); BM_MultinomialBCS(32, 10000, 4); BM_MultinomialBCS(32, 10000, 128); BM_MultinomialBCS(32, 100000, 4); BM_MultinomialBCS(128, 100000, 1); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/multinomial_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/multinomial_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
509e8590-a1ba-4f16-9c3f-6140d6c8b663
cpp
tensorflow/tensorflow
strided_slice_op
tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
tensorflow/core/util/strided_slice_op_test.cc
#include "tensorflow/core/util/strided_slice_op.h" #include <algorithm> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/dynamic_shaped_ops.h" #include "xla/hlo/builder/value_inference.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal.h" #include "xla/shape.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { using errors::InvalidArgument; class StridedSliceOp : public XlaOpKernel { public: explicit StridedSliceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("begin_mask", &begin_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("end_mask", &end_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("ellipsis_mask", &ellipsis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("new_axis_mask", &new_axis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("shrink_axis_mask", &shrink_axis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("Index", &index_type_)); } void EmitDynamicSlice(XlaOpKernelContext* ctx, const absl::InlinedVector<int64_t, 4>& strides, PartialTensorShape partial_processing_shape, PartialTensorShape partial_final_shape, const StridedSliceShapeSpec& shape_spec, const std::vector<bool>& begins_are_dynamic, const std::vector<bool>& ends_are_dynamic) { const TensorShape input_shape = ctx->InputShape(0); xla::XlaOp slice = ctx->Input(0); for (int64_t i = 0; i < ctx->InputShape("begin").dims(); ++i) { OP_REQUIRES(ctx, strides[i] == 1, errors::InvalidArgument( "Strides have to be one when inputs are not constant.")); } for (int64_t i = 0; i < partial_final_shape.dims(); ++i) { if (partial_final_shape.dim_size(i) == -1) { partial_final_shape.set_dim( i, input_shape.dim_size(shape_spec.output_to_processing_mapping[i])); } } TensorShape final_shape; OP_REQUIRES( ctx, partial_final_shape.AsTensorShape(&final_shape), InvalidArgument("XLA can't deduce compile time constant output " "shape for strided slice: ", partial_final_shape.DebugString(), ", output shape must be a compile-time constant")); for (int64_t i = 0; i < partial_processing_shape.dims(); ++i) { if (partial_processing_shape.dim_size(i) == -1) { partial_processing_shape.set_dim(i, input_shape.dim_size(i)); } } TensorShape processing_shape; OP_REQUIRES( ctx, partial_processing_shape.AsTensorShape(&processing_shape), InvalidArgument("XLA can't deduce compile time constant processing " "shape for strided slice: ", partial_processing_shape.DebugString(), ", output shape must be a compile-time constant")); xla::PaddingConfig padding_config; bool need_padding = false; std::vector<bool> result_dims_are_dynamic; const auto& dims = input_shape.dims(); result_dims_are_dynamic.reserve(dims); for (int64_t i = 0; i < dims; ++i) { int64_t sparse_index = shape_spec.processing_to_sparse_mapping[i]; bool shrink_axis_set = (1 << i) & 
shape_spec.shrink_axis_dense_mask; auto* dims = padding_config.add_dimensions(); dims->set_edge_padding_low(0); dims->set_interior_padding(0); if ((begins_are_dynamic[sparse_index] || ends_are_dynamic[sparse_index]) && !shrink_axis_set) { dims->set_edge_padding_high(input_shape.dim_size(i)); need_padding = true; result_dims_are_dynamic.push_back(true); } else { dims->set_edge_padding_high(0); result_dims_are_dynamic.push_back(false); } } if (need_padding) { slice = xla::Pad(slice, xla::Zero(ctx->builder(), ctx->input_xla_type(0)), padding_config); for (int64 i = 0; i < result_dims_are_dynamic.size(); ++i) { if (result_dims_are_dynamic[i]) { slice = xla::RemoveDynamicDimension(slice, i); } } } std::vector<xla::XlaOp> start_indices; std::vector<xla::XlaOp> slice_sizes_dynamic; xla::Shape input_xla_shape = ctx->InputXlaShape(0).value(); for (int64_t i = 0; i < input_shape.dims(); ++i) { bool begin_mask = (1 << i) & shape_spec.begin_dense_mask; bool end_mask = (1 << i) & shape_spec.end_dense_mask; auto zero = xla::Zero(ctx->builder(), ctx->InputXlaType("begin")); xla::XlaOp begin_index, end_index; int64_t sparse_index = shape_spec.processing_to_sparse_mapping[i]; bool xla_input_is_dynamic = input_xla_shape.is_dynamic_dimension(i); xla::XlaOp dim_size; if (xla_input_is_dynamic) { dim_size = xla::GetDimensionSize(ctx->Input(0), i); OP_REQUIRES(ctx, ctx->InputXlaType("begin") == xla::S32, errors::InvalidArgument("'begin shape has to be int32 when " "indices to slice op are dynamic")); } else { dim_size = xla::ConstantR0WithType(ctx->builder(), ctx->InputXlaType("begin"), input_xla_shape.dimensions(i)); } auto scalar_must_be_non_negative = [ctx](xla::XlaOp value) -> bool { auto lower_bound = ctx->value_inference().AnalyzeConstant( value, xla::ValueInferenceMode::kLowerBound); if (!lower_bound.ok() || !lower_bound->AllValid()) { return false; } return lower_bound->Get<int32>({}) >= 0; }; if (begin_mask) { begin_index = zero; } else { begin_index = xla::Slice(ctx->Input("begin"), {sparse_index}, {sparse_index + 1}, {1}); begin_index = xla::Reshape(begin_index, {}); if (!scalar_must_be_non_negative(begin_index)) { auto index_negative = xla::Lt(begin_index, zero); auto wrapped_index = xla::Add(dim_size, begin_index); begin_index = xla::Select(index_negative, wrapped_index, begin_index); } } start_indices.push_back(begin_index); if (end_mask) { end_index = dim_size; } else { end_index = xla::Slice(ctx->Input("end"), {sparse_index}, {sparse_index + 1}, {1}); end_index = xla::Reshape(end_index, {}); if (!scalar_must_be_non_negative(end_index)) { auto index_negative = xla::Lt(end_index, zero); auto wrapped_index = xla::Add(dim_size, end_index); end_index = xla::Select(index_negative, wrapped_index, end_index); } } xla::XlaOp size = xla::Max(xla::Sub(end_index, begin_index), zero); slice_sizes_dynamic.push_back(xla::ConvertElementType(size, xla::S32)); } slice = xla::DynamicSlice(slice, start_indices, processing_shape.dim_sizes()); slice = xla::Reshape(slice, final_shape.dim_sizes()); for (int64_t i = 0; i < final_shape.dims(); ++i) { int64 processing_shape_dim = shape_spec.output_to_processing_mapping[i]; if (processing_shape_dim != -1) { auto status = xla::SetDimensionSizeWithRebound( &ctx->value_inference(), slice, slice_sizes_dynamic[processing_shape_dim], i); OP_REQUIRES_OK(ctx, status.status()); slice = status.value(); } } ctx->SetOutput(0, slice); } void Compile(XlaOpKernelContext* ctx) override { const TensorShape input_shape = ctx->InputShape(0); const TensorShape begin_shape = 
ctx->InputShape("begin"); OP_REQUIRES( ctx, begin_shape.dims() == 1, errors::InvalidArgument("'begin' input has to be a rank 1 vector")); absl::InlinedVector<int64_t, 4> begin; absl::InlinedVector<int64_t, 4> end; absl::InlinedVector<int64_t, 4> strides; xla::Literal begin_literal, end_literal, strides_literal; bool begin_is_constant = ctx->ConstantInput(1, &begin_literal).ok(); bool end_is_constant = ctx->ConstantInput(2, &end_literal).ok(); OP_REQUIRES_OK(ctx, ctx->ConstantInput(3, &strides_literal)); Tensor begin_tensor, end_tensor, strides_tensor; if (begin_is_constant) { OP_REQUIRES_OK( ctx, LiteralToHostTensor(begin_literal, index_type_, &begin_tensor)); } if (end_is_constant) { OP_REQUIRES_OK( ctx, LiteralToHostTensor(end_literal, index_type_, &end_tensor)); } OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_, &strides_tensor)); PartialTensorShape partial_processing_shape, partial_final_shape; bool dummy = false; StridedSliceShapeSpec shape_spec; OP_REQUIRES_OK( ctx, ValidateStridedSliceOp( begin_is_constant ? &begin_tensor : nullptr, end_is_constant ? &end_tensor : nullptr, strides_tensor, input_shape, begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_, shrink_axis_mask_, &partial_processing_shape, &partial_final_shape, &dummy, &dummy, &dummy, &begin, &end, &strides, &shape_spec)); xla::XlaOp slice = ctx->Input(0); std::vector<bool> begins_are_dynamic; OP_REQUIRES_OK( ctx, ctx->ResolveInputDynamismIntoPredVector(1, &begins_are_dynamic)); std::vector<bool> ends_are_dynamic; OP_REQUIRES_OK( ctx, ctx->ResolveInputDynamismIntoPredVector(2, &ends_are_dynamic)); if (begin_is_constant && end_is_constant) { TensorShape final_shape; OP_REQUIRES( ctx, partial_final_shape.AsTensorShape(&final_shape), InvalidArgument("XLA can't deduce compile time constant output " "shape for strided slice: ", partial_final_shape.DebugString(), ", output shape must be a compile-time constant")); absl::InlinedVector<int64_t, 4> dimensions_to_reverse; absl::InlinedVector<int64_t, 4> slice_begin, slice_end, slice_strides; for (int i = 0; i < begin.size(); ++i) { if (strides[i] > 0) { slice_begin.push_back(begin[i]); slice_end.push_back(std::max(end[i], begin[i])); slice_strides.push_back(strides[i]); } else { slice_begin.push_back(input_shape.dim_size(i) - begin[i] - 1); slice_end.push_back(std::max(input_shape.dim_size(i) - end[i] - 1, input_shape.dim_size(i) - begin[i] - 1)); slice_strides.push_back(-strides[i]); dimensions_to_reverse.push_back(i); } } if (!dimensions_to_reverse.empty()) { slice = xla::Rev(slice, dimensions_to_reverse); } slice = xla::Slice(slice, slice_begin, slice_end, slice_strides); auto operand_shape_or = ctx->builder()->GetShape(ctx->Input(0)); OP_REQUIRES_OK(ctx, operand_shape_or.status()); xla::Shape xla_shape = operand_shape_or.value(); bool begins_are_static = absl::c_all_of( begins_are_dynamic, [](bool dynamic) { return !dynamic; }); OP_REQUIRES(ctx, begins_are_static, errors::InvalidArgument( "XLA can't use dynamic begin values for slice.")); bool ends_are_static = absl::c_all_of( ends_are_dynamic, [](bool dynamic) { return !dynamic; }); slice = xla::Reshape(slice, final_shape.dim_sizes()); if (xla_shape.is_static() && ends_are_static) { ctx->SetOutput(0, slice); return; } for (int64_t i = 0; i < final_shape.dims(); ++i) { int64_t input_index = shape_spec.output_to_processing_mapping[i]; if (input_index == -1) { continue; } bool input_is_dynamic = xla_shape.is_dynamic_dimension(input_index); int64_t sparse_index = shape_spec.output_to_sparse_mapping[i]; bool 
end_is_dynamic = sparse_index == -1 ? false : ends_are_dynamic[sparse_index]; bool backward_slice = sparse_index == -1 ? false : end_literal.Get<int32>({sparse_index}) < 0; if (input_is_dynamic || end_is_dynamic) { OP_REQUIRES( ctx, strides[input_index] == 1, errors::InvalidArgument("XLA has not implemented dynamic " "sized slice with non-trival stride yet. " "Please file a bug against XLA")); auto operand_size = xla::GetDimensionSize(ctx->Input(0), input_index); if (backward_slice) { OP_REQUIRES(ctx, !end_is_dynamic, errors::InvalidArgument( "XLA has not implemented dynamic " "sized slice with dynamic negative index %lld. ")); operand_size = xla::Add( operand_size, xla::ConstantR0<int32>(ctx->builder(), end_literal.Get<int32>({sparse_index}))); } else { xla::XlaOp end_size; if (end_is_dynamic) { end_size = xla::Reshape(xla::Slice(ctx->Input(2), {sparse_index}, {sparse_index + 1}, {1}), {}); } else { end_size = xla::ConstantR0<int32>(ctx->builder(), end[input_index]); } operand_size = xla::Min(operand_size, end_size); } slice = xla::SetDimensionSize( slice, xla::Sub(operand_size, xla::ConstantR0<int32>( ctx->builder(), begin[input_index])), i); } } ctx->SetOutput(0, slice); return; } else { EmitDynamicSlice(ctx, strides, partial_processing_shape, partial_final_shape, shape_spec, begins_are_dynamic, ends_are_dynamic); } } private: int32 begin_mask_, end_mask_; int32 ellipsis_mask_, new_axis_mask_, shrink_axis_mask_; DataType index_type_; }; REGISTER_XLA_OP(Name("StridedSlice") .CompileTimeConstantInput("begin") .CompileTimeConstantInput("end") .CompileTimeConstantInput("strides"), StridedSliceOp); class StridedSliceGradOp : public XlaOpKernel { public: explicit StridedSliceGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("begin_mask", &begin_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("end_mask", &end_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("ellipsis_mask", &ellipsis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("new_axis_mask", &new_axis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("shrink_axis_mask", &shrink_axis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("Index", &index_type_)); } void CompileAsDynamicUpdateSlice(XlaOpKernelContext* ctx, const TensorShape& input_shape, const xla::Literal& strides_literal) { bool dummy = false; Tensor strides_tensor; PartialTensorShape processing_shape, final_shape; absl::InlinedVector<int64_t, 4> begin; absl::InlinedVector<int64_t, 4> end; absl::InlinedVector<int64_t, 4> strides; StridedSliceShapeSpec shape_spec; OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_, &strides_tensor)); OP_REQUIRES_OK( ctx, ValidateStridedSliceOp( nullptr, nullptr, strides_tensor, input_shape, begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_, shrink_axis_mask_, &processing_shape, &final_shape, &dummy, &dummy, &dummy, &begin, &end, &strides, &shape_spec)); for (int64_t i = 0; i < processing_shape.dims(); ++i) { OP_REQUIRES( ctx, strides[i] == 1, errors::InvalidArgument("Strides in strided slice grad have to be " "one when inputs are not constant.")); } xla::XlaOp grad = ctx->Input(4); xla::Shape grad_shape = ctx->InputXlaShape(4).value(); VLOG(1) << "xla grad shape" << grad_shape; VLOG(1) << "xla final_shape" << final_shape; VLOG(1) << "input_shape" << input_shape.DebugString(); auto input_sizes = input_shape.dim_sizes(); auto input_sizes_padded = input_shape.dim_sizes(); bool need_padding = false; for (int64_t i = 0; i < processing_shape.dims(); ++i) { if (processing_shape.dim_size(i) == -1) { input_sizes_padded[i] *= 2; 
need_padding = true; } } for (int64_t i = 0; i < grad_shape.rank(); ++i) { if (shape_spec.output_to_processing_mapping[i] != -1) { processing_shape.set_dim(shape_spec.output_to_processing_mapping[i], grad_shape.dimensions(i)); } } std::vector<xla::XlaOp> begins; begins.reserve(processing_shape.dims()); for (int64_t i = 0; i < input_shape.dims(); ++i) { bool begin_mask = (1 << i) & shape_spec.begin_dense_mask; int64_t begin_dim = shape_spec.processing_to_sparse_mapping[i]; xla::XlaOp begin_index; auto zero = xla::Zero(ctx->builder(), ctx->InputXlaType("begin")); if (begin_mask) { begin_index = zero; } else { xla::XlaOp dim_size = xla::Slice(ctx->Input(0), {i}, {i + 1}, {1}); dim_size = xla::Reshape(dim_size, {}); begin_index = xla::Slice(ctx->Input(1), {begin_dim}, {begin_dim + 1}, {1}); begin_index = xla::Reshape(begin_index, {}); auto index_negative = xla::Lt(begin_index, zero); auto wrapped_index = xla::Add(dim_size, begin_index); begin_index = xla::Select(index_negative, wrapped_index, begin_index); } begins.push_back(begin_index); } auto zero = XlaHelpers::Zero(ctx->builder(), ctx->expected_output_dtype(0)); zero = xla::Broadcast(zero, input_sizes_padded); grad = xla::Reshape(grad, processing_shape.dim_sizes()); grad = xla::DynamicUpdateSlice(zero, grad, begins); if (need_padding) { std::vector<int64_t> strides(input_shape.dims(), 1); std::vector<int64_t> start_indices(input_shape.dims(), 0); grad = xla::Slice(grad, start_indices, input_sizes, strides); } ctx->SetOutput(0, grad); } void Compile(XlaOpKernelContext* ctx) override { TensorShape processing_shape, final_shape; absl::InlinedVector<int64_t, 4> begin; absl::InlinedVector<int64_t, 4> end; absl::InlinedVector<int64_t, 4> strides; TensorShape input_shape; OP_REQUIRES_OK( ctx, ctx->ConstantInputAsShape(0, &input_shape, xla::ValueInferenceMode::kUpperBound)); xla::Literal begin_literal, end_literal, strides_literal; bool begin_is_constant = ctx->ConstantInput(1, &begin_literal).ok(); bool end_is_constant = ctx->ConstantInput(2, &end_literal).ok(); OP_REQUIRES_OK(ctx, ctx->ConstantInput(3, &strides_literal)); if (!(begin_is_constant && end_is_constant)) { CompileAsDynamicUpdateSlice(ctx, input_shape, strides_literal); return; } Tensor begin_tensor, end_tensor, strides_tensor; OP_REQUIRES_OK( ctx, LiteralToHostTensor(begin_literal, index_type_, &begin_tensor)); OP_REQUIRES_OK(ctx, LiteralToHostTensor(end_literal, index_type_, &end_tensor)); OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_, &strides_tensor)); bool dummy = false; OP_REQUIRES_OK( ctx, ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_, shrink_axis_mask_, &processing_shape, &final_shape, &dummy, &dummy, &dummy, &begin, &end, &strides)); const TensorShape dy_shape = ctx->InputShape(4); OP_REQUIRES( ctx, final_shape == dy_shape, errors::InvalidArgument("shape of dy was ", dy_shape.DebugString(), " instead of ", final_shape.DebugString())); OP_REQUIRES( ctx, input_shape.dims() == processing_shape.dims(), errors::Internal( "input shape and processing shape must have same number of dims")); auto zero = XlaHelpers::Zero(ctx->builder(), ctx->expected_output_dtype(0)); xla::XlaOp grad = ctx->Input(4); grad = xla::Reshape(grad, processing_shape.dim_sizes()); absl::InlinedVector<int64_t, 4> dimensions_to_reverse; xla::PaddingConfig padding_config; for (int i = 0; i < processing_shape.dims(); ++i) { auto* dims = padding_config.add_dimensions(); if (strides[i] > 0) { 
dims->set_edge_padding_low(begin[i]); dims->set_interior_padding(strides[i] - 1); int64_t size = dims->edge_padding_low() + processing_shape.dim_size(i) + (processing_shape.dim_size(i) - 1) * dims->interior_padding(); dims->set_edge_padding_high(input_shape.dim_size(i) - size); } else { dimensions_to_reverse.push_back(i); dims->set_edge_padding_high(input_shape.dim_size(i) - begin[i] - 1); dims->set_interior_padding(-strides[i] - 1); int64_t size = dims->edge_padding_high() + processing_shape.dim_size(i) + (processing_shape.dim_size(i) - 1) * dims->interior_padding(); dims->set_edge_padding_low(input_shape.dim_size(i) - size); } } if (!dimensions_to_reverse.empty()) { grad = xla::Rev(grad, dimensions_to_reverse); } grad = xla::Pad(grad, zero, padding_config); xla::XlaOp dynamic_shape = ctx->Input(0); xla::Shape grad_shape = ctx->builder()->GetShape(grad).value(); std::vector<bool> dynamic_input; OP_REQUIRES_OK(ctx, ctx->ResolveInputDynamismIntoPredVector(0, &dynamic_input)); DCHECK_EQ(grad_shape.rank(), input_shape.dims()); for (int64_t dim = 0; dim < input_shape.dims(); ++dim) { DCHECK_EQ(grad_shape.dimensions(dim), input_shape.dim_size(dim)); if (dynamic_input[dim]) { auto dim_size = xla::Slice(dynamic_shape, {dim}, {dim + 1}, {1}); dim_size = xla::ConvertElementType(dim_size, xla::S32); auto dim_size_scalar = xla::Reshape(dim_size, {}); grad = xla::SetDimensionSize(grad, dim_size_scalar, dim); } else if (grad_shape.is_dynamic_dimension(dim)) { grad = xla::RemoveDynamicDimension(grad, dim); } } ctx->SetOutput(0, grad); } private: int32 begin_mask_, end_mask_; int32 ellipsis_mask_, new_axis_mask_, shrink_axis_mask_; DataType index_type_; }; REGISTER_XLA_OP(Name("StridedSliceGrad") .CompileTimeConstantInput("shape") .CompileTimeConstantInput("begin") .CompileTimeConstantInput("end") .CompileTimeConstantInput("strides"), StridedSliceGradOp); class StridedSliceAssignOp : public XlaOpKernel { public: explicit StridedSliceAssignOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("begin_mask", &begin_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("end_mask", &end_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("ellipsis_mask", &ellipsis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("new_axis_mask", &new_axis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("shrink_axis_mask", &shrink_axis_mask_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("Index", &index_type_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape final_shape; absl::InlinedVector<int64_t, 4> begin; absl::InlinedVector<int64_t, 4> end; absl::InlinedVector<int64_t, 4> strides; xla::Literal begin_literal, end_literal, strides_literal; OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &begin_literal)); OP_REQUIRES_OK(ctx, ctx->ConstantInput(2, &end_literal)); OP_REQUIRES_OK(ctx, ctx->ConstantInput(3, &strides_literal)); Tensor begin_tensor, end_tensor, strides_tensor; OP_REQUIRES_OK( ctx, LiteralToHostTensor(begin_literal, index_type_, &begin_tensor)); OP_REQUIRES_OK(ctx, LiteralToHostTensor(end_literal, index_type_, &end_tensor)); OP_REQUIRES_OK(ctx, LiteralToHostTensor(strides_literal, index_type_, &strides_tensor)); TensorShape lhs_shape; xla::XlaOp lhs; if (ctx->input_type(0) == DT_RESOURCE) { OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &lhs_shape, &lhs)); } else { lhs_shape = ctx->InputShape(0); lhs = ctx->Input(0); } const TensorShape rhs_shape = ctx->InputShape(4); TensorShape dummy_processing_shape; bool dummy = false; OP_REQUIRES_OK(ctx, ValidateStridedSliceOp( 
&begin_tensor, &end_tensor, strides_tensor, lhs_shape, begin_mask_, end_mask_, ellipsis_mask_, new_axis_mask_, shrink_axis_mask_, &dummy_processing_shape, &final_shape, &dummy, &dummy, &dummy, &begin, &end, &strides)); if (final_shape.num_elements() == 0 && rhs_shape.num_elements() == 0) { return; } OP_REQUIRES(ctx, final_shape == rhs_shape, errors::Unimplemented( "sliced l-value shape ", final_shape.DebugString(), " does not match r-value shape ", rhs_shape.DebugString(), ". Automatic broadcasting not yet implemented.")); xla::XlaOp rhs = ctx->Input(4); absl::InlinedVector<int64_t, 4> dimensions_to_reverse; absl::InlinedVector<xla::XlaOp, 4> slice_begin; absl::InlinedVector<int64_t, 4> slice_dims; for (int i = 0; i < begin.size(); ++i) { OP_REQUIRES( ctx, strides[i] == 1 || strides[i] == -1, errors::Unimplemented("Strides != 1 or -1 are not yet implemented")); if (strides[i] > 0) { slice_begin.push_back( xla::ConstantR0<int64_t>(ctx->builder(), begin[i])); slice_dims.push_back(end[i] - begin[i]); } else { slice_begin.push_back( xla::ConstantR0<int64_t>(ctx->builder(), end[i] + 1)); slice_dims.push_back(begin[i] - end[i]); dimensions_to_reverse.push_back(i); } } if (!dimensions_to_reverse.empty()) { rhs = xla::Rev(rhs, dimensions_to_reverse); } rhs = xla::Reshape(rhs, slice_dims); lhs = xla::DynamicUpdateSlice(lhs, rhs, slice_begin); if (ctx->input_type(0) == DT_RESOURCE) { OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, lhs)); } else { ctx->SetOutput(0, lhs); } } private: int32 begin_mask_, end_mask_; int32 ellipsis_mask_, new_axis_mask_, shrink_axis_mask_; DataType index_type_; DataType dtype_; }; REGISTER_XLA_OP(Name("ResourceStridedSliceAssign") .CompileTimeConstantInput("begin") .CompileTimeConstantInput("end") .CompileTimeConstantInput("strides"), StridedSliceAssignOp); REGISTER_XLA_OP(Name("TensorStridedSliceUpdate") .CompileTimeConstantInput("begin") .CompileTimeConstantInput("end") .CompileTimeConstantInput("strides"), StridedSliceAssignOp); } }
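The begin-index handling in CompileAsDynamicUpdateSlice above wraps a negative begin by adding the dimension size (the Select(Lt(begin, 0), dim_size + begin, begin) pattern), so Python-style negative indices address from the end of the axis. A minimal standalone sketch of just that wrapping rule, with a hypothetical helper name and no XLA dependency:

// Minimal sketch, not part of TensorFlow: Python-style wrapping of a possibly
// negative begin index against a dimension size, mirroring the
// Select(index_negative, dim_size + begin, begin) pattern used above.
#include <cstdint>
#include <iostream>

int64_t WrapBeginIndex(int64_t begin, int64_t dim_size) {
  // A negative index counts backwards from the end of the dimension.
  return begin < 0 ? begin + dim_size : begin;
}

int main() {
  std::cout << WrapBeginIndex(-1, 10) << "\n";  // prints 9
  std::cout << WrapBeginIndex(3, 10) << "\n";   // prints 3
  return 0;
}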
#include "tensorflow/core/util/strided_slice_op.h" #include <algorithm> #include <ostream> #include <tuple> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/test.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace { using ::testing::PrintToString; using Vec = typename StridedSliceAssignBCast::Vec; struct BroadcastPair { Vec from; Vec to; friend std::ostream& operator<<(std::ostream& os, const BroadcastPair& pair) { return os << strings::StrCat("BroadcastPair{", PrintToString(pair.from), "->", PrintToString(pair.to), "}"); } }; struct BroadcastRemap { int64_t dims; Vec map; friend std::ostream& operator<<(std::ostream& os, const BroadcastRemap& remap) { return os << strings::StrCat("BroadcastRemap{", remap.dims, ", ", PrintToString(remap.map), "}"); } }; int64_t NumberOfElements(const Vec& shape) { int64_t number_of_elements = 1; for (int64_t elem : shape) { number_of_elements *= elem; } return number_of_elements; } MATCHER_P2(Broadcasts, input_shape, output_shape, strings::StrCat("broadcasts ", PrintToString(input_shape), " to ", PrintToString(output_shape))) { const size_t size = input_shape.size(); for (size_t i = 0; i < size; ++i) { if (!((arg[i] == 1 && input_shape[i] == output_shape[i]) || (arg[i] == output_shape[i] && input_shape[i] == 1))) { return false; } } return true; } MATCHER_P(HasSuffix, suffix, "") { const size_t offset = arg.size() - suffix.size(); for (size_t i = 0; i < suffix.size(); ++i) { if (suffix[i] != arg[i + offset]) { return false; } } return true; } MATCHER_P(HasSameNumberOfElementsAs, other, "") { return NumberOfElements(arg) == NumberOfElements(other); } TEST(StridedSliceAssignBCastTest, BroadcastingToSameRankWorks) { const BroadcastPair test_pairs[] = { {Vec{1}, Vec{5}}, {Vec{1, 1}, Vec{4, 5}}, {Vec{1, 5}, Vec{4, 5}}, {Vec{4, 1}, Vec{4, 5}}, {Vec{1, 1, 1}, Vec{2, 4, 5}}, {Vec{1, 1, 5}, Vec{2, 4, 5}}, {Vec{1, 4, 5}, Vec{2, 4, 5}}, {Vec{2, 1, 5}, Vec{2, 4, 5}}, {Vec{2, 4, 1}, Vec{2, 4, 5}}, }; for (const BroadcastPair& test_pair : test_pairs) { StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); EXPECT_TRUE(bcast.IsValid()) << test_pair; EXPECT_TRUE(bcast.IsBroadcastingRequired()); EXPECT_EQ(bcast.result_shape(), test_pair.to); EXPECT_EQ(bcast.reshape(), test_pair.from); EXPECT_THAT(bcast.bcast(), Broadcasts(test_pair.from, test_pair.to)); } } TEST(StridedSliceAssignBCastTest, BroadcastingToLargerRankWorks) { const BroadcastPair test_pairs[] = { {Vec{}, Vec{2, 4, 5}}, {Vec{1}, Vec{2, 4, 5}}, {Vec{5}, Vec{2, 4, 5}}, {Vec{1, 1}, Vec{2, 4, 5}}, {Vec{1, 5}, Vec{2, 4, 5}}, {Vec{4, 1}, Vec{2, 4, 5}}, {Vec{4, 5}, Vec{2, 4, 5}}, }; for (const BroadcastPair& test_pair : test_pairs) { StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); EXPECT_TRUE(bcast.IsValid()) << test_pair; EXPECT_TRUE(bcast.IsBroadcastingRequired()); EXPECT_EQ(bcast.result_shape(), test_pair.to); EXPECT_THAT(bcast.reshape(), HasSuffix(test_pair.from)); EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from)); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to)); } } TEST(StridedSliceAssignBCastTest, BroadcastingToSmallerRankWorks) { const BroadcastPair test_pairs[] = { {Vec{1, 1}, Vec{5}}, 
{Vec{1, 1, 5}, Vec{4, 5}}, {Vec{1, 4, 1}, Vec{4, 5}}, {Vec{1, 1, 1, 5}, Vec{4, 5}}, {Vec{1, 1, 4, 1}, Vec{4, 5}}, }; for (const BroadcastPair& test_pair : test_pairs) { StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); EXPECT_TRUE(bcast.IsValid()) << test_pair; EXPECT_TRUE(bcast.IsBroadcastingRequired()); EXPECT_EQ(bcast.result_shape(), test_pair.to); EXPECT_THAT(test_pair.from, HasSuffix(bcast.reshape())); EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from)); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to)); } } TEST(StridedSliceAssignBCastTest, ReshapeOnlyWorks) { const BroadcastPair test_pairs[] = { {Vec{}, Vec{1, 1}}, {Vec{5}, Vec{5}}, {Vec{5}, Vec{1, 5}}, {Vec{1, 1}, Vec{}}, {Vec{1, 5}, Vec{5}}, {Vec{2, 4, 5}, Vec{2, 4, 5}}, {Vec{2, 4, 5}, Vec{1, 1, 1, 2, 4, 5}}, {Vec{1, 1, 1, 2, 4, 5}, Vec{2, 4, 5}}, }; for (const BroadcastPair& test_pair : test_pairs) { StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); EXPECT_TRUE(bcast.IsValid()) << test_pair; EXPECT_FALSE(bcast.IsBroadcastingRequired()); EXPECT_EQ(bcast.result_shape(), test_pair.to); EXPECT_THAT(bcast.reshape(), HasSameNumberOfElementsAs(test_pair.from)); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), test_pair.to)); } } TEST(StridedSliceAssignBCastTest, InvalidBroadcastFails) { const BroadcastPair test_pairs[] = { {Vec{5}, Vec{1}}, {Vec{3}, Vec{4, 5}}, {Vec{4}, Vec{4, 5}}, {Vec{5}, Vec{}}, {Vec{3, 5}, Vec{4, 5}}, {Vec{4, 3}, Vec{4, 5}}, {Vec{5, 5}, Vec{1, 5}}, {Vec{2, 4}, Vec{2, 4, 5}}, {Vec{4, 3}, Vec{2, 4, 5}}, {Vec{3, 5}, Vec{2, 4, 5}}, {Vec{3, 5}, Vec{5}}, {Vec{3, 5}, Vec{}}, {Vec{3, 4, 5}, Vec{2, 4, 5}}, {Vec{2, 4, 5}, Vec{1, 4, 5}}, {Vec{2, 3, 5}, Vec{2, 4, 5}}, {Vec{2, 4, 5}, Vec{2, 4, 5, 2}}, {Vec{2, 4, 5}, Vec{2, 4, 5, 1}}, {Vec{2, 4, 5}, Vec{2, 4, 1, 5}}, {Vec{2, 4, 5}, Vec{4, 5}}, {Vec{2, 4, 5}, Vec{2, 4}}, {Vec{1, 4, 5}, Vec{4, 1}}, {Vec{1, 4, 5}, Vec{5}}, {Vec{1, 4, 5}, Vec{}}, }; for (const BroadcastPair& test_pair : test_pairs) { StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); EXPECT_FALSE(bcast.IsValid()) << test_pair; } } TEST(StridedSliceAssignBCastTest, RemapDimensionsToItselfWorks) { const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = { {BroadcastPair{Vec{}, Vec{}}, BroadcastRemap{0, Vec{}}}, {BroadcastPair{Vec{4, 5}, Vec{4, 5}}, BroadcastRemap{2, Vec{0, 1}}}, {BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}}, BroadcastRemap{3, Vec{0, 1, 2}}}, }; for (const auto& test_input : test_inputs) { const BroadcastPair& test_pair = test_input.first; const BroadcastRemap& test_remap = test_input.second; StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); ASSERT_TRUE(bcast.IsValid()); EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map)) << PrintToString(test_input); EXPECT_EQ(bcast.result_shape(), test_pair.to); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), bcast.result_shape())); } } TEST(StridedSliceAssignBCastTest, RemapDimensionsRemovingAxesWorks) { const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = { {BroadcastPair{Vec{2, 1, 4, 1, 5}, Vec{2, 1, 4, 1, 5}}, BroadcastRemap{3, Vec{0, -1, 1, -1, 2}}, Vec{2, 4, 5}}, {BroadcastPair{Vec{1, 4, 1}, Vec{1, 4, 1}}, BroadcastRemap{1, Vec{-1, 0, -1}}, Vec{4}}, {BroadcastPair{Vec{1, 1, 1}, Vec{1, 1, 1}}, BroadcastRemap{0, Vec{-1, -1, -1}}, Vec{}}, }; for (const auto& test_input : test_inputs) { const BroadcastPair& test_pair = std::get<0>(test_input); const BroadcastRemap& test_remap = std::get<1>(test_input); const Vec& expected_result_shape = 
std::get<2>(test_input); StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); ASSERT_TRUE(bcast.IsValid()); EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map)) << PrintToString(test_input); EXPECT_EQ(bcast.result_shape(), expected_result_shape); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), bcast.result_shape())); } } TEST(StridedSliceAssignBCastTest, RemapDimensionsAddingAxesWorks) { const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = { {BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}}, BroadcastRemap{5, Vec{0, 2, 4}}, Vec{2, 1, 4, 1, 5}}, {BroadcastPair{Vec{4, 5}, Vec{4, 5}}, BroadcastRemap{4, Vec{1, 2}}, Vec{1, 4, 5, 1}}, {BroadcastPair{Vec{}, Vec{}}, BroadcastRemap{3, Vec{}}, Vec{1, 1, 1}}, }; for (const auto& test_input : test_inputs) { const BroadcastPair& test_pair = std::get<0>(test_input); const BroadcastRemap& test_remap = std::get<1>(test_input); const Vec& expected_result_shape = std::get<2>(test_input); StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); ASSERT_TRUE(bcast.IsValid()); EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map)) << PrintToString(test_input); EXPECT_EQ(bcast.result_shape(), expected_result_shape); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), bcast.result_shape())); } } TEST(StridedSliceAssignBCastTest, RemapDimensionsAddingAndRemovingAxesWorks) { const std::tuple<BroadcastPair, BroadcastRemap, Vec> test_inputs[] = { {BroadcastPair{Vec{1}, Vec{1}}, BroadcastRemap{1, Vec{-1}}, Vec{1}}, {BroadcastPair{Vec{1}, Vec{1}}, BroadcastRemap{3, Vec{-1}}, Vec{1, 1, 1}}, {BroadcastPair{Vec{1, 5}, Vec{1, 5}}, BroadcastRemap{3, Vec{-1, 1}}, Vec{1, 5, 1}}, {BroadcastPair{Vec{1, 5}, Vec{2, 1, 4, 1, 5}}, BroadcastRemap{4, Vec{0, -1, 1, -1, 3}}, Vec{2, 4, 1, 5}}, }; for (const auto& test_input : test_inputs) { const BroadcastPair& test_pair = std::get<0>(test_input); const BroadcastRemap& test_remap = std::get<1>(test_input); const Vec& expected_result_shape = std::get<2>(test_input); StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); ASSERT_TRUE(bcast.IsValid()); EXPECT_TRUE(bcast.RemapDimensions(test_remap.dims, test_remap.map)) << PrintToString(test_input); EXPECT_EQ(bcast.result_shape(), expected_result_shape); EXPECT_THAT(bcast.bcast(), Broadcasts(bcast.reshape(), bcast.result_shape())); } } TEST(StridedSliceAssignBCastTest, RemapDimensionsInvalidSizeFails) { const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = { {BroadcastPair{Vec{}, Vec{}}, BroadcastRemap{0, Vec{-1}}}, {BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}}, BroadcastRemap{3, Vec{0, 1, -1, 2}}}, {BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}}, BroadcastRemap{3, Vec{0, 2}}}, }; for (const auto& test_input : test_inputs) { const BroadcastPair& test_pair = test_input.first; const BroadcastRemap& test_remap = test_input.second; StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); ASSERT_TRUE(bcast.IsValid()); EXPECT_FALSE(bcast.RemapDimensions(test_remap.dims, test_remap.map)) << PrintToString(test_input); } } TEST(StridedSliceAssignBCastTest, RemapDimensionsOutOfBoundsFails) { const std::pair<BroadcastPair, BroadcastRemap> test_inputs[] = { {BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}}, BroadcastRemap{3, Vec{0, 1, 3}}}, {BroadcastPair{Vec{2, 4, 5}, Vec{2, 4, 5}}, BroadcastRemap{2, Vec{0, 1, 2}}}, }; for (const auto& test_input : test_inputs) { const BroadcastPair& test_pair = test_input.first; const BroadcastRemap& test_remap = test_input.second; StridedSliceAssignBCast bcast(test_pair.from, test_pair.to); 
ASSERT_TRUE(bcast.IsValid()); EXPECT_FALSE(bcast.RemapDimensions(test_remap.dims, test_remap.map)) << PrintToString(test_input); } } using IntVector = absl::InlinedVector<int64_t, 4UL>; TensorShape AsTensorShape(absl::Span<const int64_t> dim_sizes) { TensorShape out; TF_CHECK_OK(TensorShape::BuildTensorShape(dim_sizes, &out)); return out; } TEST(ValidateStridedSliceOpTest, BasicStride) { Tensor begin_tensor = test::AsTensor<int32_t>({1, 1}); Tensor end_tensor = test::AsTensor<int32_t>({7, 7}); Tensor strides_tensor = test::AsTensor<int32_t>({2, 2}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x2; int32_t end_mask_spec = 0x1; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(processing_shape, AsTensorShape({5, 4})); EXPECT_EQ(final_shape, AsTensorShape({5, 4})); EXPECT_FALSE(is_identity); EXPECT_FALSE(is_simple_slice); EXPECT_FALSE(slice_dim0); EXPECT_EQ(begin, (IntVector{1, 0})); EXPECT_EQ(end, (IntVector{10, 7})); EXPECT_EQ(strides, (IntVector{2, 2})); } TEST(ValidateStridedSliceOpTest, NegativeBeginEnd) { Tensor begin_tensor = test::AsTensor<int32_t>({-9, -20}); Tensor end_tensor = test::AsTensor<int32_t>({-3, -3}); Tensor strides_tensor = test::AsTensor<int32_t>({2, 2}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(processing_shape, AsTensorShape({3, 4})); EXPECT_EQ(final_shape, AsTensorShape({3, 4})); EXPECT_EQ(begin, (IntVector{1, 0})); EXPECT_EQ(end, (IntVector{7, 7})); } TEST(ValidateStridedSliceOpTest, EmptyOutputDim) { Tensor begin_tensor = test::AsTensor<int32_t>({1, 1}); Tensor end_tensor = test::AsTensor<int32_t>({7, 1}); Tensor strides_tensor = test::AsTensor<int32_t>({2, 1}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(processing_shape, AsTensorShape({3, 0})); EXPECT_EQ(final_shape, AsTensorShape({3, 0})); } TEST(ValidateStridedSliceOpTest, 
ZeroStrideFails) { Tensor begin_tensor = test::AsTensor<int32_t>({1, 1}); Tensor end_tensor = test::AsTensor<int32_t>({7, 7}); Tensor strides_tensor = test::AsTensor<int32_t>({0, 2}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x2; int32_t end_mask_spec = 0x1; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs( tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("strides.* must be non-zero"))); } TEST(ValidateStridedSliceOpTest, ShrinkAxis) { Tensor begin_tensor = test::AsTensor<int16_t>({0, 1, 0}); Tensor end_tensor = test::AsTensor<int16_t>({3, 1, 5}); Tensor strides_tensor = test::AsTensor<int16_t>({1, 1, 1}); TensorShape input_shape = AsTensorShape({3, 4, 5}); int32_t begin_mask_spec = 0x2; int32_t end_mask_spec = 0x2; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x2; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(final_shape, AsTensorShape({3, 5})); } TEST(ValidateStridedSliceOpTest, ShrinkSliceOutOfBoundsFails) { Tensor begin_tensor = test::AsTensor<int16_t>({0, 7, 0}); Tensor end_tensor = test::AsTensor<int16_t>({3, 7, 5}); Tensor strides_tensor = test::AsTensor<int16_t>({1, 1, 1}); TensorShape input_shape = AsTensorShape({3, 4, 5}); int32_t begin_mask_spec = 0x2; int32_t end_mask_spec = 0x2; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x2; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs( tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("slice index .* out of bounds"))); } TEST(ValidateStridedSliceOpTest, ShrinkAxisNegativeStrideFails) { Tensor begin_tensor = test::AsTensor<int16_t>({0, 1, 0}); Tensor end_tensor = test::AsTensor<int16_t>({3, 2, 5}); Tensor strides_tensor = test::AsTensor<int16_t>({1, -1, 1}); TensorShape input_shape = AsTensorShape({3, 4, 5}); int32_t begin_mask_spec = 0x2; int32_t end_mask_spec = 0x2; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x2; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, 
input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs( tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("only stride 1 allowed"))); } TEST(ValidateStridedSliceOpTest, NewAxis) { Tensor begin_tensor = test::AsTensor<int64_t>({0, 0}); Tensor end_tensor = test::AsTensor<int64_t>({10, 10}); Tensor strides_tensor = test::AsTensor<int64_t>({1, 1}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x2; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(processing_shape, AsTensorShape({10, 10})); EXPECT_EQ(final_shape, AsTensorShape({10, 1, 10})); } TEST(ValidateStridedSliceOpTest, Ellipsis) { Tensor begin_tensor = test::AsTensor<int32_t>({0, 0}); Tensor end_tensor = test::AsTensor<int32_t>({10, 10}); Tensor strides_tensor = test::AsTensor<int32_t>({1, 1}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x1; int32_t new_axis_mask = 0x2; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(processing_shape, AsTensorShape({10, 10})); EXPECT_EQ(final_shape, AsTensorShape({10, 10, 1})); } TEST(ValidateStridedSliceOpTest, MultipleEllipsisFails) { Tensor begin_tensor = test::AsTensor<int32_t>({0, 0}); Tensor end_tensor = test::AsTensor<int32_t>({10, 10}); Tensor strides_tensor = test::AsTensor<int32_t>({1, 1}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x3; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT, "Multiple ellipses in slice spec not allowed")); } TEST(ValidateStridedSliceOpTest, WrongBeginTensorFails) { Tensor begin_tensor = test::AsTensor<int32_t>({0}); Tensor end_tensor = test::AsTensor<int32_t>({10, 10}); Tensor strides_tensor = test::AsTensor<int32_t>({1, 1}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t 
end_mask_spec = 0x0; int32_t ellipsis_mask = 0x1; int32_t new_axis_mask = 0x2; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( &begin_tensor, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs( tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("Expected .* equal size tensors"))); } TEST(ValidateStridedSliceOpTest, WrongStridesTensorWithNullBeginFails) { Tensor end_tensor = test::AsTensor<int32_t>({10, 10}); Tensor strides_tensor = test::AsTensor<int32_t>({1}); TensorShape input_shape = AsTensorShape({10, 10}); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x1; int32_t new_axis_mask = 0x2; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( nullptr, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs( tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("Expected .* equal size tensors"))); } TEST(ValidateStridedSliceOpTest, NullBeginEndWithShrinkAxis) { Tensor strides_tensor = test::AsTensor<int32_t>({2, -2, 1}); TensorShape input_shape = AsTensorShape({10, 10, 1}); int32_t begin_mask_spec = 0x3; int32_t end_mask_spec = 0x3; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x4; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( nullptr, nullptr, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); EXPECT_EQ(processing_shape, AsTensorShape({5, 5, 1})); EXPECT_EQ(final_shape, AsTensorShape({5, 5})); EXPECT_EQ(strides, (IntVector{2, -2, 1})); } TEST(ValidateStridedSliceOpTest, UnknownInputRankFails) { Tensor end_tensor = test::AsTensor<int32_t>({10, 10}); Tensor strides_tensor = test::AsTensor<int32_t>({1, 1}); PartialTensorShape input_shape; int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x1; int32_t new_axis_mask = 0x2; int32_t shrink_axis_mask = 0x0; TensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; EXPECT_THAT( ValidateStridedSliceOp( nullptr, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec), tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("unknown rank"))); } TEST(ValidateStridedSliceOpTest, PartialInputShape) { Tensor end_tensor = test::AsTensor<int32_t>({10, 10}); Tensor 
strides_tensor = test::AsTensor<int32_t>({1, 1}); PartialTensorShape input_shape; TF_CHECK_OK( PartialTensorShape::BuildPartialTensorShape({10, -1}, &input_shape)); int32_t begin_mask_spec = 0x0; int32_t end_mask_spec = 0x0; int32_t ellipsis_mask = 0x0; int32_t new_axis_mask = 0x0; int32_t shrink_axis_mask = 0x0; PartialTensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; IntVector begin, end, strides; StridedSliceShapeSpec shape_spec; TF_EXPECT_OK(ValidateStridedSliceOp( nullptr, &end_tensor, strides_tensor, input_shape, begin_mask_spec, end_mask_spec, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides, &shape_spec)); } } }
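The ValidateStridedSliceOp tests above all reduce to the same per-dimension counting rule once the masks have produced canonical begin/end/stride values: the slice extent is ceil((end - begin) / stride), clamped at zero. A minimal standalone sketch of that counting rule (hypothetical helper, not the TensorFlow implementation, and ignoring the mask/clamping logic that canonicalizes begin and end):

// Minimal sketch, not the TensorFlow implementation: element count of a
// strided slice along one dimension for already-canonicalized begin/end/stride.
#include <cstdint>
#include <iostream>

int64_t SliceExtent(int64_t begin, int64_t end, int64_t stride) {
  if (stride > 0) {
    // ceil((end - begin) / stride), clamped at zero.
    return end > begin ? (end - begin + stride - 1) / stride : 0;
  }
  // Negative stride walks backwards from begin towards the (exclusive) end.
  return begin > end ? (begin - end - stride - 1) / (-stride) : 0;
}

int main() {
  // Matches the BasicStride expectation: canonical begin {1, 0}, end {10, 7},
  // strides {2, 2} -> processing shape {5, 4}.
  std::cout << SliceExtent(1, 10, 2) << "\n";  // prints 5
  std::cout << SliceExtent(0, 7, 2) << "\n";   // prints 4
  return 0;
}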
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/strided_slice_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
da147f44-4e47-4776-bc80-4d094be7648f
cpp
tensorflow/tensorflow
cudnn_rnn_ops
tensorflow/core/ops/cudnn_rnn_ops.cc
tensorflow/core/ops/cudnn_rnn_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/lib/strings/strcat.h" namespace tensorflow { namespace { constexpr auto kRNNModeAttrs = "rnn_mode: {'rnn_relu', 'rnn_tanh', 'lstm', 'gru'} = 'lstm'"; constexpr auto kRNNInputModeAttrs = "input_mode: {'linear_input', 'skip_input', 'auto_select'} = " "'linear_input'"; constexpr auto kRNNDirectionAttrs = "direction: {'unidirectional', 'bidirectional'} = 'unidirectional'"; } using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; REGISTER_OP("CudnnRNNParamsSize") .Input("num_layers: int32") .Input("num_units: int32") .Input("input_size: int32") .Attr("T: {bfloat16, float16, float32, float64}") .Attr("S: {int32, int64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("num_proj: int = 0") .Output("params_size: S") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); c->set_output(0, c->Vector(1)); return absl::OkStatus(); }); REGISTER_OP("CudnnRNN") .Input("input: T") .Input("input_h: T") .Input("input_c: T") .Input("params: T") .SetIsStateful() .Output("output: T") .Output("output_h: T") .Output("output_c: T") .Output("reserve_space: T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("is_training: bool = true") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; auto input_shape = c->input(0); auto input_h_shape = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused)); TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); auto seq_length = c->Dim(input_shape, 0); auto batch_size = c->Dim(input_shape, 1); auto num_units = c->Dim(input_h_shape, 2); string direction; TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction)); string rnn_mode; TF_RETURN_IF_ERROR(c->GetAttr("rnn_mode", &rnn_mode)); int dir_count = (direction == "bidirectional") ? 2 : 1; DimensionHandle output_size; TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); auto output_shape = c->MakeShape({seq_length, batch_size, output_size}); auto output_h_shape = input_h_shape; auto output_c_shape TF_ATTRIBUTE_UNUSED = (rnn_mode == "lstm") ? 
output_h_shape : c->MakeShape({}); c->set_output(0, output_shape); c->set_output(1, output_h_shape); c->set_output(2, output_c_shape); c->set_output(3, c->UnknownShape()); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNV2") .Input("input: T") .Input("input_h: T") .Input("input_c: T") .Input("params: T") .SetIsStateful() .Output("output: T") .Output("output_h: T") .Output("output_c: T") .Output("reserve_space: T") .Output("host_reserved: int8") .Attr("T: {bfloat16, float16, float32, float64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("is_training: bool = true") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; auto input_shape = c->input(0); auto input_h_shape = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused)); TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); auto seq_length = c->Dim(input_shape, 0); auto batch_size = c->Dim(input_shape, 1); auto num_units = c->Dim(input_h_shape, 2); string direction; TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction)); string rnn_mode; TF_RETURN_IF_ERROR(c->GetAttr("rnn_mode", &rnn_mode)); int dir_count = (direction == "bidirectional") ? 2 : 1; DimensionHandle output_size; TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); auto output_shape = c->MakeShape({seq_length, batch_size, output_size}); auto output_h_shape = input_h_shape; auto output_c_shape TF_ATTRIBUTE_UNUSED = (rnn_mode == "lstm") ? output_h_shape : c->MakeShape({}); c->set_output(0, output_shape); c->set_output(1, output_h_shape); c->set_output(2, output_c_shape); c->set_output(3, c->UnknownShape()); c->set_output(4, c->UnknownShape()); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNV3") .Input("input: T") .Input("input_h: T") .Input("input_c: T") .Input("params: T") .Input("sequence_lengths: int32") .SetIsStateful() .Output("output: T") .Output("output_h: T") .Output("output_c: T") .Output("reserve_space: T") .Output("host_reserved: int8") .Attr("T: {bfloat16, float16, float32, float64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("num_proj: int = 0") .Attr("is_training: bool = true") .Attr("time_major: bool = true") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; auto input_shape = c->input(0); auto input_h_shape = c->input(1); auto input_c_shape = c->input(2); TF_RETURN_IF_ERROR(c->WithRank(input_shape, 3, &unused)); TF_RETURN_IF_ERROR(c->WithRank(input_h_shape, 3, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 1, &unused)); auto max_seq_length = c->Dim(input_shape, 0); auto batch_size = c->Dim(input_shape, 1); auto num_units = c->Dim(input_h_shape, 2); string direction; TF_RETURN_IF_ERROR(c->GetAttr("direction", &direction)); string rnn_mode; TF_RETURN_IF_ERROR(c->GetAttr("rnn_mode", &rnn_mode)); if (rnn_mode == "lstm") { TF_RETURN_IF_ERROR(c->WithRank(input_c_shape, 3, &unused)); } int dir_count = (direction == "bidirectional") ? 2 : 1; DimensionHandle output_size; TF_RETURN_IF_ERROR(c->Multiply(num_units, dir_count, &output_size)); auto output_shape = c->MakeShape({max_seq_length, batch_size, output_size}); auto output_h_shape = input_h_shape; auto output_c_shape TF_ATTRIBUTE_UNUSED = (rnn_mode == "lstm") ? 
input_c_shape : c->MakeShape({}); c->set_output(0, output_shape); c->set_output(1, output_h_shape); c->set_output(2, output_c_shape); c->set_output(3, c->UnknownShape()); c->set_output(4, c->UnknownShape()); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNBackprop") .Input("input: T") .Input("input_h: T") .Input("input_c: T") .Input("params: T") .Input("output: T") .Input("output_h: T") .Input("output_c: T") .Input("output_backprop: T") .Input("output_h_backprop: T") .Input("output_c_backprop: T") .Input("reserve_space: T") .SetIsStateful() .Output("input_backprop: T") .Output("input_h_backprop: T") .Output("input_c_backprop: T") .Output("params_backprop: T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .SetShapeFn([](InferenceContext* c) { auto input_shape = c->input(0); auto input_h_shape = c->input(1); auto input_c_shape = c->input(2); auto params_shape = c->input(3); c->set_output(0, input_shape); c->set_output(1, input_h_shape); c->set_output(2, input_c_shape); c->set_output(3, params_shape); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNBackpropV2") .Input("input: T") .Input("input_h: T") .Input("input_c: T") .Input("params: T") .Input("output: T") .Input("output_h: T") .Input("output_c: T") .Input("output_backprop: T") .Input("output_h_backprop: T") .Input("output_c_backprop: T") .Input("reserve_space: T") .Input("host_reserved: int8") .SetIsStateful() .Output("input_backprop: T") .Output("input_h_backprop: T") .Output("input_c_backprop: T") .Output("params_backprop: T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .SetShapeFn([](InferenceContext* c) { auto input_shape = c->input(0); auto input_h_shape = c->input(1); auto input_c_shape = c->input(2); auto params_shape = c->input(3); c->set_output(0, input_shape); c->set_output(1, input_h_shape); c->set_output(2, input_c_shape); c->set_output(3, params_shape); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNBackpropV3") .Input("input: T") .Input("input_h: T") .Input("input_c: T") .Input("params: T") .Input("sequence_lengths: int32") .Input("output: T") .Input("output_h: T") .Input("output_c: T") .Input("output_backprop: T") .Input("output_h_backprop: T") .Input("output_c_backprop: T") .Input("reserve_space: T") .Input("host_reserved: int8") .SetIsStateful() .Output("input_backprop: T") .Output("input_h_backprop: T") .Output("input_c_backprop: T") .Output("params_backprop: T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("num_proj: int = 0") .Attr("time_major: bool = true") .SetShapeFn([](InferenceContext* c) { auto input_shape = c->input(0); auto input_h_shape = c->input(1); auto input_c_shape = c->input(2); auto params_shape = c->input(3); c->set_output(0, input_shape); c->set_output(1, input_h_shape); c->set_output(2, input_c_shape); c->set_output(3, params_shape); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNParamsToCanonical") .Input("num_layers: int32") .Input("num_units: int32") .Input("input_size: int32") .Input("params: T") .Output("weights: num_params * T") .Output("biases: num_params * T") .Attr("T: {bfloat16, float16, float32, float64}") 
.Attr("num_params: int") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); int num_params; TF_RETURN_IF_ERROR(c->GetAttr("num_params", &num_params)); for (int i = 0; i < num_params; i++) { c->set_output(i, c->Matrix(InferenceContext::kUnknownDim, InferenceContext::kUnknownDim)); } for (int i = 0; i < num_params; i++) { c->set_output(num_params + i, c->Vector(InferenceContext::kUnknownDim)); } return absl::OkStatus(); }); REGISTER_OP("CudnnRNNParamsToCanonicalV2") .Input("num_layers: int32") .Input("num_units: int32") .Input("input_size: int32") .Input("params: T") .Output("weights: num_params_weights * T") .Output("biases: num_params_biases * T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr("num_params_weights: int") .Attr("num_params_biases: int") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("num_proj: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); int num_params_weights; int num_params_biases; TF_RETURN_IF_ERROR(c->GetAttr("num_params_weights", &num_params_weights)); TF_RETURN_IF_ERROR(c->GetAttr("num_params_biases", &num_params_biases)); for (int i = 0; i < num_params_weights; i++) { c->set_output(i, c->Matrix(InferenceContext::kUnknownDim, InferenceContext::kUnknownDim)); } for (int i = 0; i < num_params_biases; i++) { c->set_output(num_params_weights + i, c->Vector(InferenceContext::kUnknownDim)); } return absl::OkStatus(); }); REGISTER_OP("CudnnRNNCanonicalToParams") .Input("num_layers: int32") .Input("num_units: int32") .Input("input_size: int32") .Input("weights: num_params * T") .Input("biases: num_params * T") .Output("params: T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr("num_params: int") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); return absl::OkStatus(); }); REGISTER_OP("CudnnRNNCanonicalToParamsV2") .Input("num_layers: int32") .Input("num_units: int32") .Input("input_size: int32") .Input("weights: num_params_weights * T") .Input("biases: num_params_biases * T") .Output("params: T") .Attr("T: {bfloat16, float16, float32, float64}") .Attr("num_params_weights: int") .Attr("num_params_biases: int") .Attr(kRNNModeAttrs) .Attr(kRNNInputModeAttrs) .Attr(kRNNDirectionAttrs) .Attr("dropout: float = 0.0") .Attr("seed: int = 0") .Attr("seed2: int = 0") .Attr("num_proj: int = 0") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); return absl::OkStatus(); }); }
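Every forward shape function above (CudnnRNN, CudnnRNNV2, CudnnRNNV3) derives the output dimensions the same way: sequence length and batch size come from the input, num_units comes from input_h, and the last dimension is scaled by the direction count. A minimal sketch of just that arithmetic, using a hypothetical helper rather than the real shape-inference API:

// Minimal sketch, hypothetical helper (not TensorFlow's InferenceContext):
// the forward CudnnRNN* ops infer an output shape of
// {seq_length, batch_size, num_units * dir_count}, where dir_count is 2 for
// a bidirectional RNN and 1 otherwise.
#include <array>
#include <cstdint>
#include <iostream>
#include <string>

std::array<int64_t, 3> CudnnRnnOutputShape(int64_t seq_length,
                                           int64_t batch_size,
                                           int64_t num_units,
                                           const std::string& direction) {
  const int64_t dir_count = direction == "bidirectional" ? 2 : 1;
  return {seq_length, batch_size, num_units * dir_count};
}

int main() {
  // Mirrors the ForwardLstm_ShapeFn test below: seq_length 2, batch 3,
  // num_units 4, unidirectional -> {2, 3, 4}.
  const auto shape = CudnnRnnOutputShape(2, 3, 4, "unidirectional");
  std::cout << shape[0] << "," << shape[1] << "," << shape[2] << "\n";
  return 0;
}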
#include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { static string JoinedCopies(const string& s, int copies) { string res; for (int i = 0; i < copies; ++i) { strings::StrAppend(&res, i > 0 ? ";" : "", s); } return res; } TEST(CudnnRNNOpsTest, ParamsSize_ShapeFn) { ShapeInferenceTestOp op("CudnnRNNParamsSize"); INFER_OK(op, "[];[];[]", "[1]"); INFER_OK(op, "?;[];[]", "[1]"); INFER_OK(op, "[];?;[]", "[1]"); INFER_OK(op, "[];[];?", "[1]"); INFER_OK(op, "[];?;?", "[1]"); INFER_OK(op, "?;?;?", "[1]"); INFER_ERROR("Shape must be rank 0 ", op, "[1,2];?;[]"); INFER_ERROR("Shape must be rank 0 ", op, "?;[2];[]"); INFER_ERROR("Shape must be rank 0 ", op, "?;?;[1]"); } TEST(CudnnRNNOpsTest, ForwardLstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?"; ShapeInferenceTestOp op("CudnnRNN"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNN") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]"); } TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?;?"; ShapeInferenceTestOp op("CudnnRNNV2"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV2") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, 
"[?,?,?];[];[?,?,?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]"); } TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) { int max_seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> input_c_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {max_seq_length, batch_size, num_units * dir_count}; std::vector<int> seq_lengths_shape = {batch_size}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_c_shape), ";", "[?]", ";", shape_to_str(seq_lengths_shape)); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in2;?;?"; ShapeInferenceTestOp op("CudnnRNNV3"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Input({"sequence_lengths", 0, DT_INT32}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[];[?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[?];[]"); } TEST(CudnnRNNOpsTest, ForwardV3Gru) { int max_seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> input_c_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {max_seq_length, batch_size, num_units * dir_count}; std::vector<int> seq_lengths_shape = {batch_size}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_c_shape), ";", "[?]", ";", shape_to_str(seq_lengths_shape)); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;[];?;?"; ShapeInferenceTestOp op("CudnnRNNV3"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Input({"sequence_lengths", 0, DT_INT32}) .Attr("rnn_mode", "gru") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[];[?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[];[?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[?];[]"); } TEST(CudnnRNNOpsTest, LSTMBlockCell_ShapeFn) { ShapeInferenceTestOp op("LSTMBlockCell"); string input_suffix = strings::StrCat(";", JoinedCopies("?", 6)); INFER_ERROR("must be 
rank 2", op, "[?];?" + input_suffix); INFER_ERROR("must be rank 2", op, "?;[?]" + input_suffix); INFER_OK(op, "?;?" + input_suffix, JoinedCopies("[?,?]", 7)); INFER_OK(op, "[?,?];[?,?]" + input_suffix, JoinedCopies("[d0_0,d1_1]", 7)); } TEST(CudnnRNNOpsTest, BlockLSTM_ShapeFn) { ShapeInferenceTestOp op("BlockLSTM"); TF_ASSERT_OK(NodeDefBuilder("test", "BlockLSTM") .Input({"seq_len_max", 0, DT_INT64}) .Input({"x", 0, DT_FLOAT}) .Input({"cs_prev", 0, DT_FLOAT}) .Input({"h_prev", 0, DT_FLOAT}) .Input({"w", 0, DT_FLOAT}) .Input({"wci", 0, DT_FLOAT}) .Input({"wcf", 0, DT_FLOAT}) .Input({"wco", 0, DT_FLOAT}) .Input({"b", 0, DT_FLOAT}) .Finalize(&op.node_def)); string infix = ";" + JoinedCopies("?", 6) + ";"; INFER_ERROR("must be rank 3", op, "?;[?]" + infix + "?"); INFER_ERROR("must be rank 1", op, "?;?" + infix + "[?,?]"); INFER_OK(op, "?;?" + infix + "?", JoinedCopies("[?,?,?]", 7)); INFER_OK(op, "?;[?,?,?]" + infix + "?", JoinedCopies("[d1_0,d1_1,?]", 7)); INFER_OK(op, "?;[?,?,?]" + infix + "[?]", JoinedCopies("[d1_0,d1_1,?]", 7)); INFER_OK(op, "?;[?,?,?]" + infix + "[20]", JoinedCopies("[d1_0,d1_1,5]", 7)); INFER_ERROR("must be evenly divisible", op, "?;?" + infix + "[11]"); } TEST(CudnnRNNOpsTest, BlockLSTMV2_ShapeFn) { ShapeInferenceTestOp op("BlockLSTMV2"); TF_ASSERT_OK(NodeDefBuilder("test", "BlockLSTMV2") .Input({"seq_len_max", 0, DT_INT64}) .Input({"x", 0, DT_FLOAT}) .Input({"cs_prev", 0, DT_FLOAT}) .Input({"h_prev", 0, DT_FLOAT}) .Input({"w", 0, DT_FLOAT}) .Input({"wci", 0, DT_FLOAT}) .Input({"wcf", 0, DT_FLOAT}) .Input({"wco", 0, DT_FLOAT}) .Input({"b", 0, DT_FLOAT}) .Finalize(&op.node_def)); string infix = ";" + JoinedCopies("?", 6) + ";"; INFER_ERROR("must be rank 3", op, "?;[?]" + infix + "?"); INFER_ERROR("must be rank 1", op, "?;?" + infix + "[?,?]"); INFER_OK(op, "?;?" + infix + "?", JoinedCopies("[?,?,?]", 7)); INFER_OK(op, "?;[?,?,?]" + infix + "?", JoinedCopies("[d1_0,d1_1,?]", 7)); INFER_OK(op, "?;[?,?,?]" + infix + "[?]", JoinedCopies("[d1_0,d1_1,?]", 7)); INFER_OK(op, "?;[?,?,?]" + infix + "[20]", JoinedCopies("[d1_0,d1_1,5]", 7)); INFER_ERROR("must be evenly divisible", op, "?;?" + infix + "[11]"); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/cudnn_rnn_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/cudnn_rnn_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
36232e17-485a-4372-ac8e-bbcb4ca23de8
cpp
tensorflow/tensorflow
sequence_ops
tensorflow/compiler/tf2xla/kernels/sequence_ops.cc
tensorflow/core/kernels/sequence_ops_test.cc
#include "absl/status/statusor.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/value_inference.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal.h" #include "xla/primitive_util.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { template <typename T> absl::StatusOr<xla::XlaOp> CreateRangeTensor( const xla::LiteralSlice& start_literal, const xla::LiteralSlice& limit_literal, const xla::LiteralSlice& delta_literal, xla::XlaBuilder* builder) { T start = start_literal.Get<T>({}); T limit = limit_literal.Get<T>({}); T delta = delta_literal.Get<T>({}); if (delta == 0) { return errors::InvalidArgument("Requires delta != 0: ", delta); } if (delta > 0) { if (start > limit) { return errors::InvalidArgument( "Requires start <= limit when delta > 0: ", start, "/", limit); } } else { if (start < limit) { return errors::InvalidArgument( "Requires start >= limit when delta < 0: ", start, "/", limit); } } int64_t size = (std::is_integral<T>::value ? static_cast<T>( limit == start ? 0 : (std::abs(limit - start) - 1) / std::abs(delta) + 1) : std::ceil(std::abs((limit - start) / delta))); return xla::ConstantR0(builder, start) + xla::ConstantR0(builder, delta) * xla::Iota(builder, xla::primitive_util::NativeToPrimitiveType<T>(), size); } class RangeOp : public XlaOpKernel { public: explicit RangeOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape start_in_shape = ctx->InputShape(0); const TensorShape limit_in_shape = ctx->InputShape(1); const TensorShape delta_in_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(start_in_shape), errors::InvalidArgument("start must be a scalar, not shape ", start_in_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(limit_in_shape), errors::InvalidArgument("limit must be a scalar, not shape ", limit_in_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(delta_in_shape), errors::InvalidArgument("delta must be a scalar, not shape ", delta_in_shape.DebugString())); xla::Literal start, limit, delta; OP_REQUIRES_OK(ctx, ctx->ConstantInput( 0, &start, xla::ValueInferenceMode::kLowerBound)); OP_REQUIRES_OK(ctx, ctx->ConstantInput( 1, &limit, xla::ValueInferenceMode::kUpperBound)); OP_REQUIRES_OK(ctx, ctx->ConstantInput(2, &delta)); DataType type = input_type(0); absl::StatusOr<xla::XlaOp> output; switch (type) { case DT_INT32: output = CreateRangeTensor<int32>(start, limit, delta, ctx->builder()); break; case DT_INT64: output = CreateRangeTensor<int64_t>(start, limit, delta, ctx->builder()); break; case DT_FLOAT: output = CreateRangeTensor<float>(start, limit, delta, ctx->builder()); break; case DT_DOUBLE: output = CreateRangeTensor<double>(start, limit, delta, ctx->builder()); break; default: output = errors::InvalidArgument("Invalid type for Range ", DataTypeString(type)); } OP_REQUIRES_OK(ctx, output.status()); bool start_is_dynamic = false; OP_REQUIRES_OK(ctx, ctx->ResolveInputDynamismIntoPred(0, &start_is_dynamic)); bool limit_is_dynamic = false; OP_REQUIRES_OK(ctx, 
ctx->ResolveInputDynamismIntoPred(1, &limit_is_dynamic)); if (start_is_dynamic || limit_is_dynamic) { xla::XlaOp delta = ctx->Input(2); xla::XlaOp limit = ctx->Input(1); xla::XlaOp start = ctx->Input(0); if (type == DT_INT32 || type == DT_INT64) { auto dynamic_size = (xla::Abs(limit - start) + xla::Abs(delta) - xla::One(ctx->builder(), ctx->input_xla_type(0))) / xla::Abs(delta); dynamic_size = xla::ConvertElementType(dynamic_size, xla::S32); output = xla::SetDimensionSize(output.value(), dynamic_size, 0); } else { auto dynamic_size = (xla::Ceil(xla::Abs((limit - start) / delta))); dynamic_size = xla::ConvertElementType(dynamic_size, xla::S32); output = xla::SetDimensionSize(output.value(), dynamic_size, 0); } } ctx->SetOutput(0, output.value()); } }; REGISTER_XLA_OP(Name("Range") .CompileTimeConstantInput("start") .CompileTimeConstantInput("limit") .CompileTimeConstantInput("delta"), RangeOp); class LinSpaceOp : public XlaOpKernel { public: explicit LinSpaceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape start_in_shape = ctx->InputShape("start"); const TensorShape stop_in_shape = ctx->InputShape("stop"); const TensorShape num_in_shape = ctx->InputShape("num"); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(start_in_shape), errors::InvalidArgument("start must be a scalar, not shape ", start_in_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(stop_in_shape), errors::InvalidArgument("stop must be a scalar, not shape ", stop_in_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(num_in_shape), errors::InvalidArgument("num must be a scalar, not shape ", num_in_shape.DebugString())); int64_t num; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar("num", &num)); OP_REQUIRES(ctx, num > 0, errors::InvalidArgument("Requires num > 0: ", num)); xla::XlaOp start = ctx->Input("start"); xla::XlaOp stop = ctx->Input("stop"); xla::XlaOp iota = xla::Iota(ctx->builder(), ctx->output_xla_type(0), num); xla::XlaOp step = (stop - start) / xla::ScalarLike(start, (num > 1 ? num - 1 : num)); xla::XlaOp result = iota * step + start; if (num > 1) { xla::XlaOp mask = xla::Iota(ctx->builder(), xla::S64, num); xla::XlaOp eq = xla::Eq(mask, xla::ScalarLike(mask, num - 1)); result = xla::Select(eq, stop, result); } ctx->SetOutput(0, result); } }; REGISTER_XLA_OP(Name("LinSpace").CompileTimeConstantInput("num"), LinSpaceOp); } }
#include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class RangeOpTest : public OpsTestBase { protected: void MakeOp(DataType input_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "Range") .Input(FakeInput(input_type)) .Input(FakeInput(input_type)) .Input(FakeInput(input_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; class LinSpaceOpTest : public OpsTestBase { protected: void MakeOp(DataType input_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "LinSpace") .Input(FakeInput(input_type)) .Input(FakeInput(input_type)) .Input(FakeInput(index_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(RangeOpTest, Simple_D32) { MakeOp(DT_INT32); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<int32>(TensorShape({}), {10}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({5})); test::FillValues<int32>(&expected, {0, 2, 4, 6, 8}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(RangeOpTest, Simple_Half) { MakeOp(DT_HALF); AddInputFromList<Eigen::half, float>(TensorShape({}), {0.5}); AddInputFromList<Eigen::half, float>(TensorShape({}), {2}); AddInputFromList<Eigen::half, float>(TensorShape({}), {0.3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_HALF, TensorShape({5})); test::FillValues<Eigen::half, float>(&expected, {0.5, 0.8, 1.1, 1.4, 1.7}); test::ExpectTensorEqual<Eigen::half>(expected, *GetOutput(0)); } TEST_F(RangeOpTest, Simple_Float) { MakeOp(DT_FLOAT); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {0.3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5})); test::FillValues<float>(&expected, {0.5, 0.8, 1.1, 1.4, 1.7}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RangeOpTest, Large_Double) { MakeOp(DT_DOUBLE); AddInputFromArray<double>(TensorShape({}), {0.0}); AddInputFromArray<double>(TensorShape({}), {10000}); AddInputFromArray<double>(TensorShape({}), {0.5}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({20000})); std::vector<double> result; for (int32_t i = 0; i < 20000; ++i) result.push_back(i * 0.5); test::FillValues<double>(&expected, absl::Span<const double>(result)); test::ExpectTensorEqual<double>(expected, *GetOutput(0)); } TEST_F(LinSpaceOpTest, Simple_D32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<float>(TensorShape({}), {7.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {3.0, 5.0, 7.0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(LinSpaceOpTest, Exact_Endpoints) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({}), {0.0}); 
AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<int32>(TensorShape({}), {42}); TF_ASSERT_OK(RunOpKernel()); Tensor output = *GetOutput(0); float expected_start = 0.0; float start = output.flat<float>()(0); EXPECT_EQ(expected_start, start) << expected_start << " vs. " << start; float expected_stop = 1.0; float stop = output.flat<float>()(output.NumElements() - 1); EXPECT_EQ(expected_stop, stop) << expected_stop << " vs. " << stop; } TEST_F(LinSpaceOpTest, Single_D64) { MakeOp(DT_FLOAT, DT_INT64); AddInputFromArray<float>(TensorShape({}), {9.0}); AddInputFromArray<float>(TensorShape({}), {100.0}); AddInputFromArray<int64_t>(TensorShape({}), {1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1})); test::FillValues<float>(&expected, {9.0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(LinSpaceOpTest, Simple_Double) { MakeOp(DT_DOUBLE, DT_INT32); AddInputFromArray<double>(TensorShape({}), {5.0}); AddInputFromArray<double>(TensorShape({}), {6.0}); AddInputFromArray<int32>(TensorShape({}), {6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({6})); test::FillValues<double>(&expected, {5.0, 5.2, 5.4, 5.6, 5.8, 6.0}); test::ExpectTensorEqual<double>(expected, *GetOutput(0)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sequence_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
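A minimal standalone sketch (not part of the record above; the helper name RangeSize and the plain scalar interface are illustrative assumptions) that mirrors the output-size computation CreateRangeTensor uses in sequence_ops.cc, so the integer and floating-point branches can be compared in isolation against the expectations in sequence_ops_test.cc.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <type_traits>

// Mirrors the size expression in CreateRangeTensor: the caller is assumed to
// have already validated delta != 0 and the start/limit ordering, exactly as
// the kernel does before this point.
template <typename T>
int64_t RangeSize(T start, T limit, T delta) {
  if (std::is_integral<T>::value) {
    return limit == start
               ? 0
               : (std::abs(limit - start) - 1) / std::abs(delta) + 1;
  }
  return static_cast<int64_t>(std::ceil(std::abs((limit - start) / delta)));
}

int main() {
  // Matches RangeOpTest.Simple_D32: range(0, 10, 2) has 5 elements.
  assert(RangeSize<int32_t>(0, 10, 2) == 5);
  // Matches RangeOpTest.Large_Double: range(0, 10000, 0.5) has 20000 elements.
  assert(RangeSize<double>(0.0, 10000.0, 0.5) == 20000);
  return 0;
}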
ddf590a5-45ca-46d3-a781-60ab681c81be
cpp
tensorflow/tensorflow
sparse_utils
tensorflow/core/kernels/sparse_utils.cc
tensorflow/core/kernels/sparse_utils_test.cc
#include "tensorflow/core/kernels/sparse_utils.h" #include <cstddef> #include <cstdint> #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { namespace sparse_utils { template <typename Tindices> Tindices FindNextDenseRowStartIndex( const Tindices sparse_index_begin, const typename TTypes<Tindices>::ConstMatrix& indices_mat) { Tindices begin = sparse_index_begin; Tindices end = indices_mat.dimension(0); const Tindices orig_sparse_index_end = end; const Tindices orig_dense_index_begin = indices_mat(begin, 0); if (orig_dense_index_begin == static_cast<int64_t>(indices_mat(end - 1, 0))) { return orig_sparse_index_end; } Tindices increment = 1; while (begin + increment < end && indices_mat(begin + increment, 0) == orig_dense_index_begin) { increment *= 2; } if (begin + increment < end) { end = begin + increment; } begin += increment / 2; const Tindices dense_row_index_to_find = orig_dense_index_begin; while (begin < end) { const Tindices m = begin + (end - begin) / 2; const Tindices m_dense_row_index = static_cast<Tindices>(indices_mat(m, 0)); if (m_dense_row_index == dense_row_index_to_find && (m + 1 == orig_sparse_index_end || static_cast<Tindices>(indices_mat(m + 1, 0)) != dense_row_index_to_find)) { return m + 1; } else if (m_dense_row_index <= dense_row_index_to_find) { begin = m + 1; } else { end = m; } } return orig_sparse_index_end; } template <typename Tindices> std::vector<Tindices> GetStartIndicesOfEachDenseRow( const typename TTypes<Tindices>::ConstMatrix& indices_mat, bool* contains_empty_rows) { int64_t start_sparse_index_of_cur_dense_row = 0; std::vector<Tindices> segment_indices; const Tindices num_entries_in_sparse_tensor = indices_mat.dimension(0); const Tindices num_dense_rows_in_sparse_tensor = 1 + indices_mat(num_entries_in_sparse_tensor - 1, 0); segment_indices.reserve(1 + num_dense_rows_in_sparse_tensor); segment_indices.push_back(0); for (Tindices i = 0; i < indices_mat(0, 0); ++i) { segment_indices.push_back(0); } *contains_empty_rows = indices_mat(0, 0) > 0; while (true) { const Tindices start_sparse_index_of_next_dense_row = FindNextDenseRowStartIndex<Tindices>( start_sparse_index_of_cur_dense_row, indices_mat); if (start_sparse_index_of_next_dense_row == num_entries_in_sparse_tensor) { segment_indices.push_back(start_sparse_index_of_next_dense_row); break; } for (Tindices i = 0; i < indices_mat(start_sparse_index_of_next_dense_row, 0) - indices_mat(start_sparse_index_of_cur_dense_row, 0); ++i) { segment_indices.push_back(start_sparse_index_of_next_dense_row); } *contains_empty_rows |= indices_mat(start_sparse_index_of_next_dense_row, 0) - indices_mat(start_sparse_index_of_cur_dense_row, 0) > 1; start_sparse_index_of_cur_dense_row = start_sparse_index_of_next_dense_row; } return segment_indices; } template <typename Tindices> std::vector<Tindices> ParseRowStartIndices( const tensorflow::Tensor& tensor, const Tindices num_nonzero_entries_in_sparse_mat) { std::vector<Tindices> out; auto vec = tensor.vec<Tindices>(); out.reserve(vec.size() + 1); for (size_t i = 0; i < vec.dimension(0); ++i) { out.push_back(vec(i)); } out.push_back(num_nonzero_entries_in_sparse_mat); return out; } template <typename Tindices> bool ContainsEmptyRows(const std::vector<Tindices>& row_start_indices) { for (size_t i = 1; i < row_start_indices.size() - 1; ++i) { if (row_start_indices.at(i) - row_start_indices.at(i - 1) == 0) { return true; 
} } return false; } namespace { Status ValidateSparseTensorShape(const Tensor& indices, const Tensor& values, const Tensor& shape) { if (!TensorShapeUtils::IsMatrix(indices.shape())) { return errors::InvalidArgument("Sparse indices must be rank 2 but is rank ", indices.shape().dim_sizes().size()); } if (!TensorShapeUtils::IsVector(values.shape())) { return errors::InvalidArgument("Sparse values must be rank 1 but is rank ", values.shape().dims()); } if (!TensorShapeUtils::IsVector(shape.shape())) { return errors::InvalidArgument("Sparse shape must be rank 1 but is rank ", shape.shape().dims()); } int64_t nnz = indices.dim_size(0); int64_t ndims = indices.dim_size(1); if (values.dim_size(0) != nnz) { return errors::InvalidArgument("Number of elements in indices (", nnz, ") and values (", values.dim_size(0), ") do not match"); } if (shape.NumElements() != ndims) { return errors::InvalidArgument("Index rank (", ndims, ") and shape rank (", shape.NumElements(), ") do not match"); } return absl::OkStatus(); } template <typename IndexTensor> string CreateIndexString(const IndexTensor& indices, int64_t row) { const int64_t ndims = indices.dimension(1); string index_str = strings::StrCat("indices[", row, ", :] = ["); for (int64_t dim = 0; dim < ndims; ++dim) { strings::StrAppend(&index_str, indices(row, dim), dim < ndims - 1 ? ", " : "]"); } if (ndims == 0) { strings::StrAppend(&index_str, "]"); } return index_str; } template <typename Tindices> Status ValidateSparseTensorIndicesUnordered(const Tensor& indices, const Tensor& shape) { const auto indices_mat = indices.flat_inner_dims<Tindices>(); const auto shape_vec = shape.flat<Tindices>(); int64_t nnz = indices.dim_size(0); int64_t ndims = indices.dim_size(1); for (int64_t i = 0; i < nnz; ++i) { for (int64_t dim = 0; dim < ndims; ++dim) { const Tindices idx = indices_mat(i, dim); if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) { string index_str = CreateIndexString(indices_mat, i); return errors::InvalidArgument("Sparse index tuple ", index_str, " is out of bounds"); } } } return absl::OkStatus(); } template <typename Tindices> Status ValidateSparseTensorIndicesOrdered(const Tensor& indices, const Tensor& shape) { const auto indices_mat = indices.flat_inner_dims<Tindices>(); const auto shape_vec = shape.flat<Tindices>(); int64_t nnz = indices.dim_size(0); int64_t ndims = indices.dim_size(1); if (nnz == 0) { return absl::OkStatus(); } for (int64_t dim = 0; dim < ndims; ++dim) { const Tindices idx = indices_mat(0, dim); if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) { string index_str = CreateIndexString(indices_mat, 0); return errors::InvalidArgument("Sparse index tuple ", index_str, " is out of bounds"); } } for (int64_t i = 1; i < nnz; ++i) { bool different = false; for (int64_t dim = 0; dim < ndims; ++dim) { const Tindices idx = indices_mat(i, dim); const Tindices prev_idx = indices_mat(i - 1, dim); if (TF_PREDICT_TRUE(different)) { if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) { string index_str = CreateIndexString(indices_mat, i); return errors::InvalidArgument("Sparse index tuple ", index_str, " is out of bounds"); } } else { if (TF_PREDICT_FALSE(idx < prev_idx || idx >= shape_vec(dim))) { string index_str = CreateIndexString(indices_mat, i); if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) { return errors::InvalidArgument("Sparse index tuple ", index_str, " is out of bounds"); } else { return errors::InvalidArgument("Sparse index tuple ", index_str, " is out of order"); } } else if (TF_PREDICT_TRUE(idx > 
prev_idx)) { different = true; } } } if (TF_PREDICT_FALSE(!different)) { string index_str = CreateIndexString(indices_mat, i); return errors::InvalidArgument("Sparse index tuple ", index_str, " is repeated"); } } return absl::OkStatus(); } } template <typename Tindices> Status ValidateSparseTensor(const Tensor& indices, const Tensor& values, const Tensor& shape, IndexValidation index_validation) { TF_RETURN_IF_ERROR(ValidateSparseTensorShape(indices, values, shape)); switch (index_validation) { case IndexValidation::kOrdered: return ValidateSparseTensorIndicesOrdered<Tindices>(indices, shape); case IndexValidation::kUnordered: return ValidateSparseTensorIndicesUnordered<Tindices>(indices, shape); case IndexValidation::kNone: { } } return absl::OkStatus(); } #define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \ template TypeIndex FindNextDenseRowStartIndex<TypeIndex>( \ const TypeIndex sparse_index_begin, \ const TTypes<TypeIndex>::ConstMatrix& indices_mat); \ template std::vector<TypeIndex> GetStartIndicesOfEachDenseRow<TypeIndex>( \ const TTypes<TypeIndex>::ConstMatrix& indices_mat, \ bool* contains_empty_rows); \ template bool ContainsEmptyRows<TypeIndex>( \ const std::vector<TypeIndex>& row_start_indices); \ template std::vector<TypeIndex> ParseRowStartIndices<TypeIndex>( \ const tensorflow::Tensor& tensor, \ const TypeIndex num_nonzero_entries_in_sparse_mat); \ template Status ValidateSparseTensor<TypeIndex>( \ const Tensor& indices, const Tensor& values, const Tensor& shape, \ IndexValidation index_validation) REGISTER_SPARSE_UTIL_FUNCTIONS(int32); REGISTER_SPARSE_UTIL_FUNCTIONS(int64); REGISTER_SPARSE_UTIL_FUNCTIONS(uint8); REGISTER_SPARSE_UTIL_FUNCTIONS(uint16); REGISTER_SPARSE_UTIL_FUNCTIONS(uint32); REGISTER_SPARSE_UTIL_FUNCTIONS(uint64); } }
#include "tensorflow/core/kernels/sparse_utils.h" #include <algorithm> #include <cstdint> #include <set> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/philox_random.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace sparse_utils { namespace { using ::tensorflow::testing::StatusIs; using ::testing::MatchesRegex; TEST(SparseUtilsTest, GetStartIndicesOfEachDenseRow) { { int32 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0}; TTypes<int32>::ConstMatrix indices_mat(data, 8, 2); bool contains_empty_rows; EXPECT_TRUE(GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows) == std::vector<int32>({0, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8})); EXPECT_TRUE(contains_empty_rows); } { int32 data[] = {0, 0, 1, 0, 1, 0, 4, 0, 4, 0, 4, 0, 6, 0, 7, 0, 7, 0, 7, 0, 7, 0, 8, 0, 8, 0, 10, 0, 12, 0}; TTypes<int32>::ConstMatrix indices_mat(data, 15, 2); bool contains_empty_rows; EXPECT_TRUE( GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows) == std::vector<int32>({0, 1, 3, 3, 3, 6, 6, 7, 11, 13, 13, 14, 14, 15})); EXPECT_TRUE(contains_empty_rows); } { int64_t data[] = {3, 0}; TTypes<int64_t>::ConstMatrix indices_mat(data, 1, 2); bool contains_empty_rows; EXPECT_TRUE(GetStartIndicesOfEachDenseRow<int64_t>(indices_mat, &contains_empty_rows) == std::vector<int64_t>({0, 0, 0, 0, 1})); EXPECT_TRUE(contains_empty_rows); } { uint32 data[] = {3, 0, 3, 0}; TTypes<uint32>::ConstMatrix indices_mat(data, 2, 2); bool contains_empty_rows; EXPECT_TRUE(GetStartIndicesOfEachDenseRow<uint32>(indices_mat, &contains_empty_rows) == std::vector<uint32>({0, 0, 0, 0, 2})); EXPECT_TRUE(contains_empty_rows); } { uint16 data[] = {0, 0, 0, 0, 0, 0, 1, 0}; TTypes<uint16>::ConstMatrix indices_mat(data, 4, 2); bool contains_empty_rows; EXPECT_TRUE(GetStartIndicesOfEachDenseRow<uint16>(indices_mat, &contains_empty_rows) == std::vector<uint16>({0, 3, 4})); EXPECT_FALSE(contains_empty_rows); } { uint64 data[] = {0, 0, 0, 0, 0, 0, 3, 0}; TTypes<uint64>::ConstMatrix indices_mat(data, 4, 2); bool contains_empty_rows; EXPECT_TRUE(GetStartIndicesOfEachDenseRow<uint64>(indices_mat, &contains_empty_rows) == std::vector<uint64>({0, 3, 3, 3, 4})); EXPECT_TRUE(contains_empty_rows); } } TEST(SparseUtilsTest, ParseRowStartIndices) { { Tensor t(DataType::DT_INT32, {1}); int indx = 0; for (const int32_t v : {0}) { t.flat<int32>()(indx++) = v; } EXPECT_TRUE(ParseRowStartIndices<int32>(t, 1) == std::vector<int32>({0, 1})); } { Tensor t(DataType::DT_INT64, {1}); int indx = 0; for (const int64_t v : {0}) { t.flat<int64_t>()(indx++) = v; } EXPECT_TRUE(ParseRowStartIndices<int64_t>(t, 2) == std::vector<int64_t>({0, 2})); } { Tensor t(DataType::DT_UINT64, {2}); int indx = 0; for (const uint64 v : {0, 3}) { t.flat<uint64>()(indx++) = v; } EXPECT_TRUE(ParseRowStartIndices<uint64>(t, 4) == std::vector<uint64>({0, 3, 4})); } { Tensor t(DataType::DT_UINT16, {2}); int indx = 0; for (const uint16 v : {0, 3}) { t.flat<uint16>()(indx++) = v; } EXPECT_TRUE(ParseRowStartIndices<uint16>(t, 4) == std::vector<uint16>({0, 3, 4})); } } 
TEST(SparseUtilsTest, ContainsEmptyRows) { { int32 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0}; TTypes<int32>::ConstMatrix indices_mat(data, 8, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows); EXPECT_TRUE(ContainsEmptyRows(segment_indices)); } { int64_t data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0}; TTypes<int64_t>::ConstMatrix indices_mat(data, 8, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<int64_t>( indices_mat, &contains_empty_rows); EXPECT_TRUE(ContainsEmptyRows(segment_indices)); } { int32 data[] = {1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4}; TTypes<int32>::ConstMatrix indices_mat(data, 6, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows); EXPECT_TRUE(ContainsEmptyRows(segment_indices)); } { uint16 data[] = {1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4}; TTypes<uint16>::ConstMatrix indices_mat(data, 6, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<uint16>( indices_mat, &contains_empty_rows); EXPECT_TRUE(ContainsEmptyRows(segment_indices)); } { int32 data[] = {0, 0, 1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4}; TTypes<int32>::ConstMatrix indices_mat(data, 7, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows); EXPECT_FALSE(ContainsEmptyRows(segment_indices)); } { int64_t data[] = {0, 0, 1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4}; TTypes<int64_t>::ConstMatrix indices_mat(data, 7, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<int64_t>( indices_mat, &contains_empty_rows); EXPECT_FALSE(ContainsEmptyRows(segment_indices)); } { uint32 data[] = {0, 0, 0, 1, 0, 2, 2, 0, 2, 1, 2, 2, 3, 4}; TTypes<uint32>::ConstMatrix indices_mat(data, 7, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<uint32>( indices_mat, &contains_empty_rows); EXPECT_TRUE(ContainsEmptyRows(segment_indices)); } { int64_t data[] = {0, 0, 0, 1, 0, 2, 2, 0, 2, 1, 2, 2, 3, 4}; TTypes<int64_t>::ConstMatrix indices_mat(data, 7, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<int64_t>( indices_mat, &contains_empty_rows); EXPECT_TRUE(ContainsEmptyRows(segment_indices)); } { uint64 data[] = {0, 0, 0, 1, 0, 2, 1, 0, 2, 1, 2, 2, 3, 4}; TTypes<uint64>::ConstMatrix indices_mat(data, 7, 2); bool contains_empty_rows; const auto segment_indices = GetStartIndicesOfEachDenseRow<uint64>( indices_mat, &contains_empty_rows); EXPECT_FALSE(ContainsEmptyRows(segment_indices)); } } TEST(SparseUtilsTest, FindNextDenseRowStartIndex) { { int32 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0}; TTypes<int32>::ConstMatrix indices_mat(data, 8, 2); for (int32_t i = 0; i < 8; ++i) { EXPECT_EQ(i + 1, FindNextDenseRowStartIndex<int32>(i, indices_mat)); } } { uint16 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0}; TTypes<uint16>::ConstMatrix indices_mat(data, 8, 2); for (uint16 i = 0; i < 8; ++i) { EXPECT_EQ(i + 1, FindNextDenseRowStartIndex<uint16>(i, indices_mat)); } } { int64_t data[] = {0, 0, 1, 0, 1, 0, 4, 0, 4, 0, 4, 0, 6, 0, 7, 0, 7, 0, 7, 0, 7, 0, 8, 0, 8, 0, 10, 0, 12, 0}; TTypes<int64_t>::ConstMatrix indices_mat(data, 15, 2); EXPECT_EQ(3, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(1), indices_mat)); EXPECT_EQ(3, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(2), 
indices_mat)); EXPECT_EQ(6, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(3), indices_mat)); EXPECT_EQ(6, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(4), indices_mat)); EXPECT_EQ(14, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(13), indices_mat)); EXPECT_EQ(15, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(14), indices_mat)); } } ::tensorflow::random::SimplePhilox& RandomPhilox() { static auto* philox = new ::tensorflow::random::PhiloxRandom(tensorflow::testing::RandomSeed()); static auto* rnd = new ::tensorflow::random::SimplePhilox(philox); return *rnd; } template <typename SetType> void FillIndicesWithRandomTuples(const TensorShape& shape, Tensor& indices) { const int64_t nnz = indices.dim_size(0); const int64_t ndims = indices.dim_size(1); SetType indices_set; int64_t count = 0; while (count < nnz) { std::vector<int64_t> candidate(ndims); for (int64_t d = 0; d < ndims; ++d) { candidate[d] = RandomPhilox().Uniform64(shape.dim_size(d)); } auto it = indices_set.insert(std::move(candidate)); if (it.second) { ++count; } } auto indices_mat = indices.matrix<int64_t>(); int64_t row = 0; for (const std::vector<int64_t>& idxs : indices_set) { for (int64_t col = 0; col < ndims; ++col) { indices_mat(row, col) = idxs[col]; } ++row; } } void GenerateRandomSparseTensor(int64_t max_nnz, const TensorShape& shape, bool ordered, Tensor& output_indices, Tensor& output_values, Tensor& output_shape) { const int64_t ndims = shape.dims(); const int64_t nnz = std::min(shape.num_elements(), max_nnz); output_indices = Tensor(DT_INT64, TensorShape({nnz, ndims})); output_values = Tensor(DT_FLOAT, TensorShape({nnz})); output_shape = Tensor(DT_INT64, TensorShape({ndims})); if (ordered) { FillIndicesWithRandomTuples<std::set<std::vector<int64_t>>>(shape, output_indices); } else { FillIndicesWithRandomTuples<absl::flat_hash_set<std::vector<int64_t>>>( shape, output_indices); } auto values_vec = output_values.vec<float>(); values_vec.setRandom(); auto shape_vec = output_shape.vec<int64_t>(); for (int i = 0; i < shape.dims(); ++i) { shape_vec(i) = shape.dim_size(i); } } using ValidateSparseTensorTest = ::testing::TestWithParam<IndexValidation>; TEST_P(ValidateSparseTensorTest, ValidSparseTensorPasses) { constexpr int kNumNonZeros = 1000; const TensorShape kTensorShapes[] = { {}, {3}, {4, 5}, {6, 7, 8}, {9, 10, 11, 12}}; const IndexValidation index_validation = GetParam(); const bool ordered = (index_validation == IndexValidation::kOrdered); for (const TensorShape& test_shape : kTensorShapes) { Tensor indices, values, shape; GenerateRandomSparseTensor(kNumNonZeros, test_shape, ordered, indices, values, shape); TF_EXPECT_OK((ValidateSparseTensor<int64_t>(indices, values, shape, index_validation))); } } TEST_P(ValidateSparseTensorTest, InvalidIndicesRankFails) { constexpr int kNumNonZeros = 1000; constexpr int kNumDims = 3; const TensorShape kInvalidIndicesShapes[] = { {}, {kNumNonZeros}, {kNumNonZeros, kNumDims, 4}}; const IndexValidation index_validation = GetParam(); for (const TensorShape& invalid_shape : kInvalidIndicesShapes) { const Tensor indices = Tensor(DT_INT64, invalid_shape); const Tensor values = Tensor(DT_FLOAT, TensorShape({kNumNonZeros})); const Tensor shape = Tensor(DT_INT64, TensorShape({kNumDims})); EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape, index_validation)), StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Sparse indices must be rank 2 .*"))); } } TEST_P(ValidateSparseTensorTest, InvalidValuesRankFails) { constexpr int kNumNonZeros 
= 1000; constexpr int kNumDims = 3; const TensorShape kInvalidValuesShapes[] = {{}, {kNumNonZeros, 2}}; const IndexValidation index_validation = GetParam(); for (const TensorShape& invalid_shape : kInvalidValuesShapes) { const Tensor indices = Tensor(DT_INT64, TensorShape({kNumNonZeros, kNumDims})); const Tensor values = Tensor(DT_FLOAT, invalid_shape); const Tensor shape = Tensor(DT_INT64, TensorShape({kNumDims})); EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape, index_validation)), StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Sparse values must be rank 1 .*"))); } } TEST_P(ValidateSparseTensorTest, InvalidShapeRankFails) { constexpr int kNumNonZeros = 1000; constexpr int kNumDims = 3; const IndexValidation index_validation = GetParam(); const TensorShape kInvalidShapeShapes[] = {{}, {kNumDims, 2}}; for (const TensorShape& invalid_shape : kInvalidShapeShapes) { const Tensor indices = Tensor(DT_INT64, TensorShape({kNumNonZeros, kNumDims})); const Tensor values = Tensor(DT_FLOAT, TensorShape({kNumNonZeros})); const Tensor shape = Tensor(DT_INT64, invalid_shape); EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape, index_validation)), StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Sparse shape must be rank 1 .*"))); } } TEST_P(ValidateSparseTensorTest, IncompatibleShapesFails) { constexpr int kNumNonZeros = 1000; constexpr int kNumDims = 3; const IndexValidation index_validation = GetParam(); const Tensor values = Tensor(DT_FLOAT, TensorShape({kNumNonZeros})); const Tensor shape = Tensor(DT_INT64, TensorShape({kNumDims})); { const Tensor indices = Tensor(DT_INT64, TensorShape({kNumNonZeros + 1, kNumDims})); EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape, index_validation)), StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Number of elements in indices .* and " "values .* do not match"))); } { const Tensor indices = Tensor(DT_INT64, TensorShape({kNumNonZeros, kNumDims + 1})); EXPECT_THAT( (ValidateSparseTensor<int64_t>(indices, values, shape, index_validation)), StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Index rank .* and shape rank .* do not match"))); } } TEST_P(ValidateSparseTensorTest, IndexOutOfBoundsFails) { constexpr int kNumNonZeros = 1000; constexpr int kNumTests = 100; const IndexValidation index_validation = GetParam(); const bool ordered = (index_validation == IndexValidation::kOrdered); const TensorShape kTensorShapes[] = {{3}, {4, 5}, {6, 7, 8}, {9, 10, 11, 12}}; for (const TensorShape& test_shape : kTensorShapes) { Tensor indices, values, shape; GenerateRandomSparseTensor(kNumNonZeros, test_shape, ordered, indices, values, shape); auto indices_mat = indices.matrix<int64_t>(); for (int test = 0; test < kNumTests; ++test) { int64_t row = RandomPhilox().Uniform64(indices.dim_size(0)); int64_t dim = RandomPhilox().Uniform64(indices.dim_size(1)); int64_t old_val = indices_mat(row, dim); for (int64_t val : {static_cast<int64_t>(-1), test_shape.dim_size(dim)}) { indices_mat(row, dim) = val; Status indices_valid = ValidateSparseTensor<int64_t>( indices, values, shape, index_validation); if (index_validation == IndexValidation::kNone) { TF_EXPECT_OK(indices_valid); } else { EXPECT_THAT( indices_valid, StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Sparse index tuple .* is out of bounds"))) << indices_mat; } } indices_mat(row, dim) = old_val; } } } TEST_P(ValidateSparseTensorTest, IndexOutOfOrderFailsForOrderedValidation) { constexpr int kNumNonZeros = 1000; constexpr int kNumTests = 100; const TensorShape kTensorShapes[] = 
{{3}, {4, 5}, {6, 7, 8}, {9, 10, 11, 12}}; const IndexValidation index_validation = GetParam(); const bool ordered = (index_validation == IndexValidation::kOrdered); for (const TensorShape& test_shape : kTensorShapes) { Tensor indices, values, shape; GenerateRandomSparseTensor(kNumNonZeros, test_shape, ordered, indices, values, shape); auto indices_mat = indices.matrix<int64_t>(); const int64_t nnz = indices.dim_size(0); const int64_t ndims = indices.dim_size(1); for (int test = 0; test < kNumTests; ++test) { int64_t row1 = RandomPhilox().Uniform64(nnz); int64_t row2; do { row2 = RandomPhilox().Uniform64(nnz); } while (row1 == row2); for (int dim = 0; dim < ndims; ++dim) { std::swap(indices_mat(row1, dim), indices_mat(row2, dim)); } Status indices_valid = ValidateSparseTensor<int64_t>( indices, values, shape, index_validation); if (ordered) { EXPECT_THAT( indices_valid, StatusIs(error::INVALID_ARGUMENT, MatchesRegex("Sparse index tuple .* is out of order"))); } else { TF_EXPECT_OK(indices_valid); } for (int dim = 0; dim < ndims; ++dim) { std::swap(indices_mat(row1, dim), indices_mat(row2, dim)); } } } } INSTANTIATE_TEST_SUITE_P( ValidateSparseTensorTestSuite, ValidateSparseTensorTest, ::testing::Values(IndexValidation::kNone, IndexValidation::kOrdered, IndexValidation::kUnordered), [](const ::testing::TestParamInfo<ValidateSparseTensorTest::ParamType>& info) { switch (info.param) { case IndexValidation::kNone: return "None"; case IndexValidation::kUnordered: return "Unordered"; case IndexValidation::kOrdered: return "Ordered"; } }); void BM_ValidateSparseTensor(::testing::benchmark::State& state, TensorShape dense_shape, IndexValidation index_validation) { Tensor indices, values, shape; const int64_t nnz = state.range(0); GenerateRandomSparseTensor(nnz, dense_shape, true, indices, values, shape); for (auto s : state) { ::benchmark::DoNotOptimize(ValidateSparseTensor<int64_t>( indices, values, shape, index_validation)); } } BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Ordered1024, TensorShape({1024}), IndexValidation::kOrdered) ->Range(8, 512); BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Unordered1024, TensorShape({1024}), IndexValidation::kUnordered) ->Range(8, 512); BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Ordered1024x1024, TensorShape({1024, 1024}), IndexValidation::kOrdered) ->Range(8, 1024); BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Unordered1024x1024, TensorShape({1024, 1024}), IndexValidation::kUnordered) ->Range(8, 1024); BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Ordered1024x1024x1024, TensorShape({1024, 1024, 1024}), IndexValidation::kOrdered) ->Range(8, 1024 * 32); BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Unordered1024x1024x1024, TensorShape({1024, 1024, 1024}), IndexValidation::kUnordered) ->Range(8, 1024 * 32); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
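A standalone reference sketch (plain std::vector inputs instead of Eigen matrices; the name RowStartsReference is an illustrative assumption) for the CSR-style row-start vector that GetStartIndicesOfEachDenseRow in sparse_utils.cc builds with galloping plus binary search. A linear counting pass gives the same answer and makes the expected vectors in sparse_utils_test.cc easy to check by hand.

#include <cassert>
#include <cstdint>
#include <vector>

// `rows` holds the dense-row coordinate of each sparse entry, sorted
// ascending, matching the row-major ordered indices the real code expects.
// starts[r] ends up holding the number of entries whose dense row is < r,
// i.e. the sparse index where dense row r begins.
std::vector<int64_t> RowStartsReference(const std::vector<int64_t>& rows,
                                        bool* contains_empty_rows) {
  const int64_t num_dense_rows = rows.empty() ? 0 : rows.back() + 1;
  std::vector<int64_t> starts(num_dense_rows + 1, 0);
  for (int64_t row : rows) ++starts[row + 1];
  for (int64_t r = 1; r <= num_dense_rows; ++r) starts[r] += starts[r - 1];
  *contains_empty_rows = false;
  for (int64_t r = 1; r <= num_dense_rows; ++r) {
    if (starts[r] == starts[r - 1]) *contains_empty_rows = true;
  }
  return starts;
}

int main() {
  bool empty = false;
  // Row coordinates from the first GetStartIndicesOfEachDenseRow test case.
  auto starts = RowStartsReference({0, 1, 4, 6, 7, 8, 10, 12}, &empty);
  assert((starts == std::vector<int64_t>{0, 1, 2, 2, 2, 3, 3, 4,
                                         5, 6, 6, 7, 7, 8}));
  assert(empty);
  return 0;
}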
5c81ba67-e18a-46de-ad5f-6fb9b38323c9
cpp
tensorflow/tensorflow
ragged_tensor_to_variant_op
tensorflow/core/kernels/ragged_tensor_to_variant_op.cc
tensorflow/core/kernels/ragged_tensor_to_variant_op_test.cc
#include <cstdint> #include <utility> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/framework/variant_op_registry.h" #include "tensorflow/core/kernels/concat_lib.h" #include "tensorflow/core/kernels/ragged_tensor_variant.h" #include "tensorflow/core/kernels/ragged_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/tensor_ops_util.h" namespace tensorflow { namespace { template <typename VALUE_TYPE> Status UnbatchDenseZerothDim( const RaggedTensorVariant& batched_ragged, std::vector<RaggedTensorVariant>* ragged_components) { Tensor batched_values = batched_ragged.values(); TensorShape values_shape = batched_values.shape(); if (values_shape.dims() < 1) { return errors::InvalidArgument("Can't unbatch rank-0 tensor."); } auto num_components = values_shape.dim_size(0); values_shape.RemoveDim(0); auto num_values = values_shape.num_elements(); ragged_components->resize(num_components); const auto& batched_flat = batched_values.flat<VALUE_TYPE>(); for (auto i = decltype(num_components){}; i < num_components; i++) { (*ragged_components)[i].set_values( Tensor(DataTypeToEnum<VALUE_TYPE>::value, values_shape)); auto ragged_component_values_flat = (*ragged_components)[i].mutable_values()->flat<VALUE_TYPE>(); for (auto j = decltype(num_values){}; j < num_values; j++) { ragged_component_values_flat(j) = batched_flat(j + i * num_values); } } return absl::OkStatus(); } template <typename VALUE_TYPE, typename SPLIT_TYPE> Status UnbatchRaggedZerothDim( const RaggedTensorVariant& batched_ragged, std::vector<RaggedTensorVariant>* ragged_components) { int ragged_rank = batched_ragged.ragged_rank(); if (ragged_rank == 0) { return UnbatchDenseZerothDim<VALUE_TYPE>(batched_ragged, ragged_components); } auto batched_splits_top_vec = batched_ragged.splits(0).vec<SPLIT_TYPE>(); auto num_components = batched_splits_top_vec.size() - 1; if (num_components < 0) { return errors::Internal("Invalid split argument."); } int num_splits = ragged_rank - 1; ragged_components->resize(num_components); for (RaggedTensorVariant& ragged_component : *ragged_components) { ragged_component.mutable_nested_splits()->reserve(num_splits); } const auto& batched_flat = batched_ragged.values().flat<VALUE_TYPE>(); auto num_inner_elems = batched_ragged.values().NumElements(); if (batched_ragged.values().dim_size(0) > 1) { num_inner_elems /= batched_ragged.values().dim_size(0); } TensorShape values_shape = batched_ragged.values().shape(); if (num_splits == 0) { for (auto i = decltype(num_components){}; i < num_components; i++) { auto start = batched_splits_top_vec(i); auto limit = batched_splits_top_vec(i + 1); auto num_values = limit - start; values_shape.set_dim(0, num_values); (*ragged_components)[i].set_values( Tensor(DataTypeToEnum<VALUE_TYPE>::value, values_shape)); auto ragged_component_values_flat = (*ragged_components)[i].mutable_values()->template flat<VALUE_TYPE>(); for (auto j = decltype(num_values * num_inner_elems){}; j < num_values * num_inner_elems; j++) { ragged_component_values_flat(j) = batched_flat(j + start * num_inner_elems); } } return 
absl::OkStatus(); } std::vector<typename TTypes<SPLIT_TYPE>::ConstVec> batched_splits_vec; batched_splits_vec.reserve(ragged_rank); for (int i = 0; i < ragged_rank; i++) { batched_splits_vec.push_back(batched_ragged.splits(i).vec<SPLIT_TYPE>()); } std::vector<SPLIT_TYPE> index(num_splits, 1); std::vector<SPLIT_TYPE> ragged_component_values_size(num_components, 0); for (auto i = decltype(num_components){}; i < num_components; i++) { std::vector<typename TTypes<SPLIT_TYPE>::Vec> ragged_component_splits_vec; ragged_component_splits_vec.reserve(num_splits); SPLIT_TYPE split_size = -1; for (int j = 0; j < num_splits; j++) { if (j == 0) { split_size = batched_splits_top_vec(i + 1) - batched_splits_top_vec(i) + 1; } else { SPLIT_TYPE last_index = ragged_component_splits_vec[j - 1].size() - 1; split_size = ragged_component_splits_vec[j - 1](last_index) + 1; } (*ragged_components)[i].append_splits( Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size}))); ragged_component_splits_vec.push_back((*ragged_components)[i] .mutable_splits(j) ->template vec<SPLIT_TYPE>()); SPLIT_TYPE last_split_value = batched_splits_vec[j + 1](index[j] - 1); ragged_component_splits_vec[j](0) = 0; for (SPLIT_TYPE k = 1; k < split_size; k++, index[j]++) { ragged_component_splits_vec[j](k) = batched_splits_vec[j + 1](index[j]) - last_split_value; } } SPLIT_TYPE last_split_size = ragged_component_splits_vec[num_splits - 1].size(); ragged_component_values_size[i] = ragged_component_splits_vec[num_splits - 1](last_split_size - 1); } int64_t value_index = 0; for (auto i = decltype(num_components){}; i < num_components; i++) { SPLIT_TYPE num_values = ragged_component_values_size[i]; values_shape.set_dim(0, num_values); (*ragged_components)[i].set_values( Tensor(DataTypeToEnum<VALUE_TYPE>::value, values_shape)); auto ragged_component_values_flat = (*ragged_components)[i].mutable_values()->template flat<VALUE_TYPE>(); for (int64_t j = 0; j < num_values * num_inner_elems; j++, value_index++) { ragged_component_values_flat(j) = batched_flat(value_index); } } return absl::OkStatus(); } } template <typename VALUE_TYPE, typename SPLIT_TYPE> class RaggedTensorToVariantOp : public OpKernel { public: explicit RaggedTensorToVariantOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("batched_input", &batched_input_)); } void Compute(OpKernelContext* context) override { OpInputList ragged_nested_splits_in; OP_REQUIRES_OK(context, context->input_list("rt_nested_splits", &ragged_nested_splits_in)); const int ragged_nested_splits_len = ragged_nested_splits_in.size(); RaggedTensorVariant batched_ragged_input; batched_ragged_input.set_values(context->input(ragged_nested_splits_len)); batched_ragged_input.mutable_nested_splits()->reserve( ragged_nested_splits_len); for (int i = ragged_nested_splits_len - 1; i >= 0; --i) { SPLIT_TYPE nvals; if (i == ragged_nested_splits_len - 1) { OP_REQUIRES(context, batched_ragged_input.values().dims() >= 1, errors::InvalidArgument( "Requires flat_values to have rank>=1 when " "nested_row_splits is not empty, but is 0.")); nvals = batched_ragged_input.values().dim_size(0); } else { nvals = ragged_nested_splits_in[i + 1].dim_size(0) - 1; } OP_REQUIRES_OK(context, RaggedTensorVerifySplits<SPLIT_TYPE>( ragged_nested_splits_in[i], true, nvals)); } for (int i = 0; i < ragged_nested_splits_len; i++) { batched_ragged_input.append_splits(ragged_nested_splits_in[i]); } if (!batched_input_) { Tensor* encoded_scalar; OP_REQUIRES_OK(context, context->allocate_output(0, 
TensorShape({}), &encoded_scalar)); encoded_scalar->scalar<Variant>()() = std::move(batched_ragged_input); return; } std::vector<RaggedTensorVariant> unbatched_ragged_input; OP_REQUIRES_OK(context, UnbatchRaggedZerothDim<VALUE_TYPE, SPLIT_TYPE>( batched_ragged_input, &unbatched_ragged_input)); Tensor* encoded_vector; int64_t output_size = unbatched_ragged_input.size(); OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({output_size}), &encoded_vector)); auto encoded_vector_t = encoded_vector->vec<Variant>(); for (auto i = decltype(output_size){}; i < output_size; i++) { encoded_vector_t(i) = unbatched_ragged_input[i]; } } private: bool batched_input_; }; template <typename VALUE_TYPE, typename SPLIT_TYPE> class RaggedTensorToVariantGradientOp : public OpKernel { public: using OpKernel::OpKernel; void Compute(OpKernelContext* context) override { Tensor encoded_variant = context->input(0); Tensor row_splits = context->input(1); auto flat_row_splits = row_splits.flat<SPLIT_TYPE>(); TensorShape dense_values_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(context->input(2).vec<int32>(), &dense_values_shape)); if (row_splits.dims()) { OP_REQUIRES_OK( context, RaggedTensorVerifySplits<SPLIT_TYPE>(row_splits, false, 0)); } const auto& flat_variants = encoded_variant.flat<Variant>(); std::vector<Tensor> values; for (int i = 0; i < flat_variants.size(); ++i) { if (const auto* encoded = flat_variants(i).get<RaggedTensorVariant>()) { values.push_back(encoded->values()); } else { const auto value_dtype = DataTypeToEnum<VALUE_TYPE>::v(); auto piece_size = flat_row_splits(i + 1) - flat_row_splits(i); TensorShape zeros_shape = dense_values_shape; zeros_shape.set_dim(0, piece_size); Tensor zero(value_dtype, zeros_shape); zero.flat<VALUE_TYPE>().setZero(); values.push_back(zero); } } if (values.size() == 1) { context->set_output(0, values[0]); } else { Tensor* out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, dense_values_shape, &out)); if (dense_values_shape.num_elements() == 0) return; using Piece = typename TTypes<VALUE_TYPE, 2>::Matrix; using ConstPiece = typename TTypes<VALUE_TYPE, 2>::ConstMatrix; std::vector<std::unique_ptr<ConstPiece>> pieces; pieces.reserve(values.size()); for (const Tensor& t : values) { if (t.NumElements() == 0) continue; pieces.emplace_back( new ConstPiece(t.shaped<VALUE_TYPE, 2>({1, t.NumElements()}))); } Piece out_flat = out->shaped<VALUE_TYPE, 2>({1, dense_values_shape.num_elements()}); ConcatCPU<VALUE_TYPE>(context->device(), pieces, &out_flat); } } }; #define REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, split_type) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorToVariant") \ .Device(DEVICE_CPU) \ .TypeConstraint<value_type>("Tvalues") \ .TypeConstraint<split_type>("Tsplits"), \ RaggedTensorToVariantOp<value_type, split_type>); \ REGISTER_KERNEL_BUILDER( \ Name("RaggedTensorToVariantGradient") \ .Device(DEVICE_CPU) \ .TypeConstraint<value_type>("Tvalues") \ .TypeConstraint<split_type>("Tsplits"), \ RaggedTensorToVariantGradientOp<value_type, split_type>); #define REGISTER_KERNELS(value_type) \ REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \ REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64_t) TF_CALL_POD_TYPES(REGISTER_KERNELS); TF_CALL_tstring(REGISTER_KERNELS); TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS); TF_CALL_quint16(REGISTER_KERNELS); TF_CALL_qint16(REGISTER_KERNELS); #undef REGISTER_KERNELS #undef REGISTER_KERNELS_WITH_SPLIT_TYPE }
#include "tensorflow/core/kernels/ragged_tensor_to_variant_op_test.h" #include <vector> #include <gtest/gtest.h> #include "absl/strings/match.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ragged_tensor_variant.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace { TEST_F(RaggedTensorToVariantKernelTest, NoValuesInput) { const std::vector<int64_t> batched_splits_1 = {0, 2, 3, 3}; const std::vector<int64_t> batched_splits_2 = {0, 0, 0, 0}; BuildEncodeRaggedTensorGraph<int, int64_t>( {batched_splits_1, batched_splits_2}, TensorShape({0}), {}, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 3); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 0, 0}}, {}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 0}}, {}), *encoded_list(1).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0}}, {}), *encoded_list(2).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, 1DValuesRaggedRankOneInput) { const std::vector<int64_t> batched_splits = {0, 3, 3, 5, 6}; const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6}; BuildEncodeRaggedTensorGraph<int, int64_t>({batched_splits}, TensorShape({6}), batched_values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 4); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {1, 2, 3}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {}), *encoded_list(1).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {4, 5}), *encoded_list(2).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {6}), *encoded_list(3).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, 2DBatchedValuesRankOneInput) { const std::vector<int64_t> batched_splits = {0, 1, 2, 3}; const std::vector<int> batched_values = {1, 2, 4, 5, 6, 7}; BuildEncodeRaggedTensorGraph<int, int64_t>( {batched_splits}, TensorShape({3, 2}), batched_values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 3); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {1, 2}, {1, 2}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {1, 2}, {4, 5}), *encoded_list(1).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, 
int64_t>( CreateVariantFromRagged<int, int64_t>({}, {1, 2}, {6, 7}), *encoded_list(2).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, 2DBatchedValuesRankTwoInput) { const std::vector<int64_t> batched_splits_1 = {0, 1, 2}; const std::vector<int64_t> batched_splits_2 = {0, 2, 3}; const std::vector<int> batched_values = {1, 2, 4, 5, 6, 7}; BuildEncodeRaggedTensorGraph<int, int64_t>( {batched_splits_1, batched_splits_2}, TensorShape({3, 2}), batched_values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 2); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 2}}, {2, 2}, {1, 2, 4, 5}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 1}}, {1, 2}, {6, 7}), *encoded_list(1).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, EmptyRowInBatchedInput) { const std::vector<int64_t> batched_splits_1 = {0, 3, 3, 5, 7}; const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 3, 8, 11, 11, 15}; const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; BuildEncodeRaggedTensorGraph<int, int64_t>( {batched_splits_1, batched_splits_2}, TensorShape({15}), batched_values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 4); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 1, 3, 3}}, {1, 2, 3}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0}}, {}), *encoded_list(1).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 5, 8}}, {4, 5, 6, 7, 8, 9, 10, 11}), *encoded_list(2).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({{0, 0, 4}}, {12, 13, 14, 15}), *encoded_list(3).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, NonEmptyBatchedInput) { const std::vector<int64_t> batched_splits_1 = {0, 5, 10}; const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11}; const std::vector<int64_t> batched_splits_3 = {0, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14}; const std::vector<int> batched_values = {0, 1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 8, 9}; BuildEncodeRaggedTensorGraph<int, int64_t>( {batched_splits_1, batched_splits_2, batched_splits_3}, TensorShape({14}), batched_values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 2); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>( {{0, 1, 3, 4, 5, 6}, {0, 2, 3, 4, 5, 6, 7}}, {0, 1, 1, 2, 2, 3, 4}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>( {{0, 1, 2, 3, 4, 5}, {0, 1, 2, 5, 6, 7}}, {5, 6, 7, 8, 9, 8, 9}), *encoded_list(1).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, NonEmptyBatchedInputInt32Splits) { const std::vector<int> batched_splits_1 = {0, 5, 10}; const std::vector<int> batched_splits_2 = {0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11}; const std::vector<int> batched_splits_3 = {0, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14}; const std::vector<int> batched_values = {0, 1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 8, 9}; 
BuildEncodeRaggedTensorGraph<int, int32>( {batched_splits_1, batched_splits_2, batched_splits_3}, TensorShape({14}), batched_values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 2); ExpectRaggedTensorVariantEqual<int, int32>( CreateVariantFromRagged<int, int32>( {{0, 1, 3, 4, 5, 6}, {0, 2, 3, 4, 5, 6, 7}}, {0, 1, 1, 2, 2, 3, 4}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int32>( CreateVariantFromRagged<int, int32>( {{0, 1, 2, 3, 4, 5}, {0, 1, 2, 5, 6, 7}}, {5, 6, 7, 8, 9, 8, 9}), *encoded_list(1).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, NonBatchInput) { const std::vector<int64_t> batched_splits_1 = {0, 3, 3, 5, 7}; const std::vector<int64_t> batched_splits_2 = {0, 1, 3, 3, 8, 11, 11, 15}; const std::vector<int> batched_values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; BuildEncodeRaggedTensorGraph<int, int64_t>( {batched_splits_1, batched_splits_2}, TensorShape({15}), batched_values, false); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_scalar = GetOutput(0)->scalar<Variant>()(); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>( {batched_splits_1, batched_splits_2}, batched_values), *encoded_scalar.get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, ShapeFnTestBatched) { ShapeInferenceTestOp op("RaggedTensorToVariant"); (*op.node_def.mutable_attr())["Tvalues"].set_type(DT_INT32); (*op.node_def.mutable_attr())["batched_input"].set_b(true); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(0); INFER_OK(op, "?", "[?]"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(1); INFER_OK(op, "?;?", "[?]"); INFER_OK(op, "?;[?]", "[?]"); INFER_OK(op, "?;[?,?]", "[?]"); INFER_OK(op, "[?];[5]", "[?]"); INFER_OK(op, "[?];[5,2]", "[?]"); INFER_OK(op, "[5];[5,2]", "[4]"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(2); INFER_OK(op, "?;?;?", "[?]"); INFER_OK(op, "?;?;[?]", "[?]"); INFER_OK(op, "?;?;[?,?]", "[?]"); INFER_OK(op, "[?];[?];[5]", "[?]"); INFER_OK(op, "[?];[?];[5,2]", "[?]"); INFER_OK(op, "[6];[?];[5,2]", "[5]"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(3); INFER_OK(op, "?;?;?;?", "[?]"); INFER_OK(op, "?;?;?;[?]", "[?]"); INFER_OK(op, "?;?;?;[5]", "[?]"); INFER_OK(op, "[4];?;?;[5]", "[3]"); } TEST_F(RaggedTensorToVariantKernelTest, ShapeFnTestNotBatched) { ShapeInferenceTestOp op("RaggedTensorToVariant"); (*op.node_def.mutable_attr())["Tvalues"].set_type(DT_INT32); (*op.node_def.mutable_attr())["batched_input"].set_b(false); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(0); INFER_OK(op, "?", "[]"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(1); INFER_OK(op, "?;?", "[]"); INFER_OK(op, "?;[?]", "[]"); INFER_OK(op, "?;[?,?]", "[]"); INFER_OK(op, "[?];[5]", "[]"); INFER_OK(op, "[?];[5,2]", "[]"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(2); INFER_OK(op, "?;?;?", "[]"); INFER_OK(op, "?;?;[?]", "[]"); INFER_OK(op, "?;?;[?,?]", "[]"); INFER_OK(op, "[?];[?];[5]", "[]"); INFER_OK(op, 
"[?];[?];[5,2]", "[]"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?"); (*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(3); INFER_OK(op, "?;?;?;?", "[]"); INFER_OK(op, "?;?;?;[?]", "[]"); INFER_OK(op, "?;?;?;[5]", "[]"); } TEST_F(RaggedTensorToVariantKernelTest, NonRaggedInput) { const std::vector<int> values = {1, 2, 3, 4, 5, 6}; BuildEncodeRaggedTensorGraph<int, int64_t>({}, TensorShape({6}), values, false); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_scalar = GetOutput(0)->scalar<Variant>()(); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, values), *encoded_scalar.get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, NonRaggedBatchedInput) { TensorShape shape({2, 3, 2}); const std::vector<int> values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; BuildEncodeRaggedTensorGraph<int, int64_t>({}, shape, values, true); TF_ASSERT_OK(RunOpKernel()); const auto& encoded_list = GetOutput(0)->vec<Variant>(); EXPECT_EQ(encoded_list.size(), 2); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {3, 2}, {1, 2, 3, 4, 5, 6}), *encoded_list(0).get<RaggedTensorVariant>()); ExpectRaggedTensorVariantEqual<int, int64_t>( CreateVariantFromRagged<int, int64_t>({}, {3, 2}, {7, 8, 9, 10, 11, 12}), *encoded_list(1).get<RaggedTensorVariant>()); } TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsFirstElementNotZeroError) { const std::vector<int64_t> splits = {1, 2}; BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({0}), {}, true); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: first element of " "ragged splits must be 0 but is 1")); } TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsIncreasingError) { const std::vector<int64_t> splits = {0, 2, -1}; BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({0}), {}, true); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: ragged splits must be " "monotonically increasing, but " "ragged_splits[2]=-1 is smaller than " "row_splits[1]=2")); } TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsSizeMismatchError) { const std::vector<int64_t> splits = {0, 2, 3}; BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({5}), {0, 1, 2, 3, 4}, true); EXPECT_THAT( RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: last element of ragged splits " "must be the number of ragged values(5) but is 3")); } TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsInnerDimensionSizeMismatchError) { const std::vector<int64_t> splits1 = {0, 2, 3}; const std::vector<int64_t> splits2 = {0, 3, 3, 4}; BuildEncodeRaggedTensorGraph<int, int64_t>( {splits1, splits2}, TensorShape({5}), {0, 1, 2, 3, 4}, true); EXPECT_THAT( RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: last element of ragged splits " "must be the number of ragged values(5) but is 4")); } TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsSizeOfSplitsMismatchError) { const std::vector<int64_t> splits1 = {0, 2}; const std::vector<int64_t> splits2 = {0, 3, 3, 5}; BuildEncodeRaggedTensorGraph<int, int64_t>( {splits1, splits2}, TensorShape({5}), {0, 1, 2, 3, 4}, true); EXPECT_THAT( RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: last element of ragged splits " "must be the number of ragged values(3) but is 2")); } TEST_F(RaggedTensorToVariantKernelTest, 
NestedRowSplitsEmptySplitsError) { const std::vector<int64_t> splits = {}; BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({5}), {0, 1, 2, 3, 4}, true); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: ragged splits must " "have at least one splits, but is empty")); } TEST_F(RaggedTensorToVariantKernelTest, NestedRowSplitsScalarValueError) { const std::vector<int64_t> splits = {0, 2}; BuildEncodeRaggedTensorGraph<int, int64_t>({splits}, TensorShape({}), 1, true); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Requires flat_values to have rank>=1 when " "nested_row_splits is not empty, but is 0.")); } TEST_F(RaggedTensorToVariantGradientKernelTest, RowSplitsMatch) { auto encoded_variant_grad_1 = CreateVariantFromRagged<int, int64_t>({}, {3}, {1, 2, 3}); auto encoded_variant_grad_2 = CreateVariantFromRagged<int, int64_t>({}, {0}, {}); auto encoded_variant_grad_3 = CreateVariantFromRagged<int, int64_t>({}, {2}, {4, 5}); auto encoded_variant_grad_4 = CreateVariantFromRagged<int, int64_t>({}, {1}, {6}); BuildEncodeRaggedTensorGradientGraph<int, int64_t>( {encoded_variant_grad_1, encoded_variant_grad_2, encoded_variant_grad_3, encoded_variant_grad_4}, {0, 3, 3, 5, 6}, {6}); TF_ASSERT_OK(RunOpKernel()); } TEST_F(RaggedTensorToVariantGradientKernelTest, RowSplitsFirstElementNotZeroError) { auto encoded_variant_grad_1 = CreateVariantFromRagged<int, int64_t>({}, {3}, {1, 2, 3}); auto encoded_variant_grad_2 = CreateVariantFromRagged<int, int64_t>({}, {0}, {}); auto encoded_variant_grad_3 = CreateVariantFromRagged<int, int64_t>({}, {2}, {4, 5}); auto encoded_variant_grad_4 = CreateVariantFromRagged<int, int64_t>({}, {1}, {6}); BuildEncodeRaggedTensorGradientGraph<int, int64_t>( {encoded_variant_grad_1, encoded_variant_grad_2, encoded_variant_grad_3, encoded_variant_grad_4}, {1, 3, 3, 5, 6}, {6}); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: first element of " "ragged splits must be 0 but is 1")); } TEST_F(RaggedTensorToVariantGradientKernelTest, RowSplitsIncreasingError) { auto encoded_variant_grad_1 = CreateVariantFromRagged<int, int64_t>({}, {3}, {1, 2, 3}); auto encoded_variant_grad_2 = CreateVariantFromRagged<int, int64_t>({}, {0}, {}); auto encoded_variant_grad_3 = CreateVariantFromRagged<int, int64_t>({}, {2}, {4, 5}); auto encoded_variant_grad_4 = CreateVariantFromRagged<int, int64_t>({}, {1}, {6}); BuildEncodeRaggedTensorGradientGraph<int, int64_t>( {encoded_variant_grad_1, encoded_variant_grad_2, encoded_variant_grad_3, encoded_variant_grad_4}, {0, 3, 2, 5, 6}, {6}); EXPECT_THAT(RunOpKernel(), testing::StatusIs(error::INVALID_ARGUMENT, "Invalid ragged splits: ragged splits must be " "monotonically increasing, but " "ragged_splits[2]=2 is smaller than " "row_splits[1]=3")); } } }
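The error-path tests above all probe the same invariants on a ragged row_splits vector: it must be non-empty, start at 0, be monotonically non-decreasing, and end at the number of flat values. A minimal standalone restatement of that check (the helper name ValidateRowSplits is hypothetical and not part of the kernel under test):

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical restatement of the row_splits invariants exercised by the
// error tests above; returns an empty string when the splits are valid.
std::string ValidateRowSplits(const std::vector<int64_t>& splits,
                              int64_t num_values) {
  if (splits.empty()) return "ragged splits must have at least one element";
  if (splits.front() != 0) return "first element of ragged splits must be 0";
  for (size_t i = 1; i < splits.size(); ++i) {
    if (splits[i] < splits[i - 1])
      return "ragged splits must be monotonically increasing";
  }
  if (splits.back() != num_values)
    return "last element of ragged splits must be the number of ragged values";
  return "";
}

int main() {
  // {0, 2, 3} partitions 3 flat values into rows [0,2) and [2,3): valid.
  return ValidateRowSplits({0, 2, 3}, 3).empty() ? 0 : 1;
}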
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_variant_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
fa06c82b-257e-4064-877d-65c16a46a30e
cpp
tensorflow/tensorflow
deep_conv2d
tensorflow/core/kernels/deep_conv2d.cc
tensorflow/core/kernels/deep_conv2d_test.cc
#define USE_EIGEN_TENSOR #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/deep_conv2d.h" #include <stdlib.h> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/winograd_transform.h" #include "tensorflow/core/util/work_sharder.h" namespace tensorflow { static int64_t GetDeepConvCost(int input_tile_rows, int input_tile_cols, int out_tile_rows, int out_tile_cols, int in_depth, int out_depth, int out_rows, int out_cols) { const int64_t input_tile_spatial_size = input_tile_rows * input_tile_cols; const int64_t input_transform_cost = input_tile_spatial_size * input_tile_spatial_size * in_depth; const int64_t product_cost = input_tile_spatial_size * in_depth * out_depth; const int64_t output_tile_spatial_size = out_tile_rows * out_tile_cols; const int64_t output_transform_cost = output_tile_spatial_size * input_tile_spatial_size * out_depth; const int64_t row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows; const int64_t col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols; const int64_t num_tiles = row_tiles * col_tiles; return num_tiles * (input_transform_cost + product_cost + output_transform_cost); } static int64_t GetDirectConvCost(int filter_rows, int filter_cols, int in_depth, int out_depth, int out_rows, int out_cols) { return filter_rows * filter_cols * in_depth * out_depth * out_rows * out_cols; } static bool ReadBoolFromEnvVar(const char* env_var_name, bool default_val) { const char* tf_env_var_val = getenv(env_var_name); if (tf_env_var_val != nullptr) { StringPiece tf_env_var_val_str(tf_env_var_val); if (tf_env_var_val_str == "0") { return false; } return true; } return default_val; } bool CanUseDeepConv2D(int stride_rows, int stride_cols, int filter_rows, int filter_cols, int in_depth, int out_depth, int out_rows, int out_cols) { if (stride_rows > 1 || stride_cols > 1 || filter_rows != 3 || filter_cols != 3) { return false; } if (!ReadBoolFromEnvVar("TF_USE_DEEP_CONV2D", false)) { return false; } WinogradTransform<float> t; const int64_t deep_conv_cost = GetDeepConvCost( t.input_shape().rows, t.input_shape().cols, t.output_shape().rows, t.output_shape().cols, in_depth, out_depth, out_rows, out_cols); const int64_t direct_conv_cost = GetDirectConvCost( filter_rows, filter_cols, in_depth, out_depth, out_rows, out_cols); VLOG(2) << "CanUseDeepConv2D" << " deep_conv_cost: " << deep_conv_cost << " direct_conv_cost: " << direct_conv_cost << " deep_direct_ratio: " << (static_cast<float>(deep_conv_cost) / static_cast<float>(direct_conv_cost)) << " use_deep_conv: " << (deep_conv_cost < direct_conv_cost); return deep_conv_cost < direct_conv_cost; } typedef Eigen::ThreadPoolDevice CPUDevice; template <typename T> struct CopyFilterDepth { void operator()(const Conv2DArgs& args, const T* filter_in, T* filter_buf) { typedef typename Eigen::internal::packet_traits<T>::type Packet; static constexpr int64_t kPacketSize = (sizeof(Packet) / sizeof(T)); const int64_t vectorized_size = args.in_depth / kPacketSize; const int64_t scalar_size = args.in_depth % kPacketSize; const int64_t input_stride = args.out_depth * kPacketSize; for (int64_t d = 0; d < vectorized_size; ++d) { auto v = Eigen::internal::pgather<T, Packet>(filter_in + d * input_stride, args.out_depth); Eigen::internal::pstoreu<T>(filter_buf + d * kPacketSize, v); } const int64_t in_scalar_base = vectorized_size * input_stride; const int64_t buf_scalar_base = vectorized_size * kPacketSize; for (int64_t d = 0; d < scalar_size; ++d) { filter_buf[buf_scalar_base + d] = 
filter_in[in_scalar_base + d * args.out_depth]; } } }; template <typename T> struct ComputeFilterRangeTransform { typedef typename Eigen::internal::packet_traits<T>::type Packet; static constexpr int64_t kPacketSize = (sizeof(Packet) / sizeof(T)); typedef Eigen::Map< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> MatrixMap; typedef Eigen::Map< const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> ConstMatrixMap; void operator()(const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const int64_t od_start, const int64_t num_filters, const int64_t shard_rows, const int64_t shard_cols, const T* filter_in, const int64_t in_stride, const int64_t out_stride, const T* transform_matrix, T* out_buffer, T* filter_out) { namespace ei = Eigen::internal; const int64_t in_depth = args.in_depth; const int64_t base_filter_rows = transform->filter_shape().rows; const int64_t base_filter_cols = transform->filter_shape().cols; const int64_t base_filter_spatial_size = base_filter_rows * base_filter_cols; const int64_t tile_rows = transform->input_shape().rows; const int64_t tile_cols = transform->input_shape().cols; const int64_t tile_spatial_size = tile_rows * tile_cols; ConstMatrixMap A(transform_matrix, tile_spatial_size, base_filter_spatial_size); ConstMatrixMap B(filter_in, base_filter_spatial_size, in_stride); MatrixMap C(out_buffer, tile_spatial_size, in_stride); C.noalias() = A * B; const int64_t scalar_size = in_depth % kPacketSize; const int64_t vectorized_size = in_depth / kPacketSize; const int64_t shard_stride = args.in_depth; const int64_t out_depth_stride = shard_rows * shard_cols * shard_stride; for (int64_t od = 0; od < num_filters; ++od) { const int64_t out_depth_buf_base = od * out_depth_stride; const int64_t out_depth_base = (od_start + od) * out_depth_stride; for (int64_t s_r = 0; s_r < shard_rows; ++s_r) { for (int64_t s_c = 0; s_c < shard_cols; ++s_c) { const int64_t shard_base = shard_stride * (s_r * shard_cols + s_c); for (int64_t i = 0; i < tile_spatial_size; ++i) { const int64_t in_base = i * in_stride + out_depth_buf_base + shard_base; const int64_t out_base = i * out_stride + out_depth_base + shard_base; for (int64_t d = 0; d < vectorized_size; ++d) { auto v = ei::ploadu<Packet>(out_buffer + in_base + d * kPacketSize); ei::pstoreu<T>(filter_out + out_base + d * kPacketSize, v); } const int64_t scalar_base = vectorized_size * kPacketSize; for (int64_t d = 0; d < scalar_size; ++d) { filter_out[out_base + scalar_base + d] = out_buffer[in_base + scalar_base + d]; } } } } } } }; template <typename T> struct TransformFilterRange { void operator()(const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const int64_t od_start, const int64_t od_limit, const T* filter_in, const T* transform_matrix, T* out_buffer, T* filter_buf, T* filter_out) { const int64_t num_filters = od_limit - od_start; const int64_t base_filter_rows = transform->filter_shape().rows; const int64_t base_filter_cols = transform->filter_shape().cols; const int64_t base_filter_spatial_size = base_filter_rows * base_filter_cols; const int64_t residual_row = std::max(int64_t{0}, args.filter_rows - base_filter_rows); const int64_t shard_rows = 1 + (residual_row + 2 - 1) / 2; const int64_t residual_col = std::max(int64_t{0}, args.filter_cols - base_filter_cols); const int64_t shard_cols = 1 + (residual_col + 2 - 1) / 2; const int64_t shard_stride = args.in_depth; const int64_t out_depth_stride = shard_rows * shard_cols * shard_stride; const int64_t coord_stride = 
out_depth_stride * args.out_depth; const int64_t filter_buf_stride = num_filters * shard_rows * shard_cols * args.in_depth; const int64_t tile_stride_rows = transform->output_shape().rows; const int64_t tile_stride_cols = transform->output_shape().cols; const int64_t filter_buf_size = base_filter_spatial_size * num_filters * shard_rows * shard_cols * args.in_depth; memset(filter_buf, 0, sizeof(T) * filter_buf_size); for (int64_t od = 0; od < num_filters; ++od) { const int64_t out_depth_base = od * out_depth_stride; for (int64_t s_r = 0; s_r < shard_rows; ++s_r) { const int64_t row_offset = s_r == 0 ? 0 : 1; for (int64_t s_c = 0; s_c < shard_cols; ++s_c) { const int64_t col_offset = s_c == 0 ? 0 : 1; const int64_t f_r_start = s_r * tile_stride_rows; const int64_t f_c_start = s_c * tile_stride_cols; const int64_t shard_base = shard_stride * (s_r * shard_cols + s_c); for (int64_t b_r = row_offset; b_r < base_filter_rows; ++b_r) { const int64_t f_r = f_r_start + b_r; if (f_r >= args.filter_rows) continue; for (int64_t b_c = col_offset; b_c < base_filter_cols; ++b_c) { const int64_t f_c = f_c_start + b_c; if (f_c >= args.filter_cols) continue; const int64_t in_index = args.out_depth * (args.in_depth * (f_r * args.filter_cols + f_c)) + (od_start + od); const int64_t buf_index = filter_buf_stride * (b_r * base_filter_cols + b_c) + out_depth_base + shard_base; CopyFilterDepth<T>()(args, filter_in + in_index, filter_buf + buf_index); } } } } } ComputeFilterRangeTransform<T>()(args, transform, od_start, num_filters, shard_rows, shard_cols, filter_buf, filter_buf_stride, coord_stride, transform_matrix, out_buffer, filter_out); } }; template <typename T> struct TransformFilters { void operator()(OpKernelContext* ctx, const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const int64_t filter_shards_row, const int64_t filter_shards_col, const T* filter_in, T* filter_out) { const int64_t in_depth = args.in_depth; const int64_t out_depth = args.out_depth; const int64_t tile_rows = transform->input_shape().rows; const int64_t tile_cols = transform->input_shape().cols; const int64_t tile_spatial_size = tile_rows * tile_cols; const int64_t base_filter_rows = transform->filter_shape().rows; const int64_t base_filter_cols = transform->filter_shape().cols; const int64_t base_filter_spatial_size = base_filter_rows * base_filter_cols; const int64_t filter_shards_total = filter_shards_row * filter_shards_col; const int64_t cache_size = (256LL << 10) / sizeof(T); const int64_t filter_transform_matrix_size = tile_spatial_size * base_filter_spatial_size; const int64_t filter_total_size = base_filter_spatial_size * in_depth * filter_shards_total; const int64_t filter_transform_buffer_size = base_filter_spatial_size * filter_shards_total * in_depth; const int64_t filter_out_buf_size = tile_spatial_size * filter_shards_total * in_depth; const int64_t per_filter_cost = filter_total_size + filter_transform_buffer_size + filter_out_buf_size; const int64_t num_filters_cache = std::max(int64_t{1}, (cache_size - filter_transform_matrix_size) / per_filter_cost); const int64_t num_filters_transform = std::min(out_depth, num_filters_cache); Tensor filter_transform_matrix; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({tile_spatial_size, base_filter_spatial_size}), &filter_transform_matrix)); T* transform_matrix = filter_transform_matrix.template flat<T>().data(); transform->GetFilterTransformMatrix( tile_spatial_size, base_filter_spatial_size, transform_matrix); auto shard = [&ctx, 
&args, &transform, &base_filter_rows, &base_filter_cols, &num_filters_transform, &in_depth, &filter_shards_row, &filter_shards_col, &tile_spatial_size, &filter_in, &transform_matrix, &filter_out](int64_t start, int64_t limit) { Tensor filter_transform_buffer; OP_REQUIRES_OK(ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({base_filter_rows, base_filter_cols, num_filters_transform, filter_shards_row, filter_shards_col, in_depth}), &filter_transform_buffer)); T* filter_buf = filter_transform_buffer.template flat<T>().data(); Tensor filter_output_buffer; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({tile_spatial_size, num_filters_transform, filter_shards_row, filter_shards_col, in_depth}), &filter_output_buffer)); T* out_buffer = filter_output_buffer.template flat<T>().data(); const int64_t num_filters = limit - start; const int64_t od_unroll = num_filters_transform; const int64_t od_unroll_limit = (num_filters / od_unroll) * od_unroll; for (int64_t od = start; od < od_unroll_limit; od += od_unroll) { TransformFilterRange<T>()(args, transform, od, od + od_unroll, filter_in, transform_matrix, out_buffer, filter_buf, filter_out); } if (od_unroll_limit < limit) { TransformFilterRange<T>()(args, transform, od_unroll_limit, limit, filter_in, transform_matrix, out_buffer, filter_buf, filter_out); } }; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); const int64_t shard_cost = args.filter_rows * args.filter_cols * in_depth * filter_shards_total * tile_spatial_size; Shard(1, worker_threads.workers, out_depth, shard_cost, shard); } }; template <typename T> class GemmFilterPacker { public: typedef Eigen::internal::const_blas_data_mapper<T, int64_t, Eigen::RowMajor> LhsMapper; typedef Eigen::internal::gebp_traits<T, T> Traits; Eigen::internal::gemm_pack_lhs< T, int64_t, LhsMapper, Traits::mr, Traits::LhsProgress, typename Traits::LhsPacket4Packing, Eigen::RowMajor> pack_lhs; GemmFilterPacker(const int64_t rows, const int64_t depth, const T* lhs_input, T* lhs_block) : rows_(rows), depth_(depth), lhs_block_(lhs_block), lhs_mapper_(lhs_input, depth_) {} void Run() { pack_lhs(lhs_block_, lhs_mapper_, depth_, rows_); } private: const int64_t rows_; const int64_t depth_; T* lhs_block_; LhsMapper lhs_mapper_; }; template <typename T> struct PackFilters { void operator()(OpKernelContext* ctx, const Conv2DArgs& args, const int64_t tile_spatial_size, const int64_t filter_shards_row, const int64_t filter_shards_col, const T* filter_transform_data, std::vector<Tensor>* packed_filters) { const int64_t in_depth = args.in_depth; const int64_t out_depth = args.out_depth; const int64_t num_filters = filter_shards_row * filter_shards_col * out_depth; auto shard = [&ctx, &packed_filters, &filter_transform_data, &in_depth, &out_depth, &filter_shards_row, &filter_shards_col, &num_filters](int64_t start, int64_t limit) { const int64_t filter_coord_stride = num_filters * in_depth; for (int64_t i = start; i < limit; ++i) { OP_REQUIRES_OK( ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({out_depth, filter_shards_row, filter_shards_col, in_depth}), &(*packed_filters)[i])); T* packed_filter = (*packed_filters)[i].template flat<T>().data(); GemmFilterPacker<T> packer( num_filters, in_depth, filter_transform_data + i * filter_coord_stride, packed_filter); packer.Run(); } }; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, tile_spatial_size, num_filters * in_depth, 
shard); } }; template <typename T> class GemmState { public: typedef Eigen::internal::const_blas_data_mapper<T, int64_t, Eigen::ColMajor> RhsMapper; typedef Eigen::internal::blas_data_mapper<T, int64_t, Eigen::ColMajor> OutputMapper; typedef Eigen::internal::gebp_traits<T, T> Traits; Eigen::internal::gemm_pack_rhs<T, int64_t, RhsMapper, Traits::nr, Eigen::ColMajor> pack_rhs; Eigen::internal::gebp_kernel<T, T, int64_t, OutputMapper, Traits::mr, Traits::nr, false, false> gebp; GemmState(const int64_t rows, const int64_t cols, const int64_t depth, const int64_t out_buffer_size, const T* lhs_block, const T* rhs_input, T* rhs_block, T* out_buffer) : rows_(rows), cols_(cols), depth_(depth), out_buffer_size_(out_buffer_size), lhs_block_(lhs_block), rhs_block_(rhs_block), out_buffer_(out_buffer), rhs_mapper_(rhs_input, depth_), out_mapper_(out_buffer, rows_) {} void PackRhs() { pack_rhs(rhs_block_, rhs_mapper_, depth_, cols_); } void Compute() { memset(out_buffer_, 0, sizeof(T) * out_buffer_size_); gebp(out_mapper_, lhs_block_, rhs_block_, rows_, depth_, cols_, 1.0); } private: const int64_t rows_; const int64_t cols_; const int64_t depth_; const int64_t out_buffer_size_; const T* lhs_block_; T* rhs_block_; T* out_buffer_; RhsMapper rhs_mapper_; OutputMapper out_mapper_; }; template <typename T> struct CopyInputTile { void operator()(const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const int64_t num_tiles, const int64_t in_r_start, const int64_t in_c_start, const T* input, T* tile_buffer) { typedef typename Eigen::internal::packet_traits<T>::type Packet; static const int64_t kPacketSize = (sizeof(Packet) / sizeof(T)); const int64_t tile_rows = transform->input_shape().rows; const int64_t tile_cols = transform->input_shape().cols; const int64_t coord_stride = num_tiles * args.in_depth; const int64_t input_vectorized_size = (args.in_depth / kPacketSize) * kPacketSize; const int64_t input_scalar_size = args.in_depth % kPacketSize; for (int64_t r = 0; r < tile_rows; ++r) { const int64_t in_r = in_r_start + r; if (in_r < 0 || in_r >= args.in_rows) continue; for (int64_t c = 0; c < tile_cols; ++c) { const int64_t in_c = in_c_start + c; if (in_c < 0 || in_c >= args.in_cols) continue; auto* in = input + (in_r * args.in_cols + in_c) * args.in_depth; auto* tile = tile_buffer + coord_stride * (r * tile_rows + c); for (int64_t d = 0; d < input_vectorized_size; d += kPacketSize) { auto v = Eigen::internal::ploadu<Packet>(in + d); Eigen::internal::pstoreu<T>(tile, v); tile += kPacketSize; } for (int64_t d = 0; d < input_scalar_size; ++d) { tile[d] = in[input_vectorized_size + d]; } } } } }; template <typename T> struct TransformInputTiles { typedef Eigen::Map< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> MatrixMap; typedef Eigen::Map< const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> ConstMatrixMap; void operator()(const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const int64_t num_tiles, const int64_t in_r_start, const int64_t in_c_start, const T* input, const T* transform_matrix, T* tile_buffer, T* tile_transform) { const int64_t tile_rows = transform->input_shape().rows; const int64_t tile_cols = transform->input_shape().cols; const int64_t tile_spatial_size = tile_rows * tile_cols; const int64_t tile_stride_cols = transform->output_shape().cols; const int64_t coord_stride = num_tiles * args.in_depth; const int64_t num_tiles_stride = args.in_depth; memset(tile_buffer, 0, sizeof(T) * tile_spatial_size * coord_stride); const int64_t 
in_r = in_r_start; for (int64_t t = 0; t < num_tiles; ++t) { const int64_t num_tiles_base = t * num_tiles_stride; const int64_t in_c = in_c_start + t * tile_stride_cols; CopyInputTile<T>()(args, transform, num_tiles, in_r, in_c, input, tile_buffer + num_tiles_base); } ConstMatrixMap A(transform_matrix, tile_spatial_size, tile_spatial_size); ConstMatrixMap B(tile_buffer, tile_spatial_size, coord_stride); MatrixMap C(tile_transform, tile_spatial_size, coord_stride); C.noalias() = A * B; } }; template <typename T> struct TransformOutputTile { typedef Eigen::Map< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> MatrixMap; typedef Eigen::Map< const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> ConstMatrixMap; void operator()(const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const int64_t num_tiles, const int64_t in_r, const int64_t in_c, const int64_t filter_shards_row, const int64_t filter_shards_col, const T* out_transform_matrix, const T* out_buffer, T* out_transform_buffer, T* output) { const int64_t tile_rows = transform->input_shape().rows; const int64_t tile_cols = transform->input_shape().cols; const int64_t tile_spatial_size = tile_rows * tile_cols; const int64_t out_buf_stride = num_tiles * args.out_depth * filter_shards_row * filter_shards_col; const int64_t out_tile_rows = transform->output_shape().rows; const int64_t out_tile_cols = transform->output_shape().cols; const int64_t out_tile_spatial_size = out_tile_rows * out_tile_cols; ConstMatrixMap A(out_transform_matrix, out_tile_spatial_size, tile_spatial_size); ConstMatrixMap B(out_buffer, tile_spatial_size, out_buf_stride); MatrixMap C(out_transform_buffer, out_tile_spatial_size, out_buf_stride); C.noalias() = A * B; const int64_t tile_stride_rows = transform->output_shape().rows; const int64_t tile_stride_cols = transform->output_shape().cols; const int64_t out_depth_stride = filter_shards_row * filter_shards_col; const int64_t num_tiles_stride = args.out_depth * out_depth_stride; for (int64_t t = 0; t < num_tiles; ++t) { const int64_t tile_base = t * num_tiles_stride; for (int64_t od = 0; od < args.out_depth; ++od) { const int64_t out_depth_base = od * out_depth_stride; for (int64_t sr = 0; sr < filter_shards_row; ++sr) { for (int64_t sc = 0; sc < filter_shards_col; ++sc) { const int64_t shard_base = sr * filter_shards_col + sc; const int64_t out_buf_base = tile_base + out_depth_base + shard_base; const int64_t out_r_start = in_r + args.pad_rows - sr * tile_stride_rows; const int64_t out_c_start = (in_c + t * tile_stride_cols) + args.pad_cols - sc * tile_stride_cols; if (out_r_start < 0 || out_r_start >= args.out_rows || out_c_start < 0 || out_c_start >= args.out_cols) { continue; } const bool inc_output = (sr == 0 && sc == 0) ? 
false : true; for (int64_t ot_row = 0; ot_row < out_tile_rows; ++ot_row) { const int64_t out_r = out_r_start + ot_row; if (out_r >= args.out_rows) continue; for (int64_t ot_col = 0; ot_col < out_tile_cols; ++ot_col) { const int64_t out_c = out_c_start + ot_col; if (out_c >= args.out_cols) continue; const int64_t out_buf_index = ot_row * out_tile_cols + ot_col; const T out_val = out_transform_buffer[out_buf_base + out_buf_index * out_buf_stride]; const int64_t output_index = args.out_depth * (out_r * args.out_cols + out_c) + od; if (inc_output) { output[output_index] += out_val; } else { output[output_index] = out_val; } } } } } } } } }; template <typename T> struct Conv2DState { Conv2DState(const int64_t tile_spatial_size, const int64_t filter_shards_row, const int64_t filter_shards_col, const T* input, const T* tile_transform_matrix, const T* output_transform_matrix, T* buffer1, T* buffer2, T* packed_tile_buffer, T* gemm_output_buffer) : tile_spatial_size(tile_spatial_size), filter_shards_row(filter_shards_row), filter_shards_col(filter_shards_col), input(input), tile_transform_matrix(tile_transform_matrix), output_transform_matrix(output_transform_matrix), buffer1(buffer1), buffer2(buffer2), packed_tile_buffer(packed_tile_buffer), gemm_output_buffer(gemm_output_buffer) {} const int64_t tile_spatial_size; const int64_t filter_shards_row; const int64_t filter_shards_col; const T* input; const T* tile_transform_matrix; const T* output_transform_matrix; T* buffer1; T* buffer2; T* packed_tile_buffer; T* gemm_output_buffer; }; template <typename T> struct ComputeConv2D { void operator()(const Conv2DArgs& args, const DeepConv2DTransform<T>* transform, const Conv2DState<T>& cs, const int64_t in_r, const int64_t in_c, const int64_t num_tiles, const std::vector<Tensor>& packed_filters, const T* input, T* output) { TransformInputTiles<T>()(args, transform, num_tiles, in_r, in_c, input, cs.tile_transform_matrix, cs.buffer1, cs.buffer2); const int64_t in_depth = args.in_depth; const int64_t out_depth = args.out_depth; const int64_t num_filters = cs.filter_shards_row * cs.filter_shards_col * out_depth; const int64_t tile_coord_stride = num_tiles * in_depth; const int64_t gemm_out_buf_size = num_tiles * num_filters; const int64_t gemm_out_buf_bytes = gemm_out_buf_size * sizeof(T); for (int64_t i = 0; i < cs.tile_spatial_size; ++i) { GemmState<T> gemm(num_filters, num_tiles, in_depth, gemm_out_buf_size, packed_filters[i].template flat<T>().data(), cs.buffer2 + i * tile_coord_stride, cs.packed_tile_buffer, cs.gemm_output_buffer); gemm.PackRhs(); gemm.Compute(); memcpy(cs.buffer1 + i * gemm_out_buf_size, cs.gemm_output_buffer, gemm_out_buf_bytes); } TransformOutputTile<T>()(args, transform, num_tiles, in_r, in_c, cs.filter_shards_row, cs.filter_shards_col, cs.output_transform_matrix, cs.buffer1, cs.buffer2, output); } }; namespace functor { template <typename T> struct DeepConv2D<CPUDevice, T> { void operator()(OpKernelContext* ctx, const Conv2DArgs& args, const T* input, const T* filter, T* output) { std::unique_ptr<DeepConv2DTransform<T>> transform(new WinogradTransform<T>); const int64_t in_depth = args.in_depth; const int64_t out_depth = args.out_depth; const int64_t tile_rows = transform->input_shape().rows; const int64_t tile_cols = transform->input_shape().cols; const int64_t tile_spatial_size = tile_rows * tile_cols; const int64_t out_tile_rows = transform->output_shape().rows; const int64_t out_tile_cols = transform->output_shape().cols; const int64_t out_tile_spatial_size = out_tile_rows * 
out_tile_cols; const int64_t base_filter_rows = transform->filter_shape().rows; const int64_t filter_residual_row = std::max(int64_t{0}, args.filter_rows - base_filter_rows); const int64_t filter_shards_row = 1 + (filter_residual_row + 2 - 1) / 2; const int64_t filter_residual_col = std::max(int64_t{0}, args.filter_cols - base_filter_rows); const int64_t filter_shards_col = 1 + (filter_residual_col + 2 - 1) / 2; Tensor filter_transform; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({tile_rows, tile_cols, out_depth, filter_shards_row, filter_shards_col, in_depth}), &filter_transform)); T* filter_transform_data = filter_transform.template flat<T>().data(); TransformFilters<T>()(ctx, args, transform.get(), filter_shards_row, filter_shards_col, filter, filter_transform_data); std::vector<Tensor> packed_filters(tile_spatial_size); PackFilters<T>()(ctx, args, tile_spatial_size, filter_shards_row, filter_shards_col, filter_transform_data, &packed_filters); Tensor tile_transform_matrix_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({tile_spatial_size, tile_spatial_size}), &tile_transform_matrix_tensor)); T* tile_transform_matrix = tile_transform_matrix_tensor.template flat<T>().data(); transform->GetInputTransformMatrix(tile_spatial_size, tile_spatial_size, tile_transform_matrix); Tensor output_transform_matrix_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({out_tile_spatial_size, tile_spatial_size}), &output_transform_matrix_tensor)); T* output_transform_matrix = output_transform_matrix_tensor.template flat<T>().data(); transform->GetOutputTransformMatrix( out_tile_spatial_size, tile_spatial_size, output_transform_matrix); auto shard = [&ctx, &args, &transform, &packed_filters, &in_depth, out_depth, out_tile_rows, out_tile_cols, filter_shards_row, filter_shards_col, tile_spatial_size, &input, &tile_transform_matrix, &output_transform_matrix, &output](int64_t batch_start, int64_t batch_limit) { const int64_t row_tiles = (args.out_rows + out_tile_rows - 1) / out_tile_rows + filter_shards_row - 1; const int64_t col_tiles = (args.out_cols + out_tile_cols - 1) / out_tile_cols + filter_shards_col - 1; const int64_t filter_shard_size = filter_shards_row * filter_shards_col; const int64_t out_tile_spatial_size = out_tile_rows * out_tile_cols; const int64_t cache_size = (256LL << 10) / sizeof(T); const int64_t tile_transform_matrix_size = tile_spatial_size * tile_spatial_size; const int64_t output_transform_matrix_size = out_tile_spatial_size * tile_spatial_size; const int64_t filter_depth_size = in_depth * out_depth * filter_shard_size; const bool small_filter = ((filter_depth_size * 100) / cache_size) <= 25; const int64_t cache_reserve_size = small_filter ? 
filter_depth_size : 1024; const int64_t total_fixed_cost = tile_transform_matrix_size + output_transform_matrix_size + cache_reserve_size; const int64_t buffer1_per_tile_size = tile_spatial_size * std::max(in_depth, out_depth * filter_shard_size); const int64_t buffer2_per_tile_size = std::max(tile_spatial_size * in_depth, out_tile_spatial_size * out_depth * filter_shard_size); const int64_t packed_tile_per_tile_size = in_depth; const int64_t gemm_out_per_tile_size = out_depth * filter_shard_size; const int64_t total_per_tile_cost = buffer1_per_tile_size + buffer2_per_tile_size + packed_tile_per_tile_size + gemm_out_per_tile_size; const int64_t num_tiles_cache = std::max( int64{4}, (cache_size - total_fixed_cost) / total_per_tile_cost); const int64_t num_tiles = std::min(num_tiles_cache, col_tiles); const int64_t buffer1_tile_size = tile_spatial_size * num_tiles * in_depth; const int64_t buffer1_out_size = tile_spatial_size * num_tiles * out_depth * filter_shard_size; const int64_t buffer1_size = std::max(buffer1_tile_size, buffer1_out_size); Tensor buffer1_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({buffer1_size}), &buffer1_tensor)); T* buffer1 = buffer1_tensor.template flat<T>().data(); const int64_t buffer2_tile_transform_size = tile_spatial_size * num_tiles * in_depth; const int64_t buffer2_out_transform_size = out_tile_spatial_size * num_tiles * out_depth * filter_shard_size; const int64_t buffer2_size = std::max(buffer2_tile_transform_size, buffer2_out_transform_size); Tensor buffer2_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({buffer2_size}), &buffer2_tensor)); T* buffer2 = buffer2_tensor.template flat<T>().data(); Tensor packed_tile_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({num_tiles, in_depth}), &packed_tile_tensor)); T* packed_tile_buffer = packed_tile_tensor.template flat<T>().data(); Tensor gemm_output_tensor; OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({num_tiles, out_depth, filter_shards_row, filter_shards_col}), &gemm_output_tensor)); T* gemm_output_buffer = gemm_output_tensor.template flat<T>().data(); Conv2DState<T> conv_state(tile_spatial_size, filter_shards_row, filter_shards_col, input, tile_transform_matrix, output_transform_matrix, buffer1, buffer2, packed_tile_buffer, gemm_output_buffer); const int64_t row_pad = args.pad_rows; const int64_t col_pad = args.pad_cols; const int64_t unroll_col_limit = (col_tiles / num_tiles) * num_tiles; const int64_t input_image_size = args.in_rows * args.in_cols * in_depth; const int64_t output_image_size = args.out_rows * args.out_cols * out_depth; const int64_t tile_stride_rows = transform->output_shape().rows; const int64_t tile_stride_cols = transform->output_shape().cols; for (int64_t b = batch_start; b < batch_limit; ++b) { const int64_t in_base = b * input_image_size; const int64_t out_base = b * output_image_size; for (int64_t tile_r = 0; tile_r < row_tiles; ++tile_r) { const int64_t in_r = tile_r * tile_stride_rows - row_pad; for (int64_t tile_c = 0; tile_c < unroll_col_limit; tile_c += num_tiles) { const int64_t in_c = tile_c * tile_stride_cols - col_pad; ComputeConv2D<T>()(args, transform.get(), conv_state, in_r, in_c, num_tiles, packed_filters, input + in_base, output + out_base); } if (unroll_col_limit < col_tiles) { const int64_t rem_tiles = col_tiles - unroll_col_limit; const int64_t in_c = unroll_col_limit * tile_stride_cols - col_pad; ComputeConv2D<T>()(args, 
transform.get(), conv_state, in_r, in_c, rem_tiles, packed_filters, input + in_base, output + out_base); } } } }; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); const int64_t shard_cost = args.out_rows * args.out_cols * args.out_depth * tile_spatial_size * args.in_depth; Shard(worker_threads.num_threads, worker_threads.workers, args.batch, shard_cost, shard); } }; } template struct functor::DeepConv2D<CPUDevice, float>; }
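CanUseDeepConv2D above is a pure cost heuristic: it estimates the work of the Winograd ("deep") path versus a direct 3x3 convolution and picks whichever is cheaper. Below is a standalone sketch of that comparison, with the cost formulas copied from GetDeepConvCost/GetDirectConvCost and the 4x4 input / 2x2 output Winograd tile sizes assumed (the kernel reads these from WinogradTransform<float>):

#include <cstdint>
#include <iostream>

// Deep (Winograd) cost per the formula in GetDeepConvCost, assuming a
// 4x4 input tile and a 2x2 output tile.
int64_t DeepConvCost(int64_t in_depth, int64_t out_depth, int64_t out_rows,
                     int64_t out_cols) {
  const int64_t input_tile = 4 * 4, output_tile = 2 * 2;
  const int64_t input_transform = input_tile * input_tile * in_depth;
  const int64_t product = input_tile * in_depth * out_depth;
  const int64_t output_transform = output_tile * input_tile * out_depth;
  const int64_t num_tiles = ((out_rows + 1) / 2) * ((out_cols + 1) / 2);
  return num_tiles * (input_transform + product + output_transform);
}

// Direct 3x3 convolution cost per GetDirectConvCost.
int64_t DirectConvCost(int64_t in_depth, int64_t out_depth, int64_t out_rows,
                       int64_t out_cols) {
  return 3 * 3 * in_depth * out_depth * out_rows * out_cols;
}

int main() {
  // Example layer shape; larger channel depths tip the balance toward the
  // deep path, since the transform costs are amortized over out_depth.
  const int64_t in_depth = 256, out_depth = 256, rows = 28, cols = 28;
  std::cout << "deep cost:   " << DeepConvCost(in_depth, out_depth, rows, cols)
            << "\ndirect cost: "
            << DirectConvCost(in_depth, out_depth, rows, cols) << std::endl;
}

Note that even when the deep cost wins, the kernel above still requires stride 1, a 3x3 filter, and the TF_USE_DEEP_CONV2D environment variable to be set before it takes the Winograd path.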
#include "tensorflow/core/kernels/winograd_transform.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { static void ComputeKroneckerProduct(const int rows, const int cols, const float* matrix, float* matrix_out) { for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { const float v = matrix[i * cols + j]; const int output_index_base = cols * (i * rows * cols + j); for (int k = 0; k < rows; ++k) { for (int l = 0; l < cols; ++l) { const int input_index = k * cols + l; const int output_index = k * cols * cols + l; matrix_out[output_index_base + output_index] = matrix[input_index] * v; } } } } } TEST(DeepConv2DTransformTest, Basic) { const int rows = 2; const int cols = 2; float transform_matrix[] = {1, 2, 3, 4}; const int kron_rows = rows * rows; const int kron_cols = cols * cols; float transform_matrix_kron[kron_rows * kron_cols]; ComputeKroneckerProduct(rows, cols, &transform_matrix[0], &transform_matrix_kron[0]); float transform_matrix_test[] = {1, 2, 2, 4, 3, 4, 6, 8, 3, 6, 4, 8, 9, 12, 12, 16}; for (int i = 0; i < kron_rows * kron_cols; ++i) { EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]); } } TEST(DeepConv2DTransformTest, WingradFilterTransformMatrix) { const int rows = 4; const int cols = 3; float transform_matrix[] = {1, 0, 0, 0.5, 0.5, 0.5, 0.5, -0.5, 0.5, 0, 0, 1}; const int kron_rows = rows * rows; const int kron_cols = cols * cols; float transform_matrix_kron[kron_rows * kron_cols]; ComputeKroneckerProduct(rows, cols, &transform_matrix[0], &transform_matrix_kron[0]); float transform_matrix_test[kron_rows * kron_cols]; WinogradTransform<float> t; t.GetFilterTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]); for (int i = 0; i < kron_rows * kron_cols; ++i) { EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]); } } TEST(DeepConv2DTransformTest, WingradInputTransformMatrix) { const int rows = 4; const int cols = 4; float transform_matrix[] = {1, 0, -1, 0, 0, 1, 1, 0, 0, -1, 1, 0, 0, 1, 0, -1}; const int kron_rows = rows * rows; const int kron_cols = cols * cols; float transform_matrix_kron[kron_rows * kron_cols]; ComputeKroneckerProduct(rows, cols, &transform_matrix[0], &transform_matrix_kron[0]); float transform_matrix_test[kron_rows * kron_cols]; WinogradTransform<float> t; t.GetInputTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]); for (int i = 0; i < kron_rows * kron_cols; ++i) { EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]); } } TEST(DeepConv2DTransformTest, WingradOutputTransformMatrix) { const int rows = 2; const int cols = 4; float transform_matrix[] = {1, 1, 1, 0, 0, 1, -1, -1}; const int kron_rows = rows * rows; const int kron_cols = cols * cols; float transform_matrix_kron[kron_rows * kron_cols]; ComputeKroneckerProduct(rows, cols, &transform_matrix[0], &transform_matrix_kron[0]); float transform_matrix_test[kron_rows * kron_cols]; WinogradTransform<float> t; t.GetOutputTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]); for (int i = 0; i < kron_rows * kron_cols; ++i) { EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/deep_conv2d.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/deep_conv2d_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b8d6818f-df0e-4df0-98c7-5f5861ecee80
cpp
tensorflow/tensorflow
sendrecv_ops
tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc
tensorflow/core/kernels/sendrecv_ops_test.cc
#include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/shape.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { class SendOp : public XlaOpKernel { public: explicit SendOp(OpKernelConstruction* ctx); void Compile(XlaOpKernelContext* ctx) override; private: string tensor_name_; SendOp(const SendOp&) = delete; void operator=(const SendOp&) = delete; }; SendOp::SendOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("tensor_name", &tensor_name_)); } void SendOp::Compile(XlaOpKernelContext* ctx) { XlaCompiler* compiler = ctx->compiler(); xla::ChannelHandle channel; OP_REQUIRES_OK(ctx, compiler->GetChannelHandle(tensor_name_, &channel)); xla::Send(ctx->Input(0), channel); } REGISTER_XLA_OP(Name("XlaSend"), SendOp); class RecvOp : public XlaOpKernel { public: explicit RecvOp(OpKernelConstruction* ctx); void Compile(XlaOpKernelContext* ctx) override; private: string tensor_name_; xla::Shape shape_; RecvOp(const RecvOp&) = delete; void operator=(const RecvOp&) = delete; }; RecvOp::RecvOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("tensor_name", &tensor_name_)); TensorShape tensor_shape; DataType dtype; OP_REQUIRES_OK(ctx, ctx->GetAttr("shape", &tensor_shape)); OP_REQUIRES_OK(ctx, ctx->GetAttr("dtype", &dtype)); OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, tensor_shape, &shape_)); } void RecvOp::Compile(XlaOpKernelContext* ctx) { XlaCompiler* compiler = ctx->compiler(); xla::ChannelHandle channel; OP_REQUIRES_OK(ctx, compiler->GetChannelHandle(tensor_name_, &channel)); ctx->SetOutput(0, xla::Recv(ctx->builder(), shape_, channel)); } REGISTER_XLA_OP(Name("XlaRecv"), RecvOp); } }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class DummyRendezvous : public Rendezvous { Status Send(const ParsedKey& key, const Args& args, const Tensor& val, const bool is_dead) override { return absl::OkStatus(); } void RecvAsync(const ParsedKey& key, const Args& args, DoneCallback done) override { static Tensor* t = new Tensor(DT_FLOAT, TensorShape({0})); done(absl::OkStatus(), args, args, *t, false); } void StartAbort(const Status& status) override {} }; static Graph* Send() { Graph* g = new Graph(OpRegistry::Global()); Tensor in0(DT_FLOAT, TensorShape({0})); test::graph::Send(g, test::graph::Constant(g, in0), "T", "/cpu:0", 1, "/cpu:0"); test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0"); return g; } static Graph* Recv() { Graph* g = new Graph(OpRegistry::Global()); test::graph::Recv(g, "T", "float", "/cpu:0", 1, "/cpu:0"); return g; } void BM_Send(::testing::benchmark::State& state) { test::Benchmark("cpu", Send(), nullptr, nullptr, new DummyRendezvous, "", false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations())); } BENCHMARK(BM_Send)->UseRealTime(); void BM_Recv(::testing::benchmark::State& state) { test::Benchmark("cpu", Recv(), nullptr, nullptr, new DummyRendezvous, "", false) .Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations())); } BENCHMARK(BM_Recv)->UseRealTime(); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sendrecv_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sendrecv_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0bf94df6-92bc-41e0-9aff-4d9ce0fb735c
cpp
tensorflow/tensorflow
sparse_xent_op
tensorflow/core/kernels/sparse_xent_op.cc
tensorflow/core/kernels/sparse_xent_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/sparse_xent_op.h" #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/env_var.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Index> Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) { if (labels.NumElements() == 0) return absl::OkStatus(); const auto label_values = labels.vec<Index>(); int64_t bad_index; auto min_max_dim_value = std::minmax_element( label_values.data(), label_values.data() + label_values.size()); if (*min_max_dim_value.first < 0 || *min_max_dim_value.second >= max_index) { bad_index = (*min_max_dim_value.first < 0) ? *min_max_dim_value.first : *min_max_dim_value.second; return errors::InvalidArgument( "Received a label value of ", bad_index, " which is outside the valid range of [0, ", max_index, "). Label values: ", labels.SummarizeValue(labels.NumElements())); } return absl::OkStatus(); } template <typename Device, typename T, typename Index> class SparseSoftmaxXentWithLogitsOp : public OpKernel { public: explicit SparseSoftmaxXentWithLogitsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& logits = context->input(0); const Tensor& labels = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(logits.shape()), errors::InvalidArgument("logits must be 2-D, but got shape ", logits.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(labels.shape()), errors::InvalidArgument("labels must be 1-D, but got shape ", labels.shape().DebugString())); OP_REQUIRES(context, logits.dim_size(0) == labels.dim_size(0), errors::InvalidArgument( "logits and labels must have the same first dimension, " "got logits shape ", logits.shape().DebugString(), " and labels shape ", labels.shape().DebugString())); OP_REQUIRES(context, logits.dim_size(1) > 0, errors::InvalidArgument( "Must have at least one class, but got logits shape ", logits.shape().DebugString())); if (std::is_same<Device, GPUDevice>::value) { OP_REQUIRES( context, !OpDeterminismRequired(), errors::Unimplemented( "The GPU implementation of SparseSoftmaxCrossEntropyWithLogits" " that would have been executed is not deterministic. 
Note that" " the Python API uses an alternative, deterministic," " GPU-accelerated path when determinsim is enabled.")); } Tensor scratch; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, labels.shape(), &scratch)); Tensor* loss_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {1}, 0, labels.shape(), &loss_out)); Tensor* back_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 1, logits.shape(), &back_out)); if (logits.dim_size(0) > 0) { if (std::is_same<Device, CPUDevice>::value) { OP_REQUIRES_OK( context, CheckInvalidLabelIndex<Index>(labels, logits.dim_size(1))); } functor::SparseXentFunctor<Device, T, Index> functor; functor(context, logits.matrix<T>(), labels.vec<Index>(), scratch.vec<T>(), loss_out->vec<T>(), back_out->matrix<T>()); } } }; namespace functor { template <typename T, typename Index> struct SparseXentFunctor<CPUDevice, T, Index> { void operator()(OpKernelContext* ctx, typename TTypes<T>::ConstMatrix logits, typename TTypes<Index>::ConstVec labels, typename TTypes<T>::Vec scratch, typename TTypes<T>::Vec loss, typename TTypes<T>::Matrix backprop) { SparseXentEigenImpl<CPUDevice, T, Index>::Compute(ctx, logits, labels, scratch, loss, backprop); } }; } #define REGISTER(Dev, T, Index) \ REGISTER_KERNEL_BUILDER( \ Name("SparseSoftmaxCrossEntropyWithLogits") \ .Device(DEVICE_##Dev) \ .TypeConstraint<T>("T") \ .TypeConstraint<Index>("Tlabels"), \ SparseSoftmaxXentWithLogitsOp<Dev##Device, T, Index>); REGISTER(CPU, float, int32) REGISTER(CPU, float, int64_t) REGISTER(CPU, double, int32) REGISTER(CPU, double, int64_t) REGISTER(CPU, Eigen::half, int32) REGISTER(CPU, Eigen::half, int64_t) REGISTER(CPU, bfloat16, int32) REGISTER(CPU, bfloat16, int64_t) #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM REGISTER(GPU, float, int32) REGISTER(GPU, float, int64_t) REGISTER(GPU, Eigen::half, int32) REGISTER(GPU, Eigen::half, int64_t) REGISTER(GPU, Eigen::bfloat16, int32) REGISTER(GPU, Eigen::bfloat16, int64_t) #endif #undef REGISTER }
#include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/xent_op.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { template <class T> static Graph* SparseXent(int batch_size, int num_classes, DataType type) { Graph* g = new Graph(OpRegistry::Global()); Tensor logits(type, TensorShape({batch_size, num_classes})); logits.flat<T>().setRandom(); Tensor labels(DT_INT64, TensorShape({batch_size})); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(0, num_classes - 1); auto labels_t = labels.flat<int64_t>(); for (int i = 0; i < batch_size; ++i) { labels_t(i) = dist(gen); } test::graph::Binary(g, "SparseSoftmaxCrossEntropyWithLogits", test::graph::Constant(g, logits), test::graph::Constant(g, labels)); return g; } #define BM_SparseXentDev(BATCH, CLASS, DEVICE, C_TYPE, TF_TYPE) \ static void BM_SparseXent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, SparseXent<C_TYPE>(BATCH, CLASS, TF_TYPE), \ false) \ .Run(state); \ const int64_t tot = \ static_cast<int64_t>(state.iterations()) * BATCH * CLASS; \ state.SetItemsProcessed(tot); \ state.SetBytesProcessed(tot * sizeof(C_TYPE)); \ } \ BENCHMARK(BM_SparseXent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM BM_SparseXentDev(8, 1000000, gpu, float, DT_FLOAT); BM_SparseXentDev(16, 10000, gpu, float, DT_FLOAT); BM_SparseXentDev(16, 30000, gpu, float, DT_FLOAT); BM_SparseXentDev(16, 100000, gpu, float, DT_FLOAT); BM_SparseXentDev(32, 10000, gpu, float, DT_FLOAT); BM_SparseXentDev(32, 30000, gpu, float, DT_FLOAT); BM_SparseXentDev(32, 100000, gpu, float, DT_FLOAT); BM_SparseXentDev(64, 10000, gpu, float, DT_FLOAT); BM_SparseXentDev(64, 30000, gpu, float, DT_FLOAT); BM_SparseXentDev(64, 100000, gpu, float, DT_FLOAT); #endif #define BM_SparseXentDev_CPU(C_TYPE, TF_TYPE) \ BM_SparseXentDev(8, 1000000, cpu, C_TYPE, TF_TYPE); \ BM_SparseXentDev(16, 10000, cpu, C_TYPE, TF_TYPE); \ BM_SparseXentDev(16, 100000, cpu, C_TYPE, TF_TYPE); \ BM_SparseXentDev(32, 10000, cpu, C_TYPE, TF_TYPE); \ BM_SparseXentDev(32, 100000, cpu, C_TYPE, TF_TYPE); \ BM_SparseXentDev(64, 10000, cpu, C_TYPE, TF_TYPE); \ BM_SparseXentDev(64, 100000, cpu, C_TYPE, TF_TYPE); BM_SparseXentDev_CPU(float, DT_FLOAT); BM_SparseXentDev_CPU(bfloat16, DT_BFLOAT16); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_xent_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_xent_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
89a1a9b3-e32c-4cc4-b376-5c91402185c0
cpp
tensorflow/tensorflow
reverse_op
tensorflow/compiler/tf2xla/kernels/reverse_op.cc
tensorflow/core/kernels/reverse_op_test.cc
#include <vector> #include "absl/container/inlined_vector.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { class ReverseOp : public XlaOpKernel { public: explicit ReverseOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape x_shape = ctx->InputShape(0); const TensorShape revd_shape = ctx->InputShape(1); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(revd_shape), errors::InvalidArgument("axes must be a vector, not shape ", revd_shape.DebugString())); OP_REQUIRES(ctx, revd_shape.num_elements() == x_shape.dims(), errors::InvalidArgument("axes ", revd_shape.DebugString(), " must have same number of elements as" " than input tensor has dimensions ", x_shape.DebugString(), ".")); if (revd_shape.num_elements() == 0) { ctx->SetOutput(0, ctx->Input(0)); return; } xla::Literal lax; OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &lax)); std::vector<int64_t> dimensions; for (int d = 0; d < x_shape.dims(); ++d) { if (lax.Get<bool>({d})) { dimensions.push_back(d); } } ctx->SetOutput(0, xla::Rev(ctx->Input(0), dimensions)); } }; REGISTER_XLA_OP(Name("Reverse").CompileTimeConstantInput("dims"), ReverseOp); class ReverseV2Op : public XlaOpKernel { public: explicit ReverseV2Op(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape x_shape = ctx->InputShape(0); const TensorShape axes_shape = ctx->InputShape(1); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(axes_shape), errors::InvalidArgument("axes must be a vector, not shape ", axes_shape.DebugString())); OP_REQUIRES(ctx, axes_shape.num_elements() <= x_shape.dims(), errors::InvalidArgument("axes ", axes_shape.DebugString(), " can not have more elements" " than input tensor has dimensions ", x_shape.DebugString(), ".")); if (axes_shape.num_elements() == 0) { ctx->SetOutput(0, ctx->Input(0)); return; } std::vector<int64_t> axes; OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &axes)); absl::InlinedVector<bool, 8> witnessed_axes(x_shape.dims(), false); for (int d = 0; d < axes.size(); ++d) { OP_REQUIRES( ctx, (-x_shape.dims() <= axes[d]) && (axes[d] < x_shape.dims()), errors::InvalidArgument(axes[d], " is out of range [-", x_shape.dims(), ", ", x_shape.dims(), ").")); if (axes[d] < 0) { axes[d] += x_shape.dims(); } OP_REQUIRES(ctx, !witnessed_axes[axes[d]], errors::InvalidArgument("canonicalized axis ", axes[d], " was repeated.")); witnessed_axes[axes[d]] = true; } ctx->SetOutput(0, xla::Rev(ctx->Input(0), axes)); } }; REGISTER_XLA_OP(Name("ReverseV2").CompileTimeConstantInput("axis"), ReverseV2Op); } }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { class ReverseOpTest : public OpsTestBase { protected: void MakeOp(DataType data_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "Reverse") .Input(FakeInput(data_type)) .Input(FakeInput()) .Attr("T", data_type) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } template <typename T> void Reverse_0() { MakeOp(DataTypeToEnum<T>::value); AddInputFromArray<T>(TensorShape({}), {3}); AddInputFromArray<bool>(TensorShape({}), {true}); TF_ASSERT_OK(RunOpKernel()); Tensor* output = GetOutput(0); Tensor expected(allocator(), DataTypeToEnum<T>::value, TensorShape({})); expected.scalar<T>() = expected.scalar<T>().constant(3); test::ExpectTensorEqual<T>(expected, *output); } template <typename T> void Reverse_234() { MakeOp(DataTypeToEnum<T>::value); AddInputFromArray<T>(TensorShape({2, 3, 4}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); AddInputFromArray<bool>(TensorShape({3}), {true, false, true}); TF_ASSERT_OK(RunOpKernel()); Tensor* params_tensor = GetOutput(0); Tensor expected(allocator(), DataTypeToEnum<T>::value, TensorShape({2, 3, 4})); test::FillValues<T>(&expected, {15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8}); test::ExpectTensorEqual<T>(expected, *params_tensor); } template <typename T> void Reverse_1234() { MakeOp(DataTypeToEnum<T>::value); AddInputFromArray<T>(TensorShape({1, 2, 3, 4}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); AddInputFromArray<bool>(TensorShape({4}), {true, true, false, true}); TF_ASSERT_OK(RunOpKernel()); Tensor* params_tensor = GetOutput(0); Tensor expected(allocator(), DataTypeToEnum<T>::value, TensorShape({1, 2, 3, 4})); test::FillValues<T>(&expected, {15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8}); test::ExpectTensorEqual<T>(expected, *params_tensor); } }; TEST_F(ReverseOpTest, Reverse_0_uint8) { Reverse_0<uint8>(); } TEST_F(ReverseOpTest, Reverse_0_int8) { Reverse_0<int8>(); } TEST_F(ReverseOpTest, Reverse_0_uint16) { Reverse_0<uint16>(); } TEST_F(ReverseOpTest, Reverse_0_int16) { Reverse_0<int16>(); } TEST_F(ReverseOpTest, Reverse_0_float) { Reverse_0<float>(); } TEST_F(ReverseOpTest, Reverse_0_int32) { Reverse_0<int32>(); } TEST_F(ReverseOpTest, Reverse_0_int64) { Reverse_0<int64_t>(); } TEST_F(ReverseOpTest, Reverse_0_double) { Reverse_0<double>(); } TEST_F(ReverseOpTest, Reverse_0_complex64) { Reverse_0<complex64>(); } TEST_F(ReverseOpTest, Reverse_0_complex128) { Reverse_0<complex128>(); } TEST_F(ReverseOpTest, Reverse_234_uint8) 
{ Reverse_234<uint8>(); } TEST_F(ReverseOpTest, Reverse_234_int8) { Reverse_234<int8>(); } TEST_F(ReverseOpTest, Reverse_234_uint16) { Reverse_234<uint16>(); } TEST_F(ReverseOpTest, Reverse_234_int16) { Reverse_234<int16>(); } TEST_F(ReverseOpTest, Reverse_234_float) { Reverse_234<float>(); } TEST_F(ReverseOpTest, Reverse_234_int32) { Reverse_234<int32>(); } TEST_F(ReverseOpTest, Reverse_234_int64) { Reverse_234<int64_t>(); } TEST_F(ReverseOpTest, Reverse_234_double) { Reverse_234<double>(); } TEST_F(ReverseOpTest, Reverse_234_complex64) { Reverse_234<complex64>(); } TEST_F(ReverseOpTest, Reverse_234_complex128) { Reverse_234<complex128>(); } TEST_F(ReverseOpTest, Reverse_1234_uint8) { Reverse_1234<uint8>(); } TEST_F(ReverseOpTest, Reverse_1234_int8) { Reverse_1234<int8>(); } TEST_F(ReverseOpTest, Reverse_1234_uint16) { Reverse_1234<uint16>(); } TEST_F(ReverseOpTest, Reverse_1234_int16) { Reverse_1234<int16>(); } TEST_F(ReverseOpTest, Reverse_1234_float) { Reverse_1234<float>(); } TEST_F(ReverseOpTest, Reverse_1234_int32) { Reverse_1234<int32>(); } TEST_F(ReverseOpTest, Reverse_1234_int64) { Reverse_1234<int64_t>(); } TEST_F(ReverseOpTest, Reverse_1234_double) { Reverse_1234<double>(); } TEST_F(ReverseOpTest, Reverse_1234_complex64) { Reverse_1234<complex64>(); } TEST_F(ReverseOpTest, Reverse_1234_complex128) { Reverse_1234<complex128>(); } static SessionOptions GetOptions(int intra_threads) { SessionOptions opts; opts.config.set_intra_op_parallelism_threads(intra_threads); opts.config.set_inter_op_parallelism_threads(1); return opts; } template <typename T> static Graph* Reverse(const TensorShape& shape, int reverse_axis) { Graph* g = new Graph(OpRegistry::Global()); Tensor data(DataTypeToEnum<T>::value, shape); data.flat<T>().setRandom(); Tensor axes(DT_INT32, TensorShape({1})); axes.flat<int32>()(0) = reverse_axis; test::graph::Reverse(g, test::graph::Constant(g, data), test::graph::Constant(g, axes)); return g; } template <typename T> static void RunReverseRowsBenchmark(::testing::benchmark::State& state, int outer_dim, int middle_dim, int intra_threads, int channels) { SessionOptions opts = GetOptions(intra_threads); TensorShape shape{outer_dim, middle_dim, channels}; test::Benchmark("cpu", Reverse<T>(shape, 1), &opts, nullptr, nullptr, "", false) .Run(state); const int64_t num_items = static_cast<int64_t>(state.iterations()) * shape.num_elements(); state.SetItemsProcessed(num_items); state.SetBytesProcessed(num_items * sizeof(T)); } void BM_ReverseRowsOf1Channel_1T_float(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim, 1 , 1 ); } BENCHMARK(BM_ReverseRowsOf1Channel_1T_float) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf1Channel_1T_uint8(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim, 1 , 1 ); } BENCHMARK(BM_ReverseRowsOf1Channel_1T_uint8) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf1Channel_4T_float(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim, 4 , 1 ); } BENCHMARK(BM_ReverseRowsOf1Channel_4T_float) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); 
void BM_ReverseRowsOf1Channel_4T_uint8(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim, 4 , 1 ); } BENCHMARK(BM_ReverseRowsOf1Channel_4T_uint8) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf3Channels_1T_float(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim, 1 , 3 ); } BENCHMARK(BM_ReverseRowsOf3Channels_1T_float) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(30, 30) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf3Channels_1T_uint8(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim, 1 , 3 ); } BENCHMARK(BM_ReverseRowsOf3Channels_1T_uint8) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(30, 30) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf3Channels_4T_float(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim, 4 , 3 ); } BENCHMARK(BM_ReverseRowsOf3Channels_4T_float) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(30, 30) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf3Channels_4T_uint8(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim, 4 , 3 ); } BENCHMARK(BM_ReverseRowsOf3Channels_4T_uint8) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(30, 30) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf4Channels_1T_float(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim, 1 , 4 ); } BENCHMARK(BM_ReverseRowsOf4Channels_1T_float) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf4Channels_1T_uint8(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim, 1 , 4 ); } BENCHMARK(BM_ReverseRowsOf4Channels_1T_uint8) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf4Channels_4T_float(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<float>(state, outer_dim, middle_dim, 4 , 4 ); } BENCHMARK(BM_ReverseRowsOf4Channels_4T_float) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); void BM_ReverseRowsOf4Channels_4T_uint8(::testing::benchmark::State& state) { const int outer_dim = state.range(0); const int middle_dim = state.range(1); RunReverseRowsBenchmark<uint8>(state, outer_dim, middle_dim, 4 , 4 ); } BENCHMARK(BM_ReverseRowsOf4Channels_4T_uint8) ->UseRealTime() ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/reverse_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
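The Reverse benchmarks above time flipping axis 1 of an {outer_dim, middle_dim, channels} tensor. As a rough illustration of the semantics being measured (not the TensorFlow kernel itself), a minimal C++ sketch with hypothetical names:

#include <cstddef>
#include <cstdio>
#include <vector>

// Reverse the middle dimension of a row-major (outer, middle, channels)
// buffer, i.e. mirror the order of the middle entries while keeping each
// channel group together. Hypothetical helper, not the TF kernel.
template <typename T>
void ReverseMiddleDim(const std::vector<T>& in, std::vector<T>& out,
                      size_t outer, size_t middle, size_t channels) {
  for (size_t o = 0; o < outer; ++o) {
    for (size_t m = 0; m < middle; ++m) {
      const size_t src = (o * middle + m) * channels;
      const size_t dst = (o * middle + (middle - 1 - m)) * channels;
      for (size_t c = 0; c < channels; ++c) out[dst + c] = in[src + c];
    }
  }
}

int main() {
  // 1 x 3 x 2 example: entries [1 2], [3 4], [5 6] become [5 6], [3 4], [1 2].
  std::vector<int> in = {1, 2, 3, 4, 5, 6}, out(6);
  ReverseMiddleDim(in, out, 1, 3, 2);
  for (int v : out) std::printf("%d ", v);  // prints: 5 6 3 4 1 2
  std::printf("\n");
}

For the 3- and 4-channel benchmark variants only the channels parameter changes; each channel group still moves as a unit.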
daaa7d5d-62e4-4907-b147-eab5061f05d7
cpp
tensorflow/tensorflow
quantized_batch_norm_op
tensorflow/core/kernels/quantized_batch_norm_op.cc
tensorflow/core/kernels/quantized_batch_norm_op_test.cc
#define EIGEN_USE_THREADS #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/quantization_utils.h" namespace tensorflow { namespace { template <typename T1, typename T2> void ReferenceBatchNorm(const Tensor& input, const float input_min, const float input_max, const Tensor& mean, float mean_min, float mean_max, const Tensor& var, float var_min, float var_max, const Tensor& beta, float beta_min, float beta_max, const Tensor& gamma, float gamma_min, float gamma_max, float variance_epsilon, bool scale_after_normalization, Tensor* output, float* output_min, float* output_max) { auto input_flat = input.flat<T1>(); auto mean_flat = mean.flat<T1>(); auto var_flat = var.flat<T1>(); auto beta_flat = beta.flat<T1>(); auto gamma_flat = gamma.flat<T1>(); auto output_flat = output->flat<T2>(); const int depth = mean.dim_size(0); const int row_count = input_flat.size() / depth; *output_min = std::numeric_limits<float>::max(); *output_max = std::numeric_limits<float>::lowest(); for (int pass = 0; pass < 2; ++pass) { const bool is_range_pass = (pass == 0); for (int row_index = 0; row_index < row_count; ++row_index) { for (int channel = 0; channel < depth; ++channel) { const int input_index = (row_index * depth) + channel; const float input_value = QuantizedToFloat(input_flat(input_index), input_min, input_max); const float mean_value = QuantizedToFloat(mean_flat(channel), mean_min, mean_max); const float var_value = QuantizedToFloat(var_flat(channel), var_min, var_max); const float beta_value = QuantizedToFloat(beta_flat(channel), beta_min, beta_max); const float gamma_value = QuantizedToFloat(gamma_flat(channel), gamma_min, gamma_max); float output_value; if (scale_after_normalization) { output_value = (((input_value - mean_value) / sqrtf(var_value + variance_epsilon)) * gamma_value) + beta_value; } else { output_value = ((input_value - mean_value) / sqrtf(var_value + variance_epsilon)) + beta_value; } if (is_range_pass) { *output_min = std::min(output_value, *output_min); *output_max = std::max(output_value, *output_max); } else { output_flat(input_index) = FloatToQuantized<T2>(output_value, *output_min, *output_max); } } } } } template <typename T1, typename T2> void FixedPointBatchNorm(const Tensor& input, const float input_min, const float input_max, const Tensor& mean, float mean_min, float mean_max, const Tensor& var, float var_min, float var_max, const Tensor& beta, float beta_min, float beta_max, const Tensor& gamma, float gamma_min, float gamma_max, float variance_epsilon, bool scale_after_normalization, Tensor* output, float* output_min, float* output_max) { auto input_flat = input.flat<T1>(); auto mean_flat = mean.flat<T1>(); auto var_flat = var.flat<T1>(); auto beta_flat = beta.flat<T1>(); auto gamma_flat = gamma.flat<T1>(); auto output_flat = output->flat<T2>(); const int depth = mean.dim_size(0); const int row_count = input_flat.size() / depth; *output_min = -(1 << 20); *output_max = (1 << 20); Tensor scale_tensor(DataTypeToEnum<T2>::v(), {depth}); auto scale_flat = scale_tensor.flat<T2>(); Tensor offset_tensor(DataTypeToEnum<T2>::v(), {depth}); auto offset_flat = offset_tensor.flat<T2>(); for (int channel = 0; channel < depth; ++channel) { const float mean_value = QuantizedToFloat(mean_flat(channel), mean_min, mean_max); const float var_value = 
QuantizedToFloat(var_flat(channel), var_min, var_max); const float beta_value = QuantizedToFloat(beta_flat(channel), beta_min, beta_max); const float gamma_value = QuantizedToFloat(gamma_flat(channel), gamma_min, gamma_max); float scale_value; if (scale_after_normalization) { scale_value = (1.0f / sqrtf(var_value + variance_epsilon)) * gamma_value; } else { scale_value = (1.0f / sqrtf(var_value + variance_epsilon)); } const float offset_value = (-mean_value * scale_value) + beta_value; scale_flat(channel) = FloatToQuantized<T2>(scale_value, *output_min, *output_max); offset_flat(channel) = FloatToQuantized<T2>(offset_value, *output_min, *output_max); } const T2 one_in_output_space = FloatToQuantized<T2>(1.0f, *output_min, *output_max); for (int row_index = 0; row_index < row_count; ++row_index) { for (int channel = 0; channel < depth; ++channel) { const int input_index = (row_index * depth) + channel; const T2 input_value = RequantizeInNewRange<T1, T2>(input_flat(input_index), input_min, input_max, *output_min, *output_max); const T2 scale_value = scale_flat(channel); const T2 offset_value = offset_flat(channel); const T2 output_value = ((input_value * scale_value) / one_in_output_space) + offset_value; output_flat(input_index) = output_value; } } } } template <typename T1, typename T2> class QuantizedBatchNormOp : public OpKernel { public: explicit QuantizedBatchNormOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("variance_epsilon", &variance_epsilon_)); OP_REQUIRES_OK(context, context->GetAttr("scale_after_normalization", &scale_after_normalization_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const auto& input_min_tensor = context->input(1); OP_REQUIRES(context, input_min_tensor.NumElements() == 1, errors::InvalidArgument("input_min must have 1 element")); const float input_min = input_min_tensor.flat<float>()(0); const auto& input_max_tensor = context->input(2); OP_REQUIRES(context, input_max_tensor.NumElements() == 1, errors::InvalidArgument("input_max must have 1 element")); const float input_max = input_max_tensor.flat<float>()(0); const Tensor& mean = context->input(3); const auto& mean_min_tensor = context->input(4); OP_REQUIRES(context, mean_min_tensor.NumElements() == 1, errors::InvalidArgument("mean_min must have 1 element")); const float mean_min = mean_min_tensor.flat<float>()(0); const auto& mean_max_tensor = context->input(5); OP_REQUIRES(context, mean_max_tensor.NumElements() == 1, errors::InvalidArgument("mean_max must have 1 element")); const float mean_max = mean_max_tensor.flat<float>()(0); const Tensor& var = context->input(6); const auto& var_min_tensor = context->input(7); OP_REQUIRES(context, var_min_tensor.NumElements() == 1, errors::InvalidArgument("var_min must have 1 element")); const float var_min = var_min_tensor.flat<float>()(0); const auto& var_max_tensor = context->input(8); OP_REQUIRES(context, var_max_tensor.NumElements() == 1, errors::InvalidArgument("var_max must have 1 element")); const float var_max = var_max_tensor.flat<float>()(0); const Tensor& beta = context->input(9); const auto& beta_min_tensor = context->input(10); OP_REQUIRES(context, beta_min_tensor.NumElements() == 1, errors::InvalidArgument("beta_min must have 1 element")); const float beta_min = beta_min_tensor.flat<float>()(0); const auto& beta_max_tensor = context->input(11); OP_REQUIRES(context, beta_max_tensor.NumElements() == 1, errors::InvalidArgument("beta_max must have 1 element")); 
const float beta_max = beta_max_tensor.flat<float>()(0); const Tensor& gamma = context->input(12); const auto& gamma_min_tensor = context->input(13); OP_REQUIRES(context, gamma_min_tensor.NumElements() == 1, errors::InvalidArgument("gamma_min must have 1 element")); const float gamma_min = gamma_min_tensor.flat<float>()(0); const auto& gamma_max_tensor = context->input(14); OP_REQUIRES(context, gamma_max_tensor.NumElements() == 1, errors::InvalidArgument("gamma_max must have 1 element")); const float gamma_max = gamma_max_tensor.flat<float>()(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", input.shape().DebugString())); OP_REQUIRES(context, mean.dims() == 1, errors::InvalidArgument("mean must be 1-dimensional", mean.shape().DebugString())); OP_REQUIRES(context, var.dims() == 1, errors::InvalidArgument("var must be 1-dimensional", var.shape().DebugString())); OP_REQUIRES(context, beta.dims() == 1, errors::InvalidArgument("beta must be 1-dimensional", beta.shape().DebugString())); OP_REQUIRES(context, gamma.dims() == 1, errors::InvalidArgument("gamma must be 1-dimensional", gamma.shape().DebugString())); OP_REQUIRES(context, mean.NumElements() > 1, errors::InvalidArgument("Must have at least a mean value", gamma.shape().DebugString())); OP_REQUIRES(context, mean.NumElements() > 1, errors::InvalidArgument("Must have at least a mean value")); const auto last_dim = input.shape().dims() - 1; OP_REQUIRES(context, mean.shape().dim_size(0) == input.shape().dim_size(last_dim), errors::InvalidArgument("Must provide as many means as the " "last dimension of the input tensor: ", mean.shape().DebugString(), " vs. ", input.shape().DebugString())); OP_REQUIRES( context, mean.shape().dim_size(0) == var.shape().dim_size(0), errors::InvalidArgument( "Mean and variance tensors must have the same shape: ", mean.shape().DebugString(), " vs. ", var.shape().DebugString())); OP_REQUIRES( context, mean.shape().dim_size(0) == beta.shape().dim_size(0), errors::InvalidArgument( "Mean and beta tensors must have the same shape: ", mean.shape().DebugString(), " vs. ", beta.shape().DebugString())); OP_REQUIRES( context, mean.shape().dim_size(0) == gamma.shape().dim_size(0), errors::InvalidArgument( "Mean and gamma tensors must have the same shape: ", mean.shape().DebugString(), " vs. ", gamma.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); float output_min; float output_max; FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min, mean_max, var, var_min, var_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, variance_epsilon_, scale_after_normalization_, output, &output_min, &output_max); Tensor* output_min_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min_tensor)); output_min_tensor->flat<float>()(0) = output_min; Tensor* output_max_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max_tensor)); output_max_tensor->flat<float>()(0) = output_max; } private: float variance_epsilon_; bool scale_after_normalization_; }; REGISTER_KERNEL_BUILDER(Name("QuantizedBatchNormWithGlobalNormalization") .Device(DEVICE_CPU) .TypeConstraint<quint8>("Tinput") .TypeConstraint<qint32>("out_type"), QuantizedBatchNormOp<quint8, qint32>); }
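FixedPointBatchNorm above folds mean, variance, beta and gamma into one per-channel scale and offset so the inner loop is a single multiply-add per element. A float-domain sketch of that folding step (the requantization into the fixed output range is omitted; names are illustrative):

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Per-channel batch-norm folding: y = x * scale[c] + offset[c], where
//   scale[c]  = gamma[c] / sqrt(var[c] + eps)   (or 1/sqrt(...) without scaling)
//   offset[c] = beta[c] - mean[c] * scale[c]
// Float-domain sketch of what FixedPointBatchNorm precomputes per channel.
void FoldBatchNorm(const std::vector<float>& mean, const std::vector<float>& var,
                   const std::vector<float>& beta, const std::vector<float>& gamma,
                   float eps, bool scale_after_normalization,
                   std::vector<float>* scale, std::vector<float>* offset) {
  const size_t depth = mean.size();
  scale->resize(depth);
  offset->resize(depth);
  for (size_t c = 0; c < depth; ++c) {
    float s = 1.0f / std::sqrt(var[c] + eps);
    if (scale_after_normalization) s *= gamma[c];
    (*scale)[c] = s;
    (*offset)[c] = beta[c] - mean[c] * s;
  }
}

int main() {
  std::vector<float> scale, offset;
  FoldBatchNorm({10.f, 20.f}, {0.25f, 0.5f}, {0.1f, 0.6f}, {0.f, 0.f},
                0.001f, /*scale_after_normalization=*/false, &scale, &offset);
  // x = 1 in channel 0: (1 - 10) / sqrt(0.251) + 0.1 ~= -17.86.
  std::printf("%f\n", 1.0f * scale[0] + offset[0]);
}

Feeding x = 1 for channel 0 with the Simple test's parameters reproduces the expected -17.86 within rounding.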
#define EIGEN_USE_THREADS #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/batch_norm_op.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { using QuantizedBatchNormOpTest = OpsTestBase; TEST_F(QuantizedBatchNormOpTest, Simple) { TF_EXPECT_OK(NodeDefBuilder("quantized_batch_norm_op", "QuantizedBatchNormWithGlobalNormalization") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("scale_after_normalization", false) .Attr("variance_epsilon", 0.001) .Attr("Tinput", DT_QUINT8) .Attr("out_type", DT_QINT32) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = -128.0f; const float input_max = 127.0f; const int input_batch = 1; const int input_height = 1; const int input_width = 6; const int input_depth = 2; Tensor input_float(DT_FLOAT, {input_batch, input_height, input_width, input_depth}); test::FillValues<float>(&input_float, {1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); const float mean_min = 0.0f; const float mean_max = 20.0f; Tensor mean_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&mean_float, {10, 20}); Tensor mean_quantized = FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max); const float variance_min = 0.0f; const float variance_max = 1.0f; Tensor variance_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&variance_float, {0.25, 0.5}); Tensor variance_quantized = FloatTensorToQuantized<quint8>( variance_float, variance_min, variance_max); const float beta_min = 0.0f; const float beta_max = 1.0f; Tensor beta_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&beta_float, {0.1, 0.6}); Tensor beta_quantized = FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max); const float gamma_min = 0.0f; const float gamma_max = 1.0f; Tensor gamma_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&gamma_float, {0.0, 0.0}); Tensor gamma_quantized = FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {input_min}); AddInputFromArray<float>(TensorShape({1}), {input_max}); AddInputFromArray<quint8>(mean_quantized.shape(), mean_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {mean_min}); AddInputFromArray<float>(TensorShape({1}), {mean_max}); AddInputFromArray<quint8>(variance_quantized.shape(), variance_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {variance_min}); AddInputFromArray<float>(TensorShape({1}), {variance_max}); 
AddInputFromArray<quint8>(beta_quantized.shape(), beta_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {beta_min}); AddInputFromArray<float>(TensorShape({1}), {beta_max}); AddInputFromArray<quint8>(gamma_quantized.shape(), gamma_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {gamma_min}); AddInputFromArray<float>(TensorShape({1}), {gamma_max}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_float( allocator(), DT_FLOAT, TensorShape({input_batch, input_height, input_width, input_depth})); test::FillValues<float>( &expected_float, {-17.86, -22.00, -15.87, -20.59, -13.87, -19.18, -21.86, -33.31, -23.85, -34.72, -25.85, -36.13}); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.1); } TEST_F(QuantizedBatchNormOpTest, SameAsFloat) { TF_EXPECT_OK(NodeDefBuilder("quantized_batch_norm_op", "QuantizedBatchNormWithGlobalNormalization") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("scale_after_normalization", false) .Attr("variance_epsilon", 0.001) .Attr("Tinput", DT_QUINT8) .Attr("out_type", DT_QINT32) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); const float input_min = -128.0f; const float input_max = 127.0f; const int input_batch = 1; const int input_height = 1; const int input_width = 6; const int input_depth = 2; Tensor input_float(DT_FLOAT, {input_batch, input_height, input_width, input_depth}); test::FillValues<float>(&input_float, {1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6}); Tensor input_quantized = FloatTensorToQuantized<quint8>(input_float, input_min, input_max); const float mean_min = 0.0f; const float mean_max = 20.0f; Tensor mean_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&mean_float, {10, 20}); Tensor mean_quantized = FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max); const float variance_min = 0.0f; const float variance_max = 1.0f; Tensor variance_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&variance_float, {0.25, 0.5}); Tensor variance_quantized = FloatTensorToQuantized<quint8>( variance_float, variance_min, variance_max); const float beta_min = 0.0f; const float beta_max = 1.0f; Tensor beta_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&beta_float, {0.1, 0.6}); Tensor beta_quantized = FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max); const float gamma_min = 0.0f; const float gamma_max = 1.0f; Tensor gamma_float(DT_FLOAT, {input_depth}); test::FillValues<float>(&gamma_float, {0.0, 0.0}); Tensor gamma_quantized = FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max); AddInputFromArray<quint8>(input_quantized.shape(), input_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {input_min}); AddInputFromArray<float>(TensorShape({1}), {input_max}); AddInputFromArray<quint8>(mean_quantized.shape(), mean_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {mean_min}); 
AddInputFromArray<float>(TensorShape({1}), {mean_max}); AddInputFromArray<quint8>(variance_quantized.shape(), variance_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {variance_min}); AddInputFromArray<float>(TensorShape({1}), {variance_max}); AddInputFromArray<quint8>(beta_quantized.shape(), beta_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {beta_min}); AddInputFromArray<float>(TensorShape({1}), {beta_max}); AddInputFromArray<quint8>(gamma_quantized.shape(), gamma_quantized.flat<quint8>()); AddInputFromArray<float>(TensorShape({1}), {gamma_min}); AddInputFromArray<float>(TensorShape({1}), {gamma_max}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_float( allocator(), DT_FLOAT, TensorShape({input_batch, input_height, input_width, input_depth})); thread::ThreadPool threadpool(Env::Default(), "test", 1); Eigen::ThreadPoolDevice eigen_cpu_device(threadpool.AsEigenThreadPool(), 1); const Tensor& const_input_float = input_float; const Tensor& const_mean_float = mean_float; const Tensor& const_variance_float = variance_float; const Tensor& const_beta_float = beta_float; const Tensor& const_gamma_float = gamma_float; functor::BatchNorm<Eigen::ThreadPoolDevice, float>()( eigen_cpu_device, const_input_float.tensor<float, 4>(), const_mean_float.vec<float>(), const_variance_float.vec<float>(), const_beta_float.vec<float>(), const_gamma_float.vec<float>(), 0.001, false, expected_float.tensor<float, 4>()); const Tensor& output_quantized = *GetOutput(0); const float output_min = GetOutput(1)->flat<float>()(0); const float output_max = GetOutput(2)->flat<float>()(0); Tensor output_float = QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max); test::ExpectTensorNear<float>(expected_float, output_float, 0.1); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_batch_norm_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_batch_norm_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
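The kernel and its tests convert between float ranges and 8-bit codes with FloatToQuantized / QuantizedToFloat from quantization_utils.h. The sketch below shows the general affine mapping such helpers implement; it is a generic illustration under assumed rounding and clamping, not TensorFlow's exact implementation:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Generic affine (de)quantization between [range_min, range_max] and an
// 8-bit code in [0, 255]. Illustrative only.
uint8_t FloatToQuint8(float v, float range_min, float range_max) {
  const float scale = (range_max - range_min) / 255.0f;
  const float code = std::round((v - range_min) / scale);
  return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, code)));
}

float Quint8ToFloat(uint8_t q, float range_min, float range_max) {
  const float scale = (range_max - range_min) / 255.0f;
  return range_min + q * scale;
}

int main() {
  // Round-trip a few values through the [-128, 127] range used by the tests.
  for (float v : {-128.0f, 0.0f, 6.0f, 127.0f}) {
    const uint8_t q = FloatToQuint8(v, -128.0f, 127.0f);
    std::printf("%.1f -> %u -> %.3f\n", v, static_cast<unsigned>(q),
                Quint8ToFloat(q, -128.0f, 127.0f));
  }
}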
cbc1f255-a4f1-468b-93fc-5e815aab1aa2
cpp
tensorflow/tensorflow
spectrogram_op
tensorflow/core/kernels/spectrogram_op.cc
tensorflow/core/kernels/spectrogram_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/spectrogram.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { class AudioSpectrogramOp : public OpKernel { public: explicit AudioSpectrogramOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("window_size", &window_size_)); OP_REQUIRES_OK(context, context->GetAttr("stride", &stride_)); OP_REQUIRES_OK(context, context->GetAttr("magnitude_squared", &magnitude_squared_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); OP_REQUIRES(context, input.dims() == 2, errors::InvalidArgument("input must be 2-dimensional", input.shape().DebugString())); Spectrogram spectrogram; OP_REQUIRES(context, spectrogram.Initialize(window_size_, stride_), errors::InvalidArgument( "Spectrogram initialization failed for window size ", window_size_, " and stride ", stride_)); const auto input_as_matrix = input.matrix<float>(); const int64_t sample_count = input.dim_size(0); const int64_t channel_count = input.dim_size(1); const int64_t output_width = spectrogram.output_frequency_channels(); const int64_t length_minus_window = (sample_count - window_size_); int64_t output_height; if (length_minus_window < 0) { output_height = 0; } else { output_height = 1 + (length_minus_window / stride_); } const int64_t output_slices = channel_count; Tensor* output_tensor = nullptr; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({output_slices, output_height, output_width}), &output_tensor)); auto output_flat = output_tensor->flat<float>().data(); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { OP_REQUIRES(context, spectrogram.Reset(), errors::InvalidArgument("Failed to Reset()")); float* output_slice = output_flat + (channel * output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_as_matrix(i, channel); } std::vector<std::vector<float>> spectrogram_output; OP_REQUIRES(context, spectrogram.ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output), errors::InvalidArgument("Spectrogram compute failed")); OP_REQUIRES(context, (spectrogram_output.size() == output_height), errors::InvalidArgument( "Spectrogram size calculation failed: Expected height ", output_height, " but got ", spectrogram_output.size())); OP_REQUIRES(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width), errors::InvalidArgument( "Spectrogram size calculation failed: Expected width ", output_width, " but got ", spectrogram_output[0].size())); for (int row_index = 0; row_index < output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; DCHECK_EQ(spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (magnitude_squared_) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } } private: int32 window_size_; int32 stride_; bool magnitude_squared_; }; REGISTER_KERNEL_BUILDER(Name("AudioSpectrogram").Device(DEVICE_CPU), AudioSpectrogramOp); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/audio_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace ops { namespace { TEST(SpectrogramOpTest, SimpleTest) { Scope root = Scope::NewRootScope(); Tensor audio_tensor(DT_FLOAT, TensorShape({8, 1})); test::FillValues<float>(&audio_tensor, {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f}); Output audio_const_op = Const(root.WithOpName("audio_const_op"), Input::Initializer(audio_tensor)); AudioSpectrogram spectrogram_op = AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, 8, 1); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {spectrogram_op.spectrogram}, &outputs)); const Tensor& spectrogram_tensor = outputs[0]; EXPECT_EQ(3, spectrogram_tensor.dims()); EXPECT_EQ(5, spectrogram_tensor.dim_size(2)); EXPECT_EQ(1, spectrogram_tensor.dim_size(1)); EXPECT_EQ(1, spectrogram_tensor.dim_size(0)); test::ExpectTensorNear<float>( spectrogram_tensor, test::AsTensor<float>({0, 1, 2, 1, 0}, TensorShape({1, 1, 5})), 1e-3); } TEST(SpectrogramOpTest, SquaredTest) { Scope root = Scope::NewRootScope(); Tensor audio_tensor(DT_FLOAT, TensorShape({8, 1})); test::FillValues<float>(&audio_tensor, {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f}); Output audio_const_op = Const(root.WithOpName("audio_const_op"), Input::Initializer(audio_tensor)); AudioSpectrogram spectrogram_op = AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, 8, 1, AudioSpectrogram::Attrs().MagnitudeSquared(true)); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {spectrogram_op.spectrogram}, &outputs)); const Tensor& spectrogram_tensor = outputs[0]; EXPECT_EQ(3, spectrogram_tensor.dims()); EXPECT_EQ(5, spectrogram_tensor.dim_size(2)); EXPECT_EQ(1, spectrogram_tensor.dim_size(1)); EXPECT_EQ(1, spectrogram_tensor.dim_size(0)); test::ExpectTensorNear<float>( spectrogram_tensor, test::AsTensor<float>({0, 1, 4, 1, 0}, TensorShape({1, 1, 5})), 1e-3); } TEST(SpectrogramOpTest, MultichannelTest) { Scope root = Scope::NewRootScope(); const int audio_size = 8; const int channel_size = 2; Tensor audio_tensor(DT_FLOAT, TensorShape({audio_size, channel_size})); test::FillValues<float>( &audio_tensor, {-1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, -1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f}); Output audio_const_op = Const(root.WithOpName("audio_const_op"), Input::Initializer(audio_tensor)); AudioSpectrogram spectrogram_op = AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, audio_size, channel_size); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {spectrogram_op.spectrogram}, &outputs)); const Tensor& spectrogram_tensor = outputs[0]; EXPECT_EQ(3, 
spectrogram_tensor.dims()); EXPECT_EQ(5, spectrogram_tensor.dim_size(2)); EXPECT_EQ(1, spectrogram_tensor.dim_size(1)); EXPECT_EQ(channel_size, spectrogram_tensor.dim_size(0)); for (int channel = 0; channel < channel_size; channel++) { test::ExpectTensorNear<float>( spectrogram_tensor.SubSlice(channel), test::AsTensor<float>({0, 1, 2, 1, 0}, TensorShape({1, 5})), 1e-3); } } TEST(SpectrogramOpTest, InvalidWindowSize) { Scope root = Scope::NewRootScope(); const int audio_size = 8; const int channel_size = 2; Tensor audio_tensor(DT_FLOAT, TensorShape({audio_size, channel_size})); test::FillValues<float>( &audio_tensor, {-1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, -1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f}); Output audio_const_op = Const(root.WithOpName("audio_const_op"), Input::Initializer(audio_tensor)); AudioSpectrogram spectrogram_op = AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, 1, 1); EXPECT_THAT(root.status(), tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("window size"))); } TEST(SpectrogramOpTest, InvalidStride) { Scope root = Scope::NewRootScope(); const int audio_size = 8; const int channel_size = 2; Tensor audio_tensor(DT_FLOAT, TensorShape({audio_size, channel_size})); test::FillValues<float>( &audio_tensor, {-1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, -1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f}); Output audio_const_op = Const(root.WithOpName("audio_const_op"), Input::Initializer(audio_tensor)); AudioSpectrogram spectrogram_op = AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, 2, 0); EXPECT_THAT(root.status(), tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("stride"))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/spectrogram_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/spectrogram_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
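Because the input is laid out as [samples, channels], the kernel copies one column into a contiguous vector before computing each channel's spectrogram. A small illustrative sketch of that deinterleaving step, using the same interleaved data as MultichannelTest:

#include <cstdio>
#include <vector>

// Extract one channel from interleaved [sample, channel] audio, mirroring the
// per-channel copy loop in AudioSpectrogramOp. Illustrative helper only.
std::vector<float> ExtractChannel(const std::vector<float>& interleaved,
                                  int channel_count, int channel) {
  std::vector<float> out;
  for (size_t i = static_cast<size_t>(channel); i < interleaved.size();
       i += static_cast<size_t>(channel_count)) {
    out.push_back(interleaved[i]);
  }
  return out;
}

int main() {
  // Two identical channels, as in MultichannelTest.
  std::vector<float> audio = {-1, -1, 0, 0, 1, 1, 0, 0,
                              -1, -1, 0, 0, 1, 1, 0, 0};
  for (float v : ExtractChannel(audio, 2, 0)) std::printf("%g ", v);
  std::printf("\n");  // prints: -1 0 1 0 -1 0 1 0
}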
0f3bf028-d254-4a1a-870f-1bc70640aead
cpp
tensorflow/tensorflow
batch_kernels
tensorflow/core/kernels/batch_kernels.cc
tensorflow/core/kernels/batch_kernels_test.cc
#include "tensorflow/core/kernels/batch_kernels.h" #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h" #include "tensorflow/core/kernels/batching_util/batch_resource_base.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h" #include "tensorflow/core/kernels/batching_util/bounded_executor.h" #include "tensorflow/core/kernels/batching_util/concat_split_util.h" #include "tensorflow/core/kernels/batching_util/periodic_function.h" #include "tensorflow/core/kernels/batching_util/warmup.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/monitoring/gauge.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/numbers.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/threadpool.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace { constexpr char kEnableAdaptiveSchedulerAttr[] = "_enable_adaptive_scheduler"; constexpr char kMinInflightBatchesAttr[] = "_min_inflight_batches"; constexpr char kInitialInflightBatchesAttr[] = "_initial_inflight_batches"; constexpr char kMaxInflightBatchesAttr[] = "_max_inflight_batches"; constexpr char kBatchesToAverageOverAttr[] = "_batches_to_average_over"; constexpr char kFullBatchSchedulingBoostMicros[] = "_full_batch_scheduling_boost_micros"; constexpr int64_t kBatchThreadPoolSize = 128; } const int64_t kMinInflightBatches = 1; const int64_t kInitialInflightBatches = 2; const int64_t kBatchesToAverageOver = 10; const int64_t kMaxInflightBatches = 64; void RecordBatchSplitUsage( std::optional<bool> maybe_enable_large_batch_splitting, absl::string_view model_name) { static auto* cell = monitoring::Gauge<std::string, 1>::New( "/tensorflow/serving/batching/enable_large_batch_splitting", "Tracks the usage of attribute `enable_large_batch_splitting` for " "BatchFunction kernel in a saved model.", "model_name"); if (maybe_enable_large_batch_splitting.has_value()) { if (maybe_enable_large_batch_splitting.value()) { cell->GetCell(std::string(model_name))->Set("true"); } else { cell->GetCell(std::string(model_name))->Set("false"); } } else { cell->GetCell(std::string(model_name))->Set("unset"); } } void RecordBatchParamNumBatchThreads(int64_t num_batch_threads, absl::string_view model_name) { static auto* cell = monitoring::Gauge<int64_t, 1>::New( "/tensorflow/serving/batching/num_batch_threads", "Tracks the number of batch threads of a model.", "model_name"); cell->GetCell(std::string(model_name))->Set(num_batch_threads); } absl::string_view GetModelName(OpKernelContext* ctx) { if 
(ctx->session_metadata() == nullptr || ctx->session_metadata()->name().empty()) { return "model_name_unset"; } return ctx->session_metadata()->name(); } using ::tensorflow::concat_split_util::Concat; using ::tensorflow::concat_split_util::Split; int32 NumBatchThreadsFromEnvironmentWithDefault(int default_num_batch_threads) { int32_t num; const char* val = std::getenv("TF_NUM_BATCH_THREADS"); return (val && strings::safe_strto32(val, &num)) ? num : default_num_batch_threads; } static thread::ThreadPool* GetOrCreateBatchThreadsPool() { static thread::ThreadPool* shared_thread_pool = [&]() -> thread::ThreadPool* { serving::BoundedExecutor::Options options; options.num_threads = NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize); options.thread_name = std::string("adaptive_batch_threads"); auto status_or_executor = serving::BoundedExecutor::Create(options); if (!status_or_executor.ok()) { LOG(WARNING) << "Failed to create a batch threads pool with error " << status_or_executor.status(); return nullptr; } static serving::BoundedExecutor* executor = status_or_executor.value().release(); return new thread::ThreadPool(executor); }(); return shared_thread_pool; } class BatchResource : public serving::BatchResourceBase { public: struct BatchTask : serving::BatchResourceBase::BatchTask { FunctionLibraryRuntime::Handle fhandle; explicit BatchTask(FunctionLibraryRuntime::Handle fhandle) : fhandle(fhandle) {} protected: std::unique_ptr<serving::BatchResourceBase::BatchTask> CreateDerivedTask() override { return std::make_unique<BatchTask>(fhandle); } }; static Status Create(bool has_process_batch_function, int32_t num_batch_threads, int32_t max_execution_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& allowed_batch_sizes, bool enable_large_batch_splitting, std::unique_ptr<BatchResource>* resource) { return Create(has_process_batch_function, num_batch_threads, max_execution_batch_size, batch_timeout_micros, max_enqueued_batches, allowed_batch_sizes, 0, 0, 0, {}, serving::MixedPriorityBatchingPolicy:: kLowPriorityPaddingWithMaxBatchSize, enable_large_batch_splitting, "PAD_UP", resource); } static Status Create( bool has_process_batch_function, int32_t num_batch_threads, int32_t max_execution_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& allowed_batch_sizes, int32_t low_priority_max_batch_size, int32_t low_priority_batch_timeout_micros, int32_t low_priority_max_enqueued_batches, const std::vector<int32>& low_priority_allowed_batch_sizes, serving::MixedPriorityBatchingPolicy mixed_priority_batching_policy, bool enable_large_batch_splitting, absl::string_view batch_padding_policy, std::unique_ptr<BatchResource>* resource) { BatcherT::Options batcher_options; batcher_options.num_batch_threads = num_batch_threads; std::shared_ptr<BatcherT> batcher; TF_RETURN_IF_ERROR(BatcherT::Create(batcher_options, &batcher)); resource->reset(new BatchResource( has_process_batch_function, std::move(batcher), GetBatcherQueueOptions( num_batch_threads, max_execution_batch_size, batch_timeout_micros, max_enqueued_batches, allowed_batch_sizes, enable_large_batch_splitting, false, batch_padding_policy, low_priority_max_batch_size, low_priority_batch_timeout_micros, low_priority_max_enqueued_batches, low_priority_allowed_batch_sizes, mixed_priority_batching_policy), allowed_batch_sizes)); return absl::OkStatus(); } static Status Create( bool has_process_batch_function, AdaptiveBatcherT::Options 
adaptive_shared_batch_scheduler_options, int32_t max_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& allowed_batch_sizes, std::unique_ptr<BatchResource>* resource) { std::shared_ptr<AdaptiveBatcherT> batcher; TF_RETURN_IF_ERROR(AdaptiveBatcherT::Create( adaptive_shared_batch_scheduler_options, &batcher)); resource->reset(new BatchResource( has_process_batch_function, std::move(batcher), GetAdaptiveBatcherQueueOptions( max_batch_size, batch_timeout_micros, max_enqueued_batches, true, allowed_batch_sizes, false), allowed_batch_sizes)); return absl::OkStatus(); } string DebugString() const final { return "BatchResource"; } private: BatchResource(bool has_process_batch_function, std::shared_ptr<BatcherT> batcher, const BatcherT::QueueOptions& batcher_queue_options, std::vector<int32> allowed_batch_sizes) : BatchResourceBase(has_process_batch_function, std::move(batcher), batcher_queue_options, std::move(allowed_batch_sizes)) {} BatchResource(bool has_process_batch_function, std::shared_ptr<AdaptiveBatcherT> batcher, const AdaptiveBatcherT::QueueOptions& batcher_queue_options, std::vector<int32> allowed_batch_sizes) : BatchResourceBase(has_process_batch_function, std::move(batcher), batcher_queue_options, std::move(allowed_batch_sizes)) {} void ProcessFuncBatchImpl( const serving::BatchResourceBase::BatchTask& last_task, absl::Span<const Tensor> inputs, std::vector<Tensor>* combined_outputs, std::function<void(const Status&)> done) const override { auto* last_task_context = last_task.context; FunctionLibraryRuntime::Options opts; opts.step_container = last_task_context->step_container(); opts.cancellation_manager = last_task_context->cancellation_manager(); opts.collective_executor = last_task_context->collective_executor(); opts.stats_collector = last_task_context->stats_collector(); opts.runner = last_task_context->runner(); opts.run_all_kernels_inline = last_task_context->run_all_kernels_inline(); Notification done_notif; auto* flib = last_task_context->function_library(); FunctionLibraryRuntime::Handle fhandle = down_cast<const BatchTask&>(last_task).fhandle; flib->Run(opts, fhandle, inputs, combined_outputs, [&](const Status& run_status) { done(run_status); done_notif.Notify(); }); done_notif.WaitForNotification(); } }; BatchFunctionKernel::BatchFunctionKernel(OpKernelConstruction* c) : AsyncOpKernel(c) { OP_REQUIRES_OK(c, c->GetAttr("container", &container_)); OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_)); OP_REQUIRES_OK(c, c->GetAttr("batching_queue", &batcher_queue_)); OP_REQUIRES_OK(c, c->GetAttr("num_batch_threads", &num_batch_threads_)); OP_REQUIRES_OK(c, c->GetAttr("max_batch_size", &max_batch_size_)); OP_REQUIRES_OK(c, c->GetAttr("batch_timeout_micros", &batch_timeout_micros_)); OP_REQUIRES_OK(c, c->GetAttr("max_enqueued_batches", &max_enqueued_batches_)); OP_REQUIRES_OK(c, c->GetAttr("allowed_batch_sizes", &allowed_batch_sizes_)); OP_REQUIRES_OK(c, c->GetAttr("low_priority_max_batch_size", &low_priority_max_batch_size_)); OP_REQUIRES_OK(c, c->GetAttr("low_priority_batch_timeout_micros", &low_priority_batch_timeout_micros_)); OP_REQUIRES_OK(c, c->GetAttr("low_priority_allowed_batch_sizes", &low_priority_allowed_batch_sizes_)); OP_REQUIRES_OK(c, c->GetAttr("low_priority_max_enqueued_batches", &low_priority_max_enqueued_batches_)); OP_REQUIRES_OK(c, c->GetAttr("mixed_priority_policy", &mixed_priority_policy_)); OP_REQUIRES_OK(c, c->GetAttr("batch_padding_policy", &batch_padding_policy_)); OP_REQUIRES_OK(c, 
c->GetAttr("f", &func_)); if (c->HasAttr("enable_large_batch_splitting")) { OP_REQUIRES_OK(c, c->GetAttr("enable_large_batch_splitting", &enable_large_batch_splitting_)); has_attribute_enable_large_batch_splitting_ = true; } SetAdaptiveBatchSchedulerOptions(c, num_batch_threads_); if (!c->status().ok()) { return; } if (enable_adaptive_batch_threads_) { batcher_queue_ = name() + "/" + shared_name_ + batcher_queue_; } if (shared_name_.empty()) { shared_name_ = name(); } OP_REQUIRES_OK(c, ValidateAllowedBatchSizes()); } bool BatchFunctionKernel::IsExpensive() { return false; } void BatchFunctionKernel::ComputeAsync(OpKernelContext* c, DoneCallback done) { RecordBatchSplitUsage(has_attribute_enable_large_batch_splitting_ ? std::make_optional(enable_large_batch_splitting_) : std::nullopt, GetModelName(c)); RecordBatchParamNumBatchThreads(num_batch_threads_, GetModelName(c)); std::function<Status(BatchResource**)> creator; FunctionLibraryRuntime::Handle handle; OP_REQUIRES_OK_ASYNC(c, GetOrCreateFunctionHandle(c, &handle), done); if (adaptive_batch_scheduler_options_ != std::nullopt) { creator = [this, session_metadata = c->session_metadata()](BatchResource** r) { serving::AdaptiveSharedBatchScheduler< serving::BatchResourceBase::BatchTask>::Options adaptive_shared_batch_scheduler_options; adaptive_shared_batch_scheduler_options.thread_pool_name = "adaptive_batch_threads"; adaptive_shared_batch_scheduler_options.thread_pool = GetOrCreateBatchThreadsPool(); adaptive_shared_batch_scheduler_options.num_batch_threads = std::min( NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize), adaptive_batch_scheduler_options_->max_in_flight_batches_limit); adaptive_shared_batch_scheduler_options.min_in_flight_batches_limit = std::min( NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize), adaptive_batch_scheduler_options_->min_in_flight_batches_limit); adaptive_shared_batch_scheduler_options .initial_in_flight_batches_limit = std::min( NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize), adaptive_batch_scheduler_options_->initial_in_flight_batches_limit); adaptive_shared_batch_scheduler_options.batches_to_average_over = adaptive_batch_scheduler_options_->batches_to_average_over; if (adaptive_batch_scheduler_options_ ->full_batch_scheduling_boost_micros != -1) { adaptive_shared_batch_scheduler_options .full_batch_scheduling_boost_micros = adaptive_batch_scheduler_options_ ->full_batch_scheduling_boost_micros; adaptive_shared_batch_scheduler_options.fifo_scheduling = false; } else { adaptive_shared_batch_scheduler_options.fifo_scheduling = true; } std::unique_ptr<BatchResource> new_resource; TF_RETURN_IF_ERROR(BatchResource::Create( true, adaptive_shared_batch_scheduler_options, max_batch_size_, batch_timeout_micros_, max_enqueued_batches_, allowed_batch_sizes_, &new_resource)); if (session_metadata) { new_resource->set_session_metadata(*session_metadata); } *r = new_resource.release(); return absl::OkStatus(); }; } else { creator = [this, session_metadata = c->session_metadata()](BatchResource** r) { TF_ASSIGN_OR_RETURN( serving::MixedPriorityBatchingPolicy mixed_priority_batching_policy, serving::GetMixedPriorityBatchingPolicy(mixed_priority_policy_)); std::unique_ptr<BatchResource> new_resource; TF_RETURN_IF_ERROR(BatchResource::Create( true, num_batch_threads_, max_batch_size_, batch_timeout_micros_, max_enqueued_batches_, allowed_batch_sizes_, low_priority_max_batch_size_, low_priority_batch_timeout_micros_, low_priority_max_enqueued_batches_, 
low_priority_allowed_batch_sizes_, mixed_priority_batching_policy, enable_large_batch_splitting_, batch_padding_policy_, &new_resource)); if (session_metadata) { new_resource->set_session_metadata(*session_metadata); } *r = new_resource.release(); return absl::OkStatus(); }; } BatchResource* br; OP_REQUIRES_OK_ASYNC(c, c->resource_manager()->LookupOrCreate( container_, shared_name_, &br, creator), done); const uint64_t guid = random::New64(); auto create_batch_task_fn = [handle]() -> absl::StatusOr< std::unique_ptr<serving::BatchResourceBase::BatchTask>> { return {std::make_unique<BatchResource::BatchTask>(handle)}; }; Status status; if (serving::ShouldWarmupAllBatchSizes(c)) { status = br->RegisterWarmupInputs(guid, c, batcher_queue_, create_batch_task_fn, done); } else { status = br->RegisterInput(guid, c, batcher_queue_, create_batch_task_fn, done); } br->Unref(); OP_REQUIRES_OK_ASYNC(c, status, done); } Status BatchFunctionKernel::InstantiateFunction( OpKernelContext* c, FunctionLibraryRuntime::Handle* handle) const { FunctionLibraryRuntime* flib = c->function_library(); if (!flib) { return errors::Internal("No function library"); } FunctionLibraryRuntime::InstantiateOptions opts; opts.target = flib->device() == nullptr ? "" : flib->device()->name(); opts.is_multi_device_function = true; const ConfigProto* config = flib->config_proto(); if (config) { opts.config_proto = *config; } Device* cpu_device; TF_RETURN_IF_ERROR(flib->device_mgr()->LookupDevice("CPU:0", &cpu_device)); const FunctionDef* fdef = flib->GetFunctionLibraryDefinition()->Find(func_.name()); if (!fdef) { return errors::NotFound("Failed to find definition for function \"", func_.name(), "\""); } OpInputList in_tensors; TF_RETURN_IF_ERROR(c->input_list("in_tensors", &in_tensors)); for (int i = 0; i < in_tensors.size(); i++) { if (in_tensors[i].dtype() == DT_RESOURCE) { return errors::InvalidArgument( "BatchFunction cannot take resource inputs but input ", i, " is a resource."); } else { opts.input_devices.push_back(cpu_device->name()); } } OpInputList captured_tensors; TF_RETURN_IF_ERROR(c->input_list("captured_tensors", &captured_tensors)); for (const Tensor& t : captured_tensors) { if (t.dtype() == DT_RESOURCE) { const ResourceHandle& rhandle = t.flat<ResourceHandle>()(0); opts.input_devices.push_back(rhandle.device()); } else { opts.input_devices.push_back(cpu_device->name()); } } const OpDef& signature = fdef->signature(); for (int i = 0; i < signature.output_arg_size(); i++) { opts.output_devices.push_back(cpu_device->name()); } if (opts.input_devices.size() != signature.input_arg_size()) { return errors::InvalidArgument( "Function takes ", signature.input_arg_size(), " argument(s) but ", opts.input_devices.size(), " argument(s) were passed"); } return flib->Instantiate(func_.name(), AttrSlice(&func_.attr()), opts, handle); } Status BatchFunctionKernel::GetOrCreateFunctionHandle( OpKernelContext* c, FunctionLibraryRuntime::Handle* handle) { mutex_lock ml(mu_); if (!fhandle_) { TF_RETURN_IF_ERROR(InstantiateFunction(c, handle)); fhandle_ = *handle; } else { *handle = fhandle_.value(); } return absl::OkStatus(); } Status BatchFunctionKernel::ValidateAllowedBatchSizes() const { if (allowed_batch_sizes_.empty()) { return absl::OkStatus(); } int32_t last_size = 0; for (size_t i = 0; i < allowed_batch_sizes_.size(); ++i) { const int32_t size = allowed_batch_sizes_.at(i); if (i > 0 && size <= last_size) { return errors::InvalidArgument( "allowed_batch_sizes entries must be monotonically increasing"); } if 
((!enable_large_batch_splitting_) && (i == allowed_batch_sizes_.size() - 1) && (size != max_batch_size_)) { return errors::InvalidArgument( "final entry in allowed_batch_sizes must equal max_batch_size when " "enable_large_batch_splitting is False"); } last_size = size; } return absl::OkStatus(); } void BatchFunctionKernel::SetAdaptiveBatchSchedulerOptions( OpKernelConstruction* c, int32_t num_batch_threads) { if (c->HasAttr(kEnableAdaptiveSchedulerAttr)) { OP_REQUIRES_OK(c, c->GetAttr(kEnableAdaptiveSchedulerAttr, &enable_adaptive_batch_threads_)); } if (num_batch_threads <= 0) { enable_adaptive_batch_threads_ = true; } if (!enable_adaptive_batch_threads_) { return; } AdaptiveBatchSchedulerOptions options; if (c->HasAttr(kBatchesToAverageOverAttr)) { OP_REQUIRES_OK(c, c->GetAttr(kBatchesToAverageOverAttr, &options.batches_to_average_over)); } if (c->HasAttr(kMinInflightBatchesAttr)) { OP_REQUIRES_OK(c, c->GetAttr(kMinInflightBatchesAttr, &options.min_in_flight_batches_limit)); } if (c->HasAttr(kInitialInflightBatchesAttr)) { OP_REQUIRES_OK(c, c->GetAttr(kInitialInflightBatchesAttr, &options.initial_in_flight_batches_limit)); } if (c->HasAttr(kMaxInflightBatchesAttr)) { OP_REQUIRES_OK(c, c->GetAttr(kMaxInflightBatchesAttr, &options.max_in_flight_batches_limit)); } if (c->HasAttr(kFullBatchSchedulingBoostMicros)) { OP_REQUIRES_OK(c, c->GetAttr(kFullBatchSchedulingBoostMicros, &options.full_batch_scheduling_boost_micros)); } thread::ThreadPool* thread_pool = GetOrCreateBatchThreadsPool(); OP_REQUIRES( c, thread_pool != nullptr, errors::FailedPrecondition("Failed to create batch threads pool")); adaptive_batch_scheduler_options_ = options; } REGISTER_KERNEL_BUILDER(Name("BatchFunction").Device(DEVICE_CPU), BatchFunctionKernel); REGISTER_KERNEL_BUILDER(Name("BatchFunction") .Device(DEVICE_GPU) .HostMemory("in_tensors") .HostMemory("captured_tensors") .HostMemory("out_tensors"), BatchFunctionKernel); REGISTER_KERNEL_BUILDER(Name("BatchFunction") .Device(DEVICE_DEFAULT) .HostMemory("in_tensors") .HostMemory("captured_tensors") .HostMemory("out_tensors"), BatchFunctionKernel); class BatchKernel : public AsyncOpKernel { public: explicit BatchKernel(OpKernelConstruction* c) : AsyncOpKernel(c) { OP_REQUIRES_OK(c, c->GetAttr("container", &container_)); OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_)); if (shared_name_.empty()) { shared_name_ = name(); } OP_REQUIRES_OK(c, c->GetAttr("batching_queue", &batcher_queue_)); OP_REQUIRES_OK(c, c->GetAttr("num_batch_threads", &num_batch_threads_)); OP_REQUIRES_OK(c, c->GetAttr("max_batch_size", &max_batch_size_)); OP_REQUIRES_OK(c, c->GetAttr("batch_timeout_micros", &batch_timeout_micros_)); OP_REQUIRES_OK(c, c->GetAttr("max_enqueued_batches", &max_enqueued_batches_)); OP_REQUIRES_OK(c, c->GetAttr("allowed_batch_sizes", &allowed_batch_sizes_)); OP_REQUIRES_OK(c, ValidateAllowedBatchSizes()); } void ComputeAsync(OpKernelContext* c, DoneCallback done) final { BatchResource* br; std::function<Status(BatchResource**)> creator = [this](BatchResource** r) { std::unique_ptr<BatchResource> new_resource; TF_RETURN_IF_ERROR(BatchResource::Create( false, num_batch_threads_, max_batch_size_, batch_timeout_micros_, max_enqueued_batches_, allowed_batch_sizes_, false, &new_resource)); *r = new_resource.release(); return absl::OkStatus(); }; OP_REQUIRES_OK_ASYNC(c, c->resource_manager()->LookupOrCreate( container_, shared_name_, &br, creator), done); const Status status = br->RegisterInput( random::New64(), c, batcher_queue_, []() -> absl::StatusOr< 
std::unique_ptr<serving::BatchResourceBase::BatchTask>> { return {std::make_unique<BatchResource::BatchTask>(kInvalidHandle)}; }, done); br->Unref(); OP_REQUIRES_OK_ASYNC(c, status, done); } Status ValidateAllowedBatchSizes() const { if (allowed_batch_sizes_.empty()) { return absl::OkStatus(); } int32_t last_size = 0; for (size_t i = 0; i < allowed_batch_sizes_.size(); ++i) { const int32_t size = allowed_batch_sizes_.at(i); if (i > 0 && size <= last_size) { return errors::InvalidArgument( "allowed_batch_sizes entries must be monotonically increasing"); } if (i == allowed_batch_sizes_.size() - 1 && size != max_batch_size_) { return errors::InvalidArgument( "final entry in allowed_batch_sizes must equal max_batch_size"); } last_size = size; } return absl::OkStatus(); } private: string container_; string shared_name_; string batcher_queue_; int32 num_batch_threads_; int32 max_batch_size_; int32 batch_timeout_micros_; int32 max_enqueued_batches_; std::vector<int32> allowed_batch_sizes_; }; REGISTER_KERNEL_BUILDER(Name("Batch").Device(DEVICE_CPU), BatchKernel); class UnbatchResource : public ResourceBase { public: explicit UnbatchResource(int32_t timeout_micros) : timeout_micros_(timeout_micros), timeout_enforcer_(new serving::PeriodicFunction( [this] { EnforceTimeout(); }, 1000 )) {} ~UnbatchResource() override { timeout_enforcer_ = nullptr; } string DebugString() const final { return "UnbatchResource"; } Status Compute(OpKernelContext* context, AsyncOpKernel::DoneCallback done) { const Tensor& data_t = context->input(0); const Tensor& batch_index_t = context->input(1); if (batch_index_t.shape().dim_size(0) > data_t.shape().dim_size(0)) { return errors::InvalidArgument( "Wrong shape for index tensor. Expected 0th dimension size to be no " "greater than ", data_t.shape().dim_size(0), "; Got: ", batch_index_t.shape().dim_size(0), "."); } if (batch_index_t.shape().dim_size(1) != 3) { return errors::InvalidArgument( "Wrong shape for index tensor. 
Expected 1st dimension size to be 3 ; " "Got: ", batch_index_t.shape().dim_size(1), "."); } if (!TensorShapeUtils::IsScalar(context->input(2).shape())) { return errors::InvalidArgument( "Input id should be scalar; " "Got: ", context->input(2).DebugString(), "."); } const int64_t batch_key = context->input(2).scalar<int64_t>()(); const bool nonempty_input = batch_index_t.dim_size(0) > 0; std::vector<int64_t> sizes; std::vector<int64_t> batch_keys; std::vector<Tensor> split_inputs; if (nonempty_input) { auto batch_indices = batch_index_t.shaped<int64_t, 2>({batch_index_t.dim_size(0), 3}); for (int i = 0; i < batch_index_t.dim_size(0); ++i) { sizes.push_back(batch_indices(i, 2) - batch_indices(i, 1)); batch_keys.push_back(batch_indices(i, 0)); } TF_RETURN_IF_ERROR(Split(context, data_t, sizes, &split_inputs)); } std::vector<AsyncOpKernel::DoneCallback> done_callbacks_to_call; Status status = [&]() -> Status { mutex_lock ml(mu_); auto tensor_it = waiting_tensors_.find(batch_key); if (tensor_it != waiting_tensors_.end()) { context->set_output(0, tensor_it->second.tensor); waiting_tensors_.erase(tensor_it); done_callbacks_to_call.push_back(done); return absl::OkStatus(); } const uint64 deadline_micros = Env::Default()->NowMicros() + timeout_micros_; if (!waiting_callbacks_ .emplace(batch_key, WaitingCallback{deadline_micros, context, done}) .second) { return errors::AlreadyExists( "Multiple session runs with the same batch key."); } if (nonempty_input) { for (size_t i = 0; i < batch_keys.size(); ++i) { auto runs_it = waiting_callbacks_.find(batch_keys[i]); if (runs_it != waiting_callbacks_.end()) { runs_it->second.context->set_output(0, split_inputs[i]); done_callbacks_to_call.push_back(runs_it->second.done); waiting_callbacks_.erase(runs_it); } else { if (!waiting_tensors_ .emplace(batch_keys[i], WaitingTensor{deadline_micros, split_inputs[i]}) .second) { return errors::AlreadyExists( "Multiple tensors returned for same batch key."); } } } } return absl::OkStatus(); }(); for (const AsyncOpKernel::DoneCallback& done_callback : done_callbacks_to_call) { done_callback(); } return status; } private: void EnforceTimeout() { const uint64 now = Env::Default()->NowMicros(); std::vector<WaitingCallback> evicted_callbacks; { mutex_lock ml(mu_); for (auto it = waiting_tensors_.begin(); it != waiting_tensors_.end();) { const WaitingTensor& waiting_tensor = it->second; if (waiting_tensor.deadline_micros < now) { it = waiting_tensors_.erase(it); } else { ++it; } } for (auto it = waiting_callbacks_.begin(); it != waiting_callbacks_.end();) { const WaitingCallback& waiting_callback = it->second; if (waiting_callback.deadline_micros < now) { evicted_callbacks.push_back(waiting_callback); it = waiting_callbacks_.erase(it); } else { ++it; } } } for (const WaitingCallback& evicted_callback : evicted_callbacks) { evicted_callback.context->CtxFailureWithWarning(errors::DeadlineExceeded( "Batched data did not arrive within timeout window.")); evicted_callback.done(); } } struct WaitingTensor { uint64 deadline_micros; Tensor tensor; }; struct WaitingCallback { uint64 deadline_micros; OpKernelContext* context; AsyncOpKernel::DoneCallback done; }; const int32 timeout_micros_; mutex mu_; std::unordered_map<int64_t, WaitingTensor> waiting_tensors_ TF_GUARDED_BY(mu_); std::unordered_map<int64_t, WaitingCallback> waiting_callbacks_ TF_GUARDED_BY(mu_); std::unique_ptr<serving::PeriodicFunction> timeout_enforcer_; }; class UnbatchKernel : public AsyncOpKernel { public: explicit UnbatchKernel(OpKernelConstruction* c) : 
AsyncOpKernel(c) { OP_REQUIRES_OK(c, c->GetAttr("container", &container_)); OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_)); if (shared_name_.empty()) { shared_name_ = name(); } OP_REQUIRES_OK(c, c->GetAttr("timeout_micros", &timeout_micros_)); } void ComputeAsync(OpKernelContext* c, DoneCallback done) final { UnbatchResource* ubr; std::function<Status(UnbatchResource**)> creator = [this](UnbatchResource** r) { *r = new UnbatchResource(timeout_micros_); return absl::OkStatus(); }; OP_REQUIRES_OK_ASYNC(c, c->resource_manager()->LookupOrCreate( container_, shared_name_, &ubr, creator), done); auto status = ubr->Compute(c, done); ubr->Unref(); OP_REQUIRES_OK_ASYNC(c, status, done); } private: string container_; string shared_name_; int32 timeout_micros_; }; REGISTER_KERNEL_BUILDER(Name("Unbatch").Device(DEVICE_CPU), UnbatchKernel); class UnbatchGradResource : public ResourceBase { public: UnbatchGradResource() {} string DebugString() const final { return "UnbatchGradResource"; } Status OutputBatch(OpKernelContext* context, const AsyncOpKernel::DoneCallback& done) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { const Tensor& batch_index_t = context->input(1); auto batch_index = batch_index_t.shaped<int64_t, 2>({batch_index_t.dim_size(0), 3}); std::vector<Tensor> tensors; for (int i = 0; i < batch_index_t.dim_size(0); ++i) { auto available_it = available_tensors_.find(batch_index(i, 0)); if (available_it == available_tensors_.end()) { return errors::Internal("bad bookkeeping of available tensors."); } tensors.push_back(available_it->second); available_tensors_.erase(available_it); } const DataType type = tensors[0].dtype(); Tensor concatenated_tensor; switch (type) { #define CASE(type) \ case DataTypeToEnum<type>::value: \ TF_RETURN_IF_ERROR(Concat<type>(context, tensors, &concatenated_tensor)); \ context->set_output(0, concatenated_tensor); \ break; TF_CALL_ALL_TYPES(CASE); #undef CASE default: return errors::InvalidArgument("Unsupported data type: ", type); } done(); return absl::OkStatus(); } Status Compute(OpKernelContext* context, const AsyncOpKernel::DoneCallback& done) { const Tensor& data_t = context->input(0); const Tensor& batch_index_t = context->input(1); const Tensor& grad_t = context->input(2); const Tensor& batch_key_t = context->input(3); mutex_lock ml(mu_); if (!TensorShapeUtils::IsScalar(batch_key_t.shape())) { return errors::InvalidArgument("Expected `id` to be scalar. Received ", batch_key_t.DebugString()); } const int64_t batch_key = context->input(3).scalar<int64_t>()(); if (!available_tensors_.emplace(batch_key, grad_t).second) { return errors::InvalidArgument("Two runs with the same batch key."); } if (data_t.NumElements() > 0) { if (batch_index_t.NumElements() == 0) { return errors::InvalidArgument( "batch_index is empty while the tensor isn't."); } std::unordered_set<int64_t> missing_tensors; if (batch_index_t.NumElements() != batch_index_t.dim_size(0) * 3) { return errors::InvalidArgument( "batch_index should contain ", batch_index_t.dim_size(0) * 3, " elements. 
Received ", batch_index_t.NumElements()); } const auto batch_index = batch_index_t.shaped<int64_t, 2>({batch_index_t.dim_size(0), 3}); for (int i = 0; i < batch_index_t.dim_size(0); ++i) { const int64_t batch_key = batch_index(i, 0); if (available_tensors_.find(batch_key) == available_tensors_.end()) { missing_tensors.emplace(batch_key); } } if (missing_tensors.empty()) { return OutputBatch(context, done); } if (!available_batches_ .emplace(batch_key, Batch{missing_tensors, context, done}) .second) { return errors::InvalidArgument( "Batch key with valid batch used twice."); } for (const int64_t i : missing_tensors) { if (!desired_tensor_to_batch_map_.emplace(i, batch_key).second) { return errors::InvalidArgument( "Missing tensor wanted by more than one batch."); } } } else { TensorShape output_shape(grad_t.shape()); output_shape.set_dim(0, 0); Tensor* output = nullptr; TF_RETURN_IF_ERROR(context->allocate_output(0, output_shape, &output)); done(); } auto desire_it = desired_tensor_to_batch_map_.find(batch_key); if (desire_it != desired_tensor_to_batch_map_.end()) { auto batch_it = available_batches_.find(desire_it->second); desired_tensor_to_batch_map_.erase(desire_it); if (batch_it == available_batches_.end()) { return errors::InvalidArgument("Batch no longer exists."); } batch_it->second.missing_tensors.erase(batch_key); if (batch_it->second.missing_tensors.empty()) { TF_RETURN_IF_ERROR( OutputBatch(batch_it->second.context, batch_it->second.done)); available_batches_.erase(batch_it); } } return absl::OkStatus(); } private: mutex mu_; struct Batch { std::unordered_set<int64_t> missing_tensors; OpKernelContext* context; AsyncOpKernel::DoneCallback done; }; std::unordered_map<int64_t, Batch> available_batches_; std::unordered_map<int64_t, Tensor> available_tensors_; std::unordered_map<int64_t, int64_t> desired_tensor_to_batch_map_; }; class UnbatchGradKernel : public AsyncOpKernel { public: explicit UnbatchGradKernel(OpKernelConstruction* c) : AsyncOpKernel(c) { OP_REQUIRES_OK(c, c->GetAttr("container", &container_)); OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_)); if (shared_name_.empty()) { shared_name_ = name(); } } void ComputeAsync(OpKernelContext* c, DoneCallback done) final { UnbatchGradResource* ubr; std::function<Status(UnbatchGradResource**)> creator = [](UnbatchGradResource** r) { *r = new UnbatchGradResource(); return absl::OkStatus(); }; OP_REQUIRES_OK_ASYNC(c, c->resource_manager()->LookupOrCreate( container_, shared_name_, &ubr, creator), done); Status status = ubr->Compute(c, done); ubr->Unref(); OP_REQUIRES_OK_ASYNC(c, status, done); } private: string container_; string shared_name_; }; REGISTER_KERNEL_BUILDER(Name("UnbatchGrad").Device(DEVICE_CPU), UnbatchGradKernel); }
#include "tensorflow/core/kernels/batch_kernels.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/common_runtime/rendezvous_mgr.h" #include "tensorflow/core/framework/device_factory.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/batch_kernel_test_util.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler.h" #include "tensorflow/core/kernels/batching_util/warmup.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/public/version.h" #include "tsl/platform/blocking_counter.h" #include "tsl/platform/criticality.h" #include "tsl/platform/errors.h" #include "tsl/platform/refcount.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace { using PerModelData = serving::WarmupStateRegistry::PerModelData; class BatchFunctionKernelTest : public test_util::BatchFunctionKernelTestBase { }; TEST_P(BatchFunctionKernelTest, EnableAdaptiveScheduler) { const bool adaptive_scheduler_enabled = GetParam(); TF_EXPECT_OK(Init(adaptive_scheduler_enabled)); BatchFunctionKernel *batch_kernel = dynamic_cast<BatchFunctionKernel *>(op_kernel()); EXPECT_EQ(adaptive_scheduler_enabled, test_util::BatchFunctionKernelTestAccess(batch_kernel) .enable_adaptive_batch_threads()); } INSTANTIATE_TEST_SUITE_P(Params, BatchFunctionKernelTest, ::testing::Bool()); class SharedBatchFunctionTestState : public OpsTestBase { public: void CreateFunctionLibraryRuntime() { pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(), nullptr, nullptr, nullptr, Rendezvous::Factory{[](const int64_t, const DeviceMgr *device_mgr, tsl::core::RefCountPtr<Rendezvous> *r) { *r = tsl::core::RefCountPtr<Rendezvous>( new IntraProcessRendezvous(device_mgr)); return absl::OkStatus(); }}); } protected: absl::StatusOr<NodeDefBuilder> CreateBatchFunctionBuilder( const std::vector<int> &allowed_batch_sizes, int max_batch_size, absl::string_view padding_policy, const TensorShape &expected_output_shape) { NameAttrList f; f.set_name("ShapeEnforcingFunction"); FunctionDef func = FunctionDefHelper::Create( f.name(), {"x:int64"}, {"o:int64"}, {}, {{{"o"}, "EnsureShape", {"x"}, {{"T", DataType::DT_INT64}, {"shape", expected_output_shape}}}}, {{"o", "o:output"}}); TF_RETURN_IF_ERROR(flib_def_->AddFunctionDef(func)); SharedBatchFunctionTestState::CreateFunctionLibraryRuntime(); std::vector<NodeDefBuilder::NodeOut> inputs( {NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64})}); return NodeDefBuilder(absl::StrCat("BatchTPUInput", padding_policy), "BatchFunction") .Attr("max_batch_size", max_batch_size) .Attr("num_batch_threads", 8) .Attr("allowed_batch_sizes", allowed_batch_sizes) .Attr("batch_timeout_micros", 1000000) .Attr("max_enqueued_batches", 10) 
.Attr("enable_large_batch_splitting", true) .Attr("batch_padding_policy", padding_policy) .Attr("Tin", {DataType::DT_INT64}) .Input(inputs) .Attr("Tcaptured", std::vector<DataType>{}) .Input(std::vector<NodeDefBuilder::NodeOut>{}) .Attr("Tout", std::vector<DataType>{DT_INT64}) .Attr("f", f); } }; class BatchFunctionTestState : public SharedBatchFunctionTestState { public: absl::Status Init(Device *device, bool enable_low_priority_queue, absl::string_view mixed_priority_policy, int64_t expected_batch_size) { device_ = device; const TensorShape expected_output_shape({expected_batch_size, 2}); TF_ASSIGN_OR_RETURN( NodeDefBuilder builder, CreateBatchFunctionBuilder({4, 8}, 8, "PAD_UP", expected_output_shape)); TF_RETURN_IF_ERROR(builder .Attr("low_priority_max_batch_size", enable_low_priority_queue ? 8 : 0) .Attr("low_priority_batch_timeout_micros", enable_low_priority_queue ? 2000000 : 0) .Attr("low_priority_allowed_batch_sizes", enable_low_priority_queue ? std::vector<int>{4, 8} : std::vector<int>()) .Attr("low_priority_max_enqueued_batches", enable_low_priority_queue ? 2 : 0) .Attr("mixed_priority_policy", mixed_priority_policy) .Finalize(node_def())); return OpsTestBase::InitOp(); } void TestBody() override {} }; class BatchFunctionTest : public ::testing::TestWithParam<bool> { protected: void SetUp() override { cpu_device_ = DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"); } std::unique_ptr<Device> cpu_device_; }; TEST_P(BatchFunctionTest, BatchingWorksWithoutCriticality) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); bool enable_low_priority_queue = GetParam(); { tsl::BlockingCounter blocking_counter(8); for (int i = 0; i < 8; ++i) { Env::Default()->SchedClosure([&]() { ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCritical); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 8)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } TEST_P(BatchFunctionTest, PaddingWorksWithoutCriticality) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); bool enable_low_priority_queue = GetParam(); { tsl::BlockingCounter blocking_counter(2); for (int i = 0; i < 2; ++i) { Env::Default()->SchedClosure([&]() { ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCritical); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } #if defined(PLATFORM_GOOGLE) TEST_P(BatchFunctionTest, LowPriorityTaskPaddingHighPriorityBatchUptoMaxBatchSize) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); 
bool enable_low_priority_queue = GetParam(); { tsl::BlockingCounter blocking_counter(8); for (int i = 0; i < 4; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCriticalPlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCriticalPlus); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 8)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } for (int i = 0; i < 4; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 8)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } TEST_P(BatchFunctionTest, LowPriorityTaskPaddingHighPriorityBatchWithExtraPadding) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); bool enable_low_priority_queue = GetParam(); { tsl::BlockingCounter blocking_counter(2); Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCriticalPlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCriticalPlus); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK( test_state.Init(cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK( test_state.Init(cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); blocking_counter.Wait(); } } TEST_P(BatchFunctionTest, LowPriorityTaskPaddingHighPriorityBatchUptoNextAllowedBatchSize) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); 
session_metadata.set_version(123); bool enable_low_priority_queue = GetParam(); { tsl::BlockingCounter blocking_counter(4); for (int i = 0; i < 2; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCriticalPlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCriticalPlus); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } for (int i = 0; i < 2; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), enable_low_priority_queue, serving::kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } #endif INSTANTIATE_TEST_SUITE_P(BatchFunctionTest, BatchFunctionTest, ::testing::Bool()); #if defined(PLATFORM_GOOGLE) TEST_F(BatchFunctionTest, HighPriorityBatchNotPaddedWithLowPriorityTasks) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); { tsl::BlockingCounter blocking_counter(8); for (int i = 0; i < 4; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCriticalPlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCriticalPlus); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init(cpu_device_.get(), true, serving::kPriorityIsolationAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } for (int i = 0; i < 4; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init(cpu_device_.get(), true, serving::kPriorityIsolationAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } TEST_F(BatchFunctionTest, LowPriorityOnlyBatchAtMaxLowPriorityBatchSize) 
{ SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); { tsl::BlockingCounter blocking_counter(8); for (int i = 0; i < 8; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), true, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 8)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } TEST_F(BatchFunctionTest, LowPriorityBatchPaddedToLowPriorityAllowedBatchSize) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); { tsl::BlockingCounter blocking_counter(2); for (int i = 0; i < 2; ++i) { Env::Default()->SchedClosure([&]() { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); BatchFunctionTestState test_state; test_state.set_session_metadata(session_metadata); TF_ASSERT_OK(test_state.Init( cpu_device_.get(), true, serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue, 4)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( *test_state.GetOutput(0), test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } #endif class BatchFunctionKernelParallelWarmupTestState : public SharedBatchFunctionTestState { public: absl::Status Init(bool enable_splitting) { static auto *const cpu_device = []() { auto device = DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"); return device.release(); }(); device_ = cpu_device; const TensorShape expected_output_shape({2}); TF_ASSIGN_OR_RETURN( NodeDefBuilder builder, CreateBatchFunctionBuilder({2, 4, 8}, enable_splitting ? 
16 : 8, "PAD_UP", expected_output_shape)); TF_RETURN_IF_ERROR(builder.Finalize(node_def())); return OpsTestBase::InitOp(); } void TestBody() override {} }; class BatchFunctionKernelParallelWarmupTest : public ::testing::TestWithParam<bool> {}; TEST_P(BatchFunctionKernelParallelWarmupTest, ParallelWarmup) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); serving::WarmupStateRegistry::Key key(session_metadata.name(), session_metadata.version()); int num_requests = 16; bool enable_splitting = GetParam(); { auto per_model_data = std::make_unique<PerModelData>(); auto handle = serving::GetGlobalWarmupStateRegistry().Register( key, std::move(per_model_data)); tsl::BlockingCounter blocking_counter(num_requests); for (int i = 0; i < num_requests; ++i) { Env::Default()->SchedClosure([&]() { BatchFunctionKernelParallelWarmupTestState test; test.set_session_metadata(session_metadata); TF_CHECK_OK(test.Init(enable_splitting)); test.AddInputFromList<int64_t>(TensorShape({2}), {123, 456}); TF_CHECK_OK(test.RunOpKernel()); test::ExpectTensorEqual<int64_t>(*test.GetOutput(0), test::AsTensor<int64_t>({123, 456})); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } EXPECT_FALSE(serving::GetGlobalWarmupStateRegistry().Lookup(key)); { tsl::BlockingCounter blocking_counter(num_requests); for (int i = 0; i < num_requests; ++i) { Env::Default()->SchedClosure([&]() { BatchFunctionKernelParallelWarmupTestState test; test.set_session_metadata(session_metadata); TF_CHECK_OK(test.Init(enable_splitting)); test.AddInputFromList<int64_t>(TensorShape({2}), {123, 456}); EXPECT_FALSE(test.RunOpKernel().ok()); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } INSTANTIATE_TEST_SUITE_P(BatchFunctionKernelParallelWarmupTestSuite, BatchFunctionKernelParallelWarmupTest, ::testing::Bool()); class BatchFunctionKernelPaddingTestState : public SharedBatchFunctionTestState { public: absl::Status Init(absl::string_view padding_policy, int expected_batch_size) { static auto *const cpu_device = []() { auto device = DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"); return device.release(); }(); device_ = cpu_device; const TensorShape expected_output_shape({expected_batch_size, 2}); TF_RETURN_IF_ERROR(CreateBatchFunctionBuilder({4, 8}, 8, padding_policy, expected_output_shape) ->Finalize(node_def())); return OpsTestBase::InitOp(); } void TestBody() override {} }; class BatchFunctionKernelPaddingTest : public ::testing::TestWithParam<std::string> {}; TEST_P(BatchFunctionKernelPaddingTest, PadUp) { SessionMetadata session_metadata; session_metadata.set_name("test_model"); session_metadata.set_version(123); int64_t num_requests = 5; int64_t expected_batch_size = 0; std::string padding_policy = GetParam(); if (padding_policy == "PAD_UP") { expected_batch_size = 8; } else if (padding_policy == "BATCH_DOWN") { expected_batch_size = 4; } else if (padding_policy == "MINIMIZE_TPU_COST_PER_REQUEST") { expected_batch_size = 8; } else { FAIL() << "Unsupported padding policy: " << padding_policy; } { tsl::BlockingCounter blocking_counter(num_requests); for (int i = 0; i < num_requests; ++i) { Env::Default()->SchedClosure([&]() { BatchFunctionKernelPaddingTestState test_state; test_state.set_session_metadata(session_metadata); TF_CHECK_OK(test_state.Init(padding_policy, expected_batch_size)); test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456}); TF_EXPECT_OK(test_state.RunOpKernel()); test::ExpectTensorEqual<int64_t>( 
*test_state.GetOutput(0), test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2}))); blocking_counter.DecrementCount(); }); } blocking_counter.Wait(); } } INSTANTIATE_TEST_SUITE_P(BatchFunctionKernelPaddingTestSuite, BatchFunctionKernelPaddingTest, ::testing::Values("PAD_UP", "BATCH_DOWN", "MINIMIZE_TPU_COST_PER_REQUEST")); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batch_kernels.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batch_kernels_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
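The `ValidateAllowedBatchSizes` method in the batch kernel above enforces two constraints on the `allowed_batch_sizes` attribute: entries must be strictly increasing, and the final entry must equal `max_batch_size`. A minimal standalone sketch of the same check, using a hypothetical free function rather than the kernel's `Status`-returning member:

```cpp
#include <cassert>
#include <string>
#include <vector>

// Mirrors BatchKernel::ValidateAllowedBatchSizes: returns an empty string on
// success, otherwise a description of the violated constraint.
std::string CheckAllowedBatchSizes(const std::vector<int>& allowed_batch_sizes,
                                   int max_batch_size) {
  int last_size = 0;
  for (size_t i = 0; i < allowed_batch_sizes.size(); ++i) {
    const int size = allowed_batch_sizes[i];
    if (i > 0 && size <= last_size) {
      return "allowed_batch_sizes entries must be monotonically increasing";
    }
    if (i == allowed_batch_sizes.size() - 1 && size != max_batch_size) {
      return "final entry in allowed_batch_sizes must equal max_batch_size";
    }
    last_size = size;
  }
  return "";  // An empty list is accepted, matching the kernel.
}

int main() {
  assert(CheckAllowedBatchSizes({2, 4, 8}, 8).empty());   // valid
  assert(!CheckAllowedBatchSizes({4, 2, 8}, 8).empty());  // not increasing
  assert(!CheckAllowedBatchSizes({2, 4}, 8).empty());     // last entry != max_batch_size
  return 0;
}
```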
19c19b88-4648-4428-b84f-97d4a6024198
cpp
tensorflow/tensorflow
roll_op
tensorflow/compiler/tf2xla/kernels/roll_op.cc
tensorflow/core/kernels/roll_op_test.cc
#include <vector> #include "absl/strings/str_cat.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/slicing.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { namespace { class RollOp : public XlaOpKernel { public: explicit RollOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape input_shape = ctx->InputShape(0); xla::XlaOp shift = ctx->Input(1); const TensorShape shift_shape = ctx->InputShape(1); const TensorShape axis_shape = ctx->InputShape(2); int64_t input_dims = input_shape.dims(); OP_REQUIRES(ctx, input_dims >= 1, errors::InvalidArgument("input must be 1-D or higher")); OP_REQUIRES(ctx, shift_shape.dims() <= 1, errors::InvalidArgument( "shift must be a scalar or a 1-D vector. Found: ", shift_shape.DebugString())); OP_REQUIRES(ctx, axis_shape.dims() <= 1, errors::InvalidArgument( "axis must be a scalar or a 1-D vector. Found: ", shift_shape.DebugString())); OP_REQUIRES( ctx, shift_shape == axis_shape, errors::InvalidArgument("shift and axis must have the same size")); xla::Literal axis; OP_REQUIRES_OK(ctx, ctx->ConstantInput(2, &axis)); xla::XlaOp output = ctx->Input(0); xla::PrimitiveType shift_type = ctx->input_xla_type(1); int64_t num_axes = axis_shape.dims() == 0 ? 1 : axis_shape.dim_size(0); for (int64_t i = 0; i != num_axes; ++i) { int64_t cur_axis = axis_shape.dims() == 0 ? *axis.GetIntegralAsS64({}) : *axis.GetIntegralAsS64({i}); OP_REQUIRES(ctx, cur_axis >= -input_dims && cur_axis < input_dims, errors::InvalidArgument( absl::StrCat("axis ", cur_axis, " is out of range [-", input_dims, ", ", input_dims, ")."))); if (cur_axis < 0) { cur_axis += input_dims; } xla::XlaOp offset = shift_shape.dims() == 0 ? shift : xla::Reshape(xla::SliceInDim(shift, i, i + 1, 1, 0), {}); xla::XlaOp axis_size = xla::ConstantR0WithType( ctx->builder(), shift_type, input_shape.dim_size(cur_axis)); offset = ((offset % axis_size) + axis_size) % axis_size; xla::XlaOp concat = xla::ConcatInDim(ctx->builder(), {output, output}, cur_axis); std::vector<xla::XlaOp> start_indices( input_shape.dims(), xla::Zero(ctx->builder(), shift_type)); start_indices[cur_axis] = axis_size - offset; output = xla::DynamicSlice(concat, start_indices, input_shape.dim_sizes()); } ctx->SetOutput(0, output); } private: RollOp(const RollOp&) = delete; void operator=(const RollOp&) = delete; }; REGISTER_XLA_OP(Name("Roll").CompileTimeConstantInput("axis"), RollOp); } }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class RollOpTest : public OpsTestBase { protected: void MakeOp(DataType data_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "Roll") .Input(FakeInput(data_type)) .Input(FakeInput(index_type)) .Input(FakeInput(index_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(RollOpTest, ScalarIndices) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 3, 4}); AddInputFromArray<int32>(TensorShape({}), {3}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5})); test::FillValues<float>(&expected, {2, 3, 4, 0, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, ScalarIndices_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>(TensorShape({5}), {"a", "b", "c", "d", "e"}); AddInputFromArray<int32>(TensorShape({}), {3}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({5})); test::FillValues<tstring>(&expected, {"c", "d", "e", "a", "b"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, ScalarIndices_Complex) { MakeOp(DT_COMPLEX64, DT_INT32); AddInputFromArray<std::complex<float>>( TensorShape({5}), {std::complex<float>(0, 10), std::complex<float>(1, 11), std::complex<float>(2, 12), std::complex<float>(3, 13), std::complex<float>(4, 14)}); AddInputFromArray<int32>(TensorShape({}), {3}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_COMPLEX64, TensorShape({5})); test::FillValues<std::complex<float>>( &expected, {std::complex<float>(2, 12), std::complex<float>(3, 13), std::complex<float>(4, 14), std::complex<float>(0, 10), std::complex<float>(1, 11)}); test::ExpectTensorEqual<std::complex<float>>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_TwoD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({3, 5}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({2}), {2, -1}); AddInputFromArray<int32>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3, 5})); test::FillValues<float>(&expected, {6, 7, 8, 9, 5, 11, 12, 13, 14, 10, 1, 2, 3, 4, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_TwoD32_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>(TensorShape({3, 5}), {"a", "b", "c", "d", "e", "f", "g", "h", "i", 
"j", "k", "l", "m", "n", "o"}); AddInputFromArray<int32>(TensorShape({2}), {2, -1}); AddInputFromArray<int32>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3, 5})); test::FillValues<tstring>(&expected, {"g", "h", "i", "j", "f", "l", "m", "n", "o", "k", "b", "c", "d", "e", "a"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_ThreeD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({2, 2, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); AddInputFromArray<int32>(TensorShape({3}), {1, -1, -1}); AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 3})); test::FillValues<float>(&expected, {10, 11, 9, 7, 8, 6, 4, 5, 3, 1, 2, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_ThreeD32_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>( TensorShape({2, 2, 3}), {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"}); AddInputFromArray<int32>(TensorShape({3}), {1, -1, -1}); AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({2, 2, 3})); test::FillValues<tstring>( &expected, {"k", "l", "j", "h", "i", "g", "e", "f", "d", "b", "c", "a"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_TwoD64) { MakeOp(DT_FLOAT, DT_INT64); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int64_t>(TensorShape({2}), {-1, 4}); AddInputFromArray<int64_t>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {5, 3, 4, 8, 6, 7, 11, 9, 10, 14, 12, 13, 2, 0, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_TwoD64_NoMemcpy) { MakeOp(DT_STRING, DT_INT64); AddInputFromArray<tstring>(TensorShape({5, 3}), {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o"}); AddInputFromArray<int64_t>(TensorShape({2}), {-1, 4}); AddInputFromArray<int64_t>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({5, 3})); test::FillValues<tstring>(&expected, {"f", "d", "e", "i", "g", "h", "l", "j", "k", "o", "m", "n", "c", "a", "b"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_ThreeD64) { MakeOp(DT_FLOAT, DT_INT64); AddInputFromArray<float>(TensorShape({4, 1, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); AddInputFromArray<int64_t>(TensorShape({3}), {4, 3, 2}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 1, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 1, 3})); test::FillValues<float>(&expected, {1, 2, 0, 4, 5, 3, 7, 8, 6, 10, 11, 9}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Simple_ThreeD64_NoMemcpy) { MakeOp(DT_STRING, DT_INT64); AddInputFromArray<tstring>( TensorShape({4, 1, 3}), {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"}); AddInputFromArray<int64_t>(TensorShape({3}), {4, 3, 2}); AddInputFromArray<int64_t>(TensorShape({3}), {0, 1, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({4, 1, 3})); test::FillValues<tstring>( &expected, {"b", "c", "a", "e", "f", "d", "h", "i", "g", "k", "l", "j"}); 
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, ZeroShift_ThreeD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({2, 2, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 3})); test::FillValues<float>(&expected, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, ZeroShift_ThreeD32_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>( TensorShape({2, 2, 3}), {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"}); AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0}); AddInputFromArray<int32>(TensorShape({3}), {0, 1, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({2, 2, 3})); test::FillValues<tstring>( &expected, {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, ZeroSize_ThreeD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 0, 0}), {}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 0, 0})); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, ZeroSize_ThreeD32_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>(TensorShape({5, 0, 0}), {}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({5, 0, 0})); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, OneSize_ThreeD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({1, 1, 1}), {5}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1})); test::FillValues<float>(&expected, {5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, OneSize_ThreeD32_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>(TensorShape({1, 1, 1}), {"a"}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({1, 1, 1})); test::FillValues<tstring>(&expected, {"a"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, MultiShifts_TwoD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({3, 5}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({4}), {-2, 2, -1, 1}); AddInputFromArray<int32>(TensorShape({4}), {1, 0, 0, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3, 5})); test::FillValues<float>(&expected, {11, 12, 13, 14, 10, 1, 2, 3, 4, 0, 6, 7, 8, 9, 5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(RollOpTest, MultiShifts_TwoD32_NoMemcpy) { MakeOp(DT_STRING, DT_INT32); AddInputFromArray<tstring>(TensorShape({3, 5}), {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o"}); AddInputFromArray<int32>(TensorShape({4}), {-2, 2, -1, 1}); AddInputFromArray<int32>(TensorShape({4}), {1, 0, 
0, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({3, 5})); test::FillValues<tstring>(&expected, {"l", "m", "n", "o", "k", "b", "c", "d", "e", "a", "g", "h", "i", "j", "f"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(RollOpTest, Error_InputMustBeVectorOrHigher) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({}), {7}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {0}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "input must be 1-D or higher")) << s; } TEST_F(RollOpTest, Error_AxisMustBeScalarOrVector) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({1, 2}), {0, 1}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "axis must be a scalar or a 1-D vector")) << s; } TEST_F(RollOpTest, Error_ShiftMustBeScalarOrVector) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<int32>(TensorShape({1, 2}), {0, 1}); AddInputFromArray<int32>(TensorShape({}), {1}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "shift must be a scalar or a 1-D vector")) << s; } TEST_F(RollOpTest, Error_ShiftAndAxisMustBeSameSize) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<int32>(TensorShape({1}), {1}); AddInputFromArray<int32>(TensorShape({2}), {0, 1}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "shift and axis must have the same size")) << s; } TEST_F(RollOpTest, Error_AxisOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {1}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains(s.ToString(), "is out of range")) << s; } static Graph* RollGraph(const TensorShape& shape, int isd) { Graph* g = new Graph(OpRegistry::Global()); Tensor input(DT_FLOAT, shape); input.flat<float>().setRandom(); const int dims = static_cast<int>(input.dims()); Tensor shift(DT_INT32, TensorShape({dims})); for (int i = 0; i < dims; i++) { shift.flat<int32>()(i) = (i <= isd) ? 
2 : 0; } Tensor axis(DT_INT32, TensorShape({dims})); for (int i = 0; i < dims; i++) { axis.flat<int32>()(i) = i; } test::graph::Roll(g, test::graph::Constant(g, input), test::graph::Constant(g, shift), test::graph::Constant(g, axis)); return g; } #define BM_ROLL_OUTER(DEVICE) \ static void BM_##DEVICE##_roll_outer(::testing::benchmark::State& state) { \ const int rows = state.range(0); \ const int columns = state.range(1); \ \ TensorShape shape{rows, columns}; \ test::Benchmark(#DEVICE, RollGraph(shape, 0), false) \ .Run(state); \ const int64_t num_items = \ static_cast<int64_t>(state.iterations()) * shape.num_elements(); \ state.SetItemsProcessed(num_items); \ state.SetBytesProcessed(num_items * sizeof(float)); \ } \ BENCHMARK(BM_##DEVICE##_roll_outer) \ ->UseRealTime() \ ->ArgPair(256, 256) \ ->ArgPair(512, 512) \ ->ArgPair(1024, 1024) \ ->ArgPair(2048, 2048) #define BM_ROLL_ALL(DEVICE) \ static void BM_##DEVICE##_roll_all(::testing::benchmark::State& state) { \ const int rows = state.range(0); \ const int columns = state.range(1); \ \ TensorShape shape{rows, columns}; \ test::Benchmark(#DEVICE, RollGraph(shape, 1), false) \ .Run(state); \ const int64_t num_items = \ static_cast<int64_t>(state.iterations()) * shape.num_elements(); \ state.SetItemsProcessed(num_items); \ state.SetBytesProcessed(num_items * sizeof(float)); \ } \ BENCHMARK(BM_##DEVICE##_roll_all) \ ->UseRealTime() \ ->ArgPair(256, 256) \ ->ArgPair(512, 512) \ ->ArgPair(1024, 1024) \ ->ArgPair(2048, 2048) BM_ROLL_OUTER(cpu); BM_ROLL_ALL(cpu); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/roll_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/roll_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
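The XLA `RollOp` above normalizes each shift with `((offset % axis_size) + axis_size) % axis_size`, concatenates the input with itself along the rolled axis, and takes a dynamic slice starting at `axis_size - offset`. A small sketch of that index arithmetic (the helper name is illustrative, not part of the kernel), checked against the expectations in `RollOpTest.ScalarIndices`:

```cpp
#include <cassert>

// For a dimension of size axis_size, normalize a possibly negative shift into
// [0, axis_size) and return the start index used when slicing the tensor
// concatenated with itself, as in the XLA Roll kernel.
int RollSliceStart(int shift, int axis_size) {
  const int offset = ((shift % axis_size) + axis_size) % axis_size;
  return axis_size - offset;  // Equals axis_size (the second copy) when offset == 0.
}

int main() {
  // Rolling {0,1,2,3,4} by 3: slicing {0..4,0..4} from index 2 yields
  // {2,3,4,0,1}, matching RollOpTest.ScalarIndices.
  assert(RollSliceStart(3, 5) == 2);
  // A shift of -1 behaves like a shift of axis_size - 1.
  assert(RollSliceStart(-1, 5) == 1);
  // A zero shift slices the second copy unchanged.
  assert(RollSliceStart(0, 5) == 5);
  return 0;
}
```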
749cf212-4dad-46a0-ab63-46bc108d5073
cpp
tensorflow/tensorflow
quantized_reshape_op
tensorflow/core/kernels/quantized_reshape_op.cc
tensorflow/core/kernels/quantized_reshape_op_test.cc
#include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_op.h" namespace tensorflow { class QuantizedReshapeOp : public ReshapeOp { public: explicit QuantizedReshapeOp(OpKernelConstruction* c) : ReshapeOp(c) {} void Compute(OpKernelContext* ctx) override { ReshapeOp::Compute(ctx); if (!ctx->status().ok()) { return; } const auto& input_min_float_tensor = ctx->input(2); const auto& input_min_float_shape = input_min_float_tensor.shape(); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(input_min_float_shape) || (TensorShapeUtils::IsVector(input_min_float_shape) && (input_min_float_shape.dim_size(0) == 1)), errors::InvalidArgument( "input_min must be a scalar or a vector of 1 element")); const float input_min_float = input_min_float_tensor.flat<float>()(0); const auto& input_max_float_tensor = ctx->input(3); const auto& input_max_float_shape = input_max_float_tensor.shape(); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(input_max_float_shape) || (TensorShapeUtils::IsVector(input_max_float_shape) && (input_max_float_shape.dim_size(0) == 1)), errors::InvalidArgument( "input_max must be a scalar or a vector of 1 element")); const float input_max_float = input_max_float_tensor.flat<float>()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); output_min->flat<float>()(0) = input_min_float; Tensor* output_max = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); output_max->flat<float>()(0) = input_max_float; } }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedReshape") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .TypeConstraint<type>("T"), \ QuantizedReshapeOp) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); #undef REGISTER_CPU_KERNEL }
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { class QuantizedReshapeTest : public OpsTestBase { protected: QuantizedReshapeTest() {} }; TEST_F(QuantizedReshapeTest, Reshape) { TF_ASSERT_OK(NodeDefBuilder("quantized_reshape", "QuantizedReshape") .Input(FakeInput(DT_QUINT8)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); Tensor input(DT_QUINT8, {10, 20}); Tensor expected(DT_QUINT8, {5, 10, 4}); for (int i = 0; i < input.shape().num_elements(); ++i) { input.flat<quint8>()(i) = quint8(i); expected.flat<quint8>()(i) = quint8(i); } AddInputFromArray<quint8>(input.shape(), input.flat<quint8>()); AddInputFromList<int32>({3}, {5, 10, 4}); AddInputFromArray<float>(TensorShape({1}), {-10}); AddInputFromArray<float>(TensorShape({1}), {20}); TF_ASSERT_OK(RunOpKernel()); EXPECT_EQ(-10, GetOutput(1)->flat<float>()(0)); EXPECT_EQ(20, GetOutput(2)->flat<float>()(0)); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_reshape_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_reshape_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
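`QuantizedReshapeOp` above only adds range handling on top of `ReshapeOp`: it validates that `input_min`/`input_max` are scalars or length-1 vectors and copies them to the outputs unchanged, since reshaping does not alter the stored quantized values or their range. A minimal sketch of that shape validation, assuming a plain dimension vector in place of `TensorShape`:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Accepts a scalar (rank 0) or a vector with exactly one element, mirroring
// the checks applied to input_min and input_max in QuantizedReshapeOp.
bool IsValidQuantizationRangeShape(const std::vector<int64_t>& dims) {
  return dims.empty() || (dims.size() == 1 && dims[0] == 1);
}

int main() {
  assert(IsValidQuantizationRangeShape({}));       // scalar, as in TensorShape({})
  assert(IsValidQuantizationRangeShape({1}));      // length-1 vector, as in the test
  assert(!IsValidQuantizationRangeShape({2}));     // rejected: two elements
  assert(!IsValidQuantizationRangeShape({1, 1}));  // rejected: rank 2
  return 0;
}
```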
58fa6423-d526-43fc-b63e-1b2185a19835
cpp
tensorflow/tensorflow
mfcc_mel_filterbank
tensorflow/lite/kernels/internal/mfcc_mel_filterbank.cc
tensorflow/core/kernels/mfcc_mel_filterbank_test.cc
#include "tensorflow/lite/kernels/internal/mfcc_mel_filterbank.h" #include <math.h> namespace tflite { namespace internal { MfccMelFilterbank::MfccMelFilterbank() : initialized_(false) {} bool MfccMelFilterbank::Initialize(int input_length, double input_sample_rate, int output_channel_count, double lower_frequency_limit, double upper_frequency_limit) { num_channels_ = output_channel_count; sample_rate_ = input_sample_rate; input_length_ = input_length; if (num_channels_ < 1) { return false; } if (sample_rate_ <= 0) { return false; } if (input_length < 2) { return false; } if (lower_frequency_limit < 0) { return false; } if (upper_frequency_limit <= lower_frequency_limit) { return false; } center_frequencies_.resize(num_channels_ + 1); const double mel_low = FreqToMel(lower_frequency_limit); const double mel_hi = FreqToMel(upper_frequency_limit); const double mel_span = mel_hi - mel_low; const double mel_spacing = mel_span / static_cast<double>(num_channels_ + 1); for (int i = 0; i < num_channels_ + 1; ++i) { center_frequencies_[i] = mel_low + (mel_spacing * (i + 1)); } const double hz_per_sbin = 0.5 * sample_rate_ / static_cast<double>(input_length_ - 1); start_index_ = static_cast<int>(1.5 + (lower_frequency_limit / hz_per_sbin)); end_index_ = static_cast<int>(upper_frequency_limit / hz_per_sbin); band_mapper_.resize(input_length_); int channel = 0; for (int i = 0; i < input_length_; ++i) { double melf = FreqToMel(i * hz_per_sbin); if ((i < start_index_) || (i > end_index_)) { band_mapper_[i] = -2; } else { while ((channel < num_channels_) && (center_frequencies_[channel] < melf)) { ++channel; } band_mapper_[i] = channel - 1; } } weights_.resize(input_length_); for (int i = 0; i < input_length_; ++i) { channel = band_mapper_[i]; if ((i < start_index_) || (i > end_index_)) { weights_[i] = 0.0; } else { if (channel >= 0) { weights_[i] = (center_frequencies_[channel + 1] - FreqToMel(i * hz_per_sbin)) / (center_frequencies_[channel + 1] - center_frequencies_[channel]); } else { weights_[i] = (center_frequencies_[0] - FreqToMel(i * hz_per_sbin)) / (center_frequencies_[0] - mel_low); } } } std::vector<int> bad_channels; for (int c = 0; c < num_channels_; ++c) { float band_weights_sum = 0.0; for (int i = 0; i < input_length_; ++i) { if (band_mapper_[i] == c - 1) { band_weights_sum += (1.0 - weights_[i]); } else if (band_mapper_[i] == c) { band_weights_sum += weights_[i]; } } if (band_weights_sum < 0.5) { bad_channels.push_back(c); } } if (!bad_channels.empty()) { } initialized_ = true; return true; } void MfccMelFilterbank::Compute(const std::vector<double> &input, std::vector<double> *output) const { if (!initialized_) { return; } if (input.size() <= end_index_) { return; } output->assign(num_channels_, 0.0); for (int i = start_index_; i <= end_index_; i++) { double spec_val = sqrt(input[i]); double weighted = spec_val * weights_[i]; int channel = band_mapper_[i]; if (channel >= 0) (*output)[channel] += weighted; channel++; if (channel < num_channels_) (*output)[channel] += spec_val - weighted; } } double MfccMelFilterbank::FreqToMel(double freq) const { return 1127.0 * log1p(freq / 700.0); } } }
#include "tensorflow/core/kernels/mfcc_mel_filterbank.h" #include <limits> #include <vector> #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { TEST(MfccMelFilterbankTest, AgreesWithPythonGoldenValues) { MfccMelFilterbank filterbank; std::vector<double> input; const int kSampleCount = 513; input.reserve(kSampleCount); for (int i = 0; i < kSampleCount; ++i) { input.push_back(i + 1); } const int kChannelCount = 20; filterbank.Initialize( input.size(), 22050 , kChannelCount , 20.0 , 4000.0 ); std::vector<double> output; filterbank.Compute(input, &output); std::vector<double> expected = { 7.38894574, 10.30330648, 13.72703292, 17.24158686, 21.35253118, 25.77781089, 31.30624108, 37.05877236, 43.9436536, 51.80306637, 60.79867148, 71.14363376, 82.90910141, 96.50069158, 112.08428368, 129.96721968, 150.4277597, 173.74997634, 200.86037462, 231.59802942}; ASSERT_EQ(output.size(), kChannelCount); for (int i = 0; i < kChannelCount; ++i) { EXPECT_NEAR(output[i], expected[i], 1e-04); } } TEST(MfccMelFilterbankTest, IgnoresExistingContentOfOutputVector) { MfccMelFilterbank filterbank; const int kSampleCount = 513; std::vector<double> input; std::vector<double> output; filterbank.Initialize(kSampleCount, 22050 , 20 , 20.0 , 4000.0 ); input.assign(kSampleCount, 1.0); filterbank.Compute(input, &output); for (const double value : output) { EXPECT_LE(0.0, value); } input.assign(kSampleCount, 0.0); filterbank.Compute(input, &output); for (const double value : output) { EXPECT_EQ(0.0, value); } } TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxIntValue) { MfccMelFilterbank filterbank; const int kSampleCount = 513; std::size_t num_channels = std::numeric_limits<int>::max(); bool initialized = filterbank.Initialize( kSampleCount, 2 , num_channels , 1.0 , 5.0 ); EXPECT_FALSE(initialized); } TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxSize) { MfccMelFilterbank filterbank; const int kSampleCount = 513; std::size_t num_channels = std::vector<double>().max_size() + 1; bool initialized = filterbank.Initialize( kSampleCount, 2 , num_channels , 1.0 , 5.0 ); EXPECT_FALSE(initialized); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/mfcc_mel_filterbank.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_mel_filterbank_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
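The filterbank above places `num_channels + 1` band edges evenly on the mel scale between the lower and upper frequency limits, using `FreqToMel(f) = 1127 * log1p(f / 700)`. A standalone sketch of that center-frequency setup (hypothetical helper names; it reproduces only the spacing logic from `Initialize`, not the per-bin weights):

```cpp
#include <cmath>
#include <vector>

// Same mel mapping as MfccMelFilterbank::FreqToMel.
double FreqToMel(double freq) { return 1127.0 * std::log1p(freq / 700.0); }

// Mirrors the center_frequencies_ computation in Initialize: num_channels + 1
// points evenly spaced on the mel scale, strictly above the lower limit and
// ending exactly at the upper limit.
std::vector<double> MelBandEdges(int num_channels, double lower_hz, double upper_hz) {
  const double mel_low = FreqToMel(lower_hz);
  const double mel_hi = FreqToMel(upper_hz);
  const double mel_spacing =
      (mel_hi - mel_low) / static_cast<double>(num_channels + 1);
  std::vector<double> edges(num_channels + 1);
  for (int i = 0; i < num_channels + 1; ++i) {
    edges[i] = mel_low + mel_spacing * (i + 1);
  }
  return edges;  // Values are in mels, as stored by the filterbank.
}
```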
764c94d8-d1a3-4617-8303-2918aaf03f58
cpp
tensorflow/tensorflow
training_ops
tensorflow/compiler/tf2xla/kernels/training_ops.cc
tensorflow/core/ops/training_ops_test.cc
#include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/math.h" #include "xla/hlo/builder/xla_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { class ResourceApplyGradientDescent : public XlaOpKernel { public: explicit ResourceApplyGradientDescent(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { xla::XlaOp handle; DataType type = ctx->input_type(1); TensorShape var_shape; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &handle)); TensorShape alpha_shape = ctx->InputShape(1); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape), errors::InvalidArgument("alpha is not a scalar: ", alpha_shape.DebugString())); TensorShape delta_shape = ctx->InputShape(2); OP_REQUIRES( ctx, var_shape.IsSameSize(delta_shape), errors::InvalidArgument("var and delta do not have the same shape: ", var_shape.DebugString(), " vs ", delta_shape.DebugString())); handle = handle - ctx->Input(1) * ctx->Input(2); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, handle)); } }; REGISTER_XLA_OP(Name("ResourceApplyGradientDescent") .TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyGradientDescent); xla::XlaOp ProximalGradientDescentUpdate(xla::XlaOp var, xla::XlaOp lr, xla::XlaOp l1, xla::XlaOp l2, xla::XlaOp grad) { xla::XlaOp one = xla::ScalarLike(lr, 1.0); xla::XlaOp zero = xla::ScalarLike(lr, 0.0); xla::XlaOp prox_var = var - grad * lr; xla::XlaOp l1_gt_zero = xla::Sign(prox_var) * xla::Max(xla::Abs(prox_var) - lr * l1, zero); xla::XlaOp l1_le_zero = prox_var; return xla::Select(xla::Gt(l1, zero), l1_gt_zero, l1_le_zero) / (one + lr * l2); } class ResourceApplyProximalGradientDescent : public XlaOpKernel { public: explicit ResourceApplyProximalGradientDescent(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { xla::XlaOp var; TensorShape var_shape; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); TensorShape alpha_shape = ctx->InputShape(1); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape), errors::InvalidArgument("alpha is not a scalar: ", alpha_shape.DebugString())); TensorShape l1_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape), errors::InvalidArgument("l1 is not a scalar: ", l1_shape.DebugString())); TensorShape l2_shape = ctx->InputShape(3); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape), errors::InvalidArgument("l2 is not a scalar: ", l2_shape.DebugString())); TensorShape delta_shape = ctx->InputShape(4); OP_REQUIRES( ctx, var_shape.IsSameSize(delta_shape), errors::InvalidArgument("var and delta do not have the same shape: ", var_shape.DebugString(), " vs ", delta_shape.DebugString())); xla::XlaOp alpha = ctx->Input(1); xla::XlaOp l1 = ctx->Input(2); xla::XlaOp l2 = ctx->Input(3); xla::XlaOp delta = ctx->Input(4); var = ProximalGradientDescentUpdate(var, alpha, l1, l2, delta); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); } private: DataType dtype_; }; REGISTER_XLA_OP(Name("ResourceApplyProximalGradientDescent") .TypeConstraint("T", 
kFloatAndComplexTypes), ResourceApplyProximalGradientDescent); class ResourceApplyMomentum : public XlaOpKernel { public: explicit ResourceApplyMomentum(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("use_nesterov", &use_nesterov_)); } void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->input_type(2); TensorShape var_shape, accum_shape; xla::XlaOp var, accum; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, type, &accum_shape, &accum)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); TensorShape lr_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(3); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); TensorShape momentum_shape = ctx->InputShape(4); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(momentum_shape), errors::InvalidArgument("momentum is not a scalar: ", momentum_shape.DebugString())); xla::XlaOp lr = ctx->Input(2); xla::XlaOp grad = ctx->Input(3); xla::XlaOp momentum = ctx->Input(4); accum = accum * momentum + grad; if (use_nesterov_) { var = var - (grad * lr + accum * momentum * lr); } else { var = var - accum * lr; } OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, accum)); } private: bool use_nesterov_; }; REGISTER_XLA_OP(Name("ResourceApplyMomentum").TypeConstraint("T", kFloatTypes), ResourceApplyMomentum); class ResourceApplyKerasMomentum : public XlaOpKernel { public: explicit ResourceApplyKerasMomentum(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("use_nesterov", &use_nesterov_)); } void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->input_type(2); TensorShape var_shape, accum_shape; xla::XlaOp var, accum; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, type, &accum_shape, &accum)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); TensorShape lr_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(3); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); TensorShape momentum_shape = ctx->InputShape(4); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(momentum_shape), errors::InvalidArgument("momentum is not a scalar: ", momentum_shape.DebugString())); xla::XlaOp lr = ctx->Input(2); xla::XlaOp grad = ctx->Input(3); xla::XlaOp momentum = ctx->Input(4); accum = accum * momentum - grad * lr; if (use_nesterov_) { var = var + accum * momentum - grad * lr; } else { var = var + accum; } OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, accum)); } private: bool use_nesterov_; }; REGISTER_XLA_OP(Name("ResourceApplyKerasMomentum") .TypeConstraint("T", 
kFloatAndComplexTypes), ResourceApplyKerasMomentum); class ResourceApplyAdagrad : public XlaOpKernel { public: explicit ResourceApplyAdagrad(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("update_slots", &update_slots_)); } void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->input_type(2); TensorShape var_shape, accum_shape; xla::XlaOp var, accum; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, type, &accum_shape, &accum)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); TensorShape lr_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(3); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); xla::XlaOp lr = ctx->Input(2); xla::XlaOp grad = ctx->Input(3); if (update_slots_) { accum = accum + xla::Square(grad); } var = var - grad * lr * xla::Rsqrt(accum); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, accum)); } private: bool update_slots_; }; REGISTER_XLA_OP( Name("ResourceApplyAdagrad").TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyAdagrad); class ResourceApplyAdagradV2 : public XlaOpKernel { public: explicit ResourceApplyAdagradV2(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("update_slots", &update_slots_)); } void Compile(XlaOpKernelContext* ctx) override { DataType type = ctx->input_type(2); TensorShape var_shape, accum_shape; xla::XlaOp var, accum; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, type, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, type, &accum_shape, &accum)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); TensorShape lr_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); TensorShape epsilon_shape = ctx->InputShape(3); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape), errors::InvalidArgument("epsilon is not a scalar: ", epsilon_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(4); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); xla::XlaOp lr = ctx->Input(2); xla::XlaOp epsilon = ctx->Input(3); xla::XlaOp grad = ctx->Input(4); if (update_slots_) { accum = accum + xla::Square(grad); } var = var - grad * lr / (xla::Sqrt(accum) + epsilon); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, type, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, type, accum)); } private: bool update_slots_; }; REGISTER_XLA_OP( Name("ResourceApplyAdagradV2").TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyAdagradV2); class ResourceApplyProximalAdagrad : public XlaOpKernel { public: explicit ResourceApplyProximalAdagrad(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { 
TensorShape var_shape, accum_shape; xla::XlaOp var, accum; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &accum_shape, &accum)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); TensorShape lr_shape = ctx->InputShape(2); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); TensorShape l1_shape = ctx->InputShape(3); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l1_shape), errors::InvalidArgument("l1 is not a scalar: ", l1_shape.DebugString())); TensorShape l2_shape = ctx->InputShape(4); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l2_shape), errors::InvalidArgument("l2 is not a scalar: ", l2_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(5); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape: ", var_shape.DebugString(), " vs ", grad_shape.DebugString())); xla::XlaOp lr = ctx->Input(2); xla::XlaOp l1 = ctx->Input(3); xla::XlaOp l2 = ctx->Input(4); xla::XlaOp grad = ctx->Input(5); accum = accum + xla::Square(grad); xla::XlaOp adagrad_lr = lr * xla::Rsqrt(accum); var = ProximalGradientDescentUpdate(var, adagrad_lr, l1, l2, grad); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, accum)); } private: DataType dtype_; }; REGISTER_XLA_OP(Name("ResourceApplyProximalAdagrad") .TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyProximalAdagrad); class ResourceApplyAdagradDA : public XlaOpKernel { public: explicit ResourceApplyAdagradDA(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape var_shape, accum_shape, squared_accum_shape; xla::XlaOp var, accum, squared_accum; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &accum_shape, &accum)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &squared_accum_shape, &squared_accum)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); OP_REQUIRES( ctx, var_shape.IsSameSize(squared_accum_shape), errors::InvalidArgument( "var and squared accum do not have the same shape", var_shape.DebugString(), " ", squared_accum_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(3); TensorShape lr_shape = ctx->InputShape(4); TensorShape l1_shape = ctx->InputShape(5); TensorShape l2_shape = ctx->InputShape(6); TensorShape global_step_shape = ctx->InputShape(7); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l1_shape), errors::InvalidArgument("l1 is not a scalar: ", l1_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l2_shape), errors::InvalidArgument("l2 is not a scalar: ", l2_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(global_step_shape), errors::InvalidArgument("global step is not a 
scalar: ", global_step_shape.DebugString())); xla::XlaOp grad = ctx->Input(3); xla::XlaOp lr = ctx->Input(4); xla::XlaOp l1 = ctx->Input(5); xla::XlaOp l2 = ctx->Input(6); xla::XlaOp global_step = XlaHelpers::ConvertElementType(ctx->Input(7), dtype_); accum = accum + grad; squared_accum = squared_accum + xla::Square(grad); xla::XlaOp zero = xla::ScalarLike(lr, 0.0); xla::XlaOp denominator = global_step * lr * l2 + xla::Sqrt(squared_accum); xla::XlaOp l1_le_zero = -lr * accum / denominator; xla::XlaOp l1_gt_zero = -lr * xla::Sign(accum) * xla::Max(xla::Abs(accum) - global_step * l1, zero) / denominator; var = xla::Select(xla::Gt(l1, zero), l1_gt_zero, l1_le_zero); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, accum)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, squared_accum)); } private: DataType dtype_; }; REGISTER_XLA_OP(Name("ResourceApplyAdagradDA").TypeConstraint("T", kFloatTypes), ResourceApplyAdagradDA); class ResourceApplyAdam : public XlaOpKernel { public: explicit ResourceApplyAdam(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("use_nesterov", &use_nesterov_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape var_shape, m_shape, v_shape; xla::XlaOp var, m, v; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &m_shape, &m)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &v_shape, &v)); TensorShape beta1_power_shape = ctx->InputShape(3); TensorShape beta2_power_shape = ctx->InputShape(4); TensorShape lr_shape = ctx->InputShape(5); TensorShape beta1_shape = ctx->InputShape(6); TensorShape beta2_shape = ctx->InputShape(7); TensorShape epsilon_shape = ctx->InputShape(8); TensorShape grad_shape = ctx->InputShape(9); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta1_power_shape), errors::InvalidArgument("beta1_power is not a scalar: ", beta1_power_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta2_power_shape), errors::InvalidArgument("beta2_power is not a scalar: ", beta2_power_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar : ", lr_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta1_shape), errors::InvalidArgument("beta1 is not a scalar: ", beta1_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta2_shape), errors::InvalidArgument("beta2 is not a scalar: ", beta2_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape), errors::InvalidArgument("epsilon is not a scalar: ", epsilon_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(m_shape), errors::InvalidArgument("var and m do not have the same shape", var_shape.DebugString(), " ", m_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(v_shape), errors::InvalidArgument("var and v do not have the same shape", var_shape.DebugString(), " ", v_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); xla::XlaOp beta1_power = ctx->Input(3); xla::XlaOp beta2_power = ctx->Input(4); xla::XlaOp lr = ctx->Input(5); xla::XlaOp beta1 = ctx->Input(6); xla::XlaOp beta2 = ctx->Input(7); xla::XlaOp epsilon = ctx->Input(8); xla::XlaOp grad = ctx->Input(9); xla::XlaBuilder* b = 
ctx->builder(); xla::XlaOp one = XlaHelpers::FloatLiteral(b, dtype_, 1.0); xla::XlaOp alpha = lr * xla::Sqrt(one - beta2_power) / (one - beta1_power); auto m_t = m + (grad - m) * (one - beta1); v = v + (xla::Square(grad) - v) * (one - beta2); if (use_nesterov_) { var = var - alpha * (m_t * beta1 + (one - beta1) * grad) / (xla::Sqrt(v) + epsilon); } else { var = var - m_t * alpha / (xla::Sqrt(v) + epsilon); } OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, m_t)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, v)); } private: DataType dtype_; bool use_nesterov_; }; REGISTER_XLA_OP( Name("ResourceApplyAdam").TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyAdam); class ResourceApplyAdaMax : public XlaOpKernel { public: explicit ResourceApplyAdaMax(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape var_shape, m_shape, v_shape; xla::XlaOp var, m, v; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &m_shape, &m)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &v_shape, &v)); TensorShape beta1_power_shape = ctx->InputShape(3); TensorShape lr_shape = ctx->InputShape(4); TensorShape beta1_shape = ctx->InputShape(5); TensorShape beta2_shape = ctx->InputShape(6); TensorShape epsilon_shape = ctx->InputShape(7); TensorShape grad_shape = ctx->InputShape(8); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta1_power_shape), errors::InvalidArgument("beta1_power is not a scalar: ", beta1_power_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar : ", lr_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta1_shape), errors::InvalidArgument("beta1 is not a scalar: ", beta1_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta2_shape), errors::InvalidArgument("beta2 is not a scalar: ", beta2_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape), errors::InvalidArgument("epsilon is not a scalar: ", epsilon_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(m_shape), errors::InvalidArgument("var and m do not have the same shape", var_shape.DebugString(), " ", m_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(v_shape), errors::InvalidArgument("var and v do not have the same shape", var_shape.DebugString(), " ", v_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); xla::XlaOp beta1_power = ctx->Input(3); xla::XlaOp lr = ctx->Input(4); xla::XlaOp beta1 = ctx->Input(5); xla::XlaOp beta2 = ctx->Input(6); xla::XlaOp epsilon = ctx->Input(7); xla::XlaOp grad = ctx->Input(8); xla::XlaOp one = xla::ScalarLike(lr, 1.0); m = beta1 * m + (one - beta1) * grad; v = xla::Max(beta2 * v, xla::Abs(grad)); var = var - lr / (one - beta1_power) * (m / (v + epsilon)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, m)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, v)); } private: DataType dtype_; }; REGISTER_XLA_OP(Name("ResourceApplyAdaMax").TypeConstraint("T", kFloatTypes), ResourceApplyAdaMax); class ResourceApplyRMSProp : public XlaOpKernel { public: explicit ResourceApplyRMSProp(OpKernelConstruction* 
ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape var_shape, ms_shape, mom_shape, mg_shape; xla::XlaOp var, ms, mom, mg; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("var", dtype_, &var_shape, &var)); if (centered_) { OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("mg", dtype_, &mg_shape, &mg)); } OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("ms", dtype_, &ms_shape, &ms)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput("mom", dtype_, &mom_shape, &mom)); TensorShape lr_shape = ctx->InputShape("lr"); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); TensorShape rho_shape = ctx->InputShape("rho"); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(rho_shape), errors::InvalidArgument("rho is not a scalar: ", rho_shape.DebugString())); TensorShape momentum_shape = ctx->InputShape("momentum"); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(momentum_shape), errors::InvalidArgument("momentum is not a scalar: ", momentum_shape.DebugString())); TensorShape epsilon_shape = ctx->InputShape("epsilon"); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape), errors::InvalidArgument("epsilon is not a scalar: ", epsilon_shape.DebugString())); TensorShape grad_shape = ctx->InputShape("grad"); OP_REQUIRES(ctx, var_shape.IsSameSize(ms_shape), errors::InvalidArgument("var and ms do not have the same shape", var_shape.DebugString(), " ", ms_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(mom_shape), errors::InvalidArgument( "var and mom do not have the same shape", var_shape.DebugString(), " ", mom_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); xla::XlaOp lr = ctx->Input("lr"); xla::XlaOp rho = ctx->Input("rho"); xla::XlaOp momentum = ctx->Input("momentum"); xla::XlaOp epsilon = ctx->Input("epsilon"); xla::XlaOp grad = ctx->Input("grad"); xla::XlaOp one = xla::ScalarLike(ms, 1.0); xla::XlaOp new_ms = xla::Square(grad) * (one - rho) + ms * rho; xla::XlaOp denominator; if (centered_) { mg = grad * (one - rho) + mg * rho; denominator = new_ms - xla::Square(mg) + epsilon; } else { denominator = new_ms + epsilon; } xla::XlaOp new_mom = mom * momentum + grad * lr * xla::Rsqrt(denominator); xla::XlaOp new_var = var - new_mom; OP_REQUIRES_OK(ctx, ctx->AssignVariable("var", dtype_, new_var)); if (centered_) { OP_REQUIRES_OK(ctx, ctx->AssignVariable("mg", dtype_, mg)); } OP_REQUIRES_OK(ctx, ctx->AssignVariable("ms", dtype_, new_ms)); OP_REQUIRES_OK(ctx, ctx->AssignVariable("mom", dtype_, new_mom)); } protected: bool centered_ = false; private: DataType dtype_; }; REGISTER_XLA_OP( Name("ResourceApplyRMSProp").TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyRMSProp); class ResourceApplyCenteredRMSProp : public ResourceApplyRMSProp { public: explicit ResourceApplyCenteredRMSProp(OpKernelConstruction* ctx) : ResourceApplyRMSProp(ctx) { centered_ = true; } }; REGISTER_XLA_OP(Name("ResourceApplyCenteredRMSProp") .TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyCenteredRMSProp); void CompileFtrl(XlaOpKernelContext* ctx, DataType dtype, bool has_l2_shrinkage, bool multiply_linear_by_lr) { xla::XlaBuilder* b = ctx->builder(); TensorShape var_shape, accum_shape, linear_shape; xla::XlaOp var, accum, linear; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype, &var_shape, &var)); OP_REQUIRES_OK(ctx, 
ctx->ReadVariableInput(1, dtype, &accum_shape, &accum)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype, &linear_shape, &linear)); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(linear_shape), errors::InvalidArgument( "var and linear do not have the same shape", var_shape.DebugString(), " ", linear_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(3); TensorShape lr_shape = ctx->InputShape(4); TensorShape l1_shape = ctx->InputShape(5); TensorShape l2_shape = ctx->InputShape(6); TensorShape l2_shrinkage_shape; TensorShape lr_power_shape; if (has_l2_shrinkage) { l2_shrinkage_shape = ctx->InputShape(7); lr_power_shape = ctx->InputShape(8); } else { lr_power_shape = ctx->InputShape(7); } OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument("var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(l1_shape), errors::InvalidArgument("l1 is not a scalar: ", l1_shape.DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsScalar(l2_shape), errors::InvalidArgument("l2 is not a scalar: ", l2_shape.DebugString())); if (has_l2_shrinkage) { OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(l2_shrinkage_shape), errors::InvalidArgument("l2_shrinkage is not a scalar: ", l2_shrinkage_shape.DebugString())); } OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_power_shape), errors::InvalidArgument("lr_power is not a scalar: ", lr_power_shape.DebugString())); xla::XlaOp grad = ctx->Input(3); xla::XlaOp lr = ctx->Input(4); xla::XlaOp l1 = ctx->Input(5); xla::XlaOp l2 = ctx->Input(6); xla::XlaOp l2_shrinkage; xla::XlaOp lr_power; if (has_l2_shrinkage) { l2_shrinkage = ctx->Input(7); lr_power = ctx->Input(8); } else { lr_power = ctx->Input(7); } xla::XlaOp two = XlaHelpers::FloatLiteral(b, dtype, 2.0); xla::XlaOp grad_to_use; if (has_l2_shrinkage) { grad_to_use = grad + two * l2_shrinkage * var; } else { grad_to_use = grad; } xla::XlaOp new_accum = accum + xla::Square(grad); xla::XlaOp new_accum_lr_pow = xla::Pow(new_accum, -lr_power); xla::XlaOp accum_lr_pow = xla::Pow(accum, -lr_power); if (multiply_linear_by_lr) { linear = linear + grad_to_use * lr - (new_accum_lr_pow - accum_lr_pow) * var; } else { linear = linear + grad_to_use - (new_accum_lr_pow - accum_lr_pow) / lr * var; } xla::XlaOp linear_clipped = (multiply_linear_by_lr ? xla::Clamp(-l1 * lr, linear, l1 * lr) : xla::Clamp(-l1, linear, l1)); xla::XlaOp quadratic = (multiply_linear_by_lr ? 
new_accum_lr_pow + two * l2 * lr : new_accum_lr_pow / lr + two * l2); var = (linear_clipped - linear) / quadratic; accum = new_accum; OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype, accum)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype, linear)); } class ResourceApplyFtrl : public XlaOpKernel { public: explicit ResourceApplyFtrl(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); OP_REQUIRES_OK( ctx, ctx->GetAttr("multiply_linear_by_lr", &multiply_linear_by_lr_)); } void Compile(XlaOpKernelContext* ctx) override { CompileFtrl(ctx, dtype_, false, multiply_linear_by_lr_); } private: DataType dtype_; bool multiply_linear_by_lr_; }; REGISTER_XLA_OP(Name("ResourceApplyFtrl").TypeConstraint("T", kFloatTypes), ResourceApplyFtrl); class ResourceApplyFtrlV2 : public XlaOpKernel { public: explicit ResourceApplyFtrlV2(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); OP_REQUIRES_OK( ctx, ctx->GetAttr("multiply_linear_by_lr", &multiply_linear_by_lr_)); } void Compile(XlaOpKernelContext* ctx) override { CompileFtrl(ctx, dtype_, true, multiply_linear_by_lr_); } private: DataType dtype_; bool multiply_linear_by_lr_; }; REGISTER_XLA_OP(Name("ResourceApplyFtrlV2").TypeConstraint("T", kFloatTypes), ResourceApplyFtrlV2); class ResourceApplyAdadelta : public XlaOpKernel { public: explicit ResourceApplyAdadelta(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape var_shape, accum_shape, accum_update_shape; xla::XlaOp var, accum, accum_update; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &accum_shape, &accum)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(2, dtype_, &accum_update_shape, &accum_update)); TensorShape lr_shape = ctx->InputShape(3); TensorShape rho_shape = ctx->InputShape(4); TensorShape epsilon_shape = ctx->InputShape(5); TensorShape grad_shape = ctx->InputShape(6); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(rho_shape), errors::InvalidArgument("rho is not a scalar: ", rho_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(epsilon_shape), errors::InvalidArgument("epsilon is not a scalar: ", epsilon_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(accum_shape), errors::InvalidArgument( "var and accum do not have the same shape", var_shape.DebugString(), " ", accum_shape.DebugString())); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); xla::XlaOp lr = ctx->Input(3); xla::XlaOp rho = ctx->Input(4); xla::XlaOp epsilon = ctx->Input(5); xla::XlaOp grad = ctx->Input(6); xla::XlaBuilder* b = ctx->builder(); xla::XlaOp one = XlaHelpers::FloatLiteral(b, dtype_, 1.0); accum = rho * accum + (one - rho) * xla::Square(grad); xla::XlaOp update = xla::Sqrt(accum_update + epsilon) * xla::Rsqrt(accum + epsilon) * grad; accum_update = rho * accum_update + (one - rho) * xla::Square(update); var = var - update * lr; OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, accum)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(2, dtype_, accum_update)); } 
private: DataType dtype_; }; REGISTER_XLA_OP( Name("ResourceApplyAdadelta").TypeConstraint("T", kFloatAndComplexTypes), ResourceApplyAdadelta); class ResourceApplySignBase : public XlaOpKernel { public: explicit ResourceApplySignBase(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_)); } void Compile(XlaOpKernelContext* ctx) override { TensorShape var_shape, m_shape; xla::XlaOp var, m; OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &var_shape, &var)); OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(1, dtype_, &m_shape, &m)); OP_REQUIRES(ctx, var_shape.IsSameSize(m_shape), errors::InvalidArgument("var and m do not have the same shape", var_shape.DebugString(), " ", m_shape.DebugString())); TensorShape grad_shape = ctx->InputShape(6); OP_REQUIRES(ctx, var_shape.IsSameSize(grad_shape), errors::InvalidArgument( "var and grad do not have the same shape", var_shape.DebugString(), " ", grad_shape.DebugString())); CheckScalarParams(ctx); xla::XlaOp lr = ctx->Input(2); xla::XlaOp alpha = ctx->Input(3); xla::XlaOp sign_decay = ctx->Input(4); xla::XlaOp beta = ctx->Input(5); xla::XlaOp grad = ctx->Input(6); m = m * beta + grad * (xla::ScalarLike(beta, 1.0) - beta); xla::XlaOp decay = xla::Sign(grad) * xla::Sign(m) * sign_decay; xla::XlaOp grad_scale = ComputeGradientScale(alpha, decay); var = var - lr * grad_scale * grad; OP_REQUIRES_OK(ctx, ctx->AssignVariable(0, dtype_, var)); OP_REQUIRES_OK(ctx, ctx->AssignVariable(1, dtype_, m)); } virtual void CheckScalarParams(XlaOpKernelContext* ctx) { TensorShape lr_shape = ctx->InputShape(2); TensorShape sign_decay_shape = ctx->InputShape(4); TensorShape beta_shape = ctx->InputShape(5); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(lr_shape), errors::InvalidArgument("lr is not a scalar: ", lr_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(sign_decay_shape), errors::InvalidArgument("sign_decay is not a scalar: ", sign_decay_shape.DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(beta_shape), errors::InvalidArgument("beta is not a scalar: ", beta_shape.DebugString())); } virtual xla::XlaOp ComputeGradientScale(xla::XlaOp alpha, xla::XlaOp decay) = 0; private: DataType dtype_; }; class ResourceApplyAddSign : public ResourceApplySignBase { public: explicit ResourceApplyAddSign(OpKernelConstruction* ctx) : ResourceApplySignBase(ctx) {} void CheckScalarParams(XlaOpKernelContext* ctx) override { ResourceApplySignBase::CheckScalarParams(ctx); TensorShape alpha_shape = ctx->InputShape(3); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(alpha_shape), errors::InvalidArgument("alpha is not a scalar: ", alpha_shape.DebugString())); } xla::XlaOp ComputeGradientScale(xla::XlaOp alpha, xla::XlaOp decay) override { return alpha + decay; } }; REGISTER_XLA_OP(Name("ResourceApplyAddSign").TypeConstraint("T", kFloatTypes), ResourceApplyAddSign); class ResourceApplyPowerSign : public ResourceApplySignBase { public: explicit ResourceApplyPowerSign(OpKernelConstruction* ctx) : ResourceApplySignBase(ctx) {} void CheckScalarParams(XlaOpKernelContext* ctx) override { ResourceApplySignBase::CheckScalarParams(ctx); TensorShape logbase_shape = ctx->InputShape(3); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(logbase_shape), errors::InvalidArgument("logbase is not a scalar: ", logbase_shape.DebugString())); } xla::XlaOp ComputeGradientScale(xla::XlaOp alpha, xla::XlaOp decay) override { return xla::Exp(alpha * decay); } }; REGISTER_XLA_OP(Name("ResourceApplyPowerSign").TypeConstraint("T", kFloatTypes), 
ResourceApplyPowerSign); } }
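The kernels above build each optimizer update out of element-wise XLA expressions. As a minimal standalone sketch of the arithmetic that ResourceApplyAdam constructs (non-Nesterov branch), here is the same update written in plain scalar C++; the toy f(x) = x^2 gradient and the loop that accumulates beta1_power/beta2_power are illustrative assumptions, since the real kernel receives the beta powers as inputs and applies these expressions to whole tensors through XLA ops.

// Standalone sketch of the per-element arithmetic in ResourceApplyAdam above,
// written as plain scalar C++. Illustrative only; not part of the TensorFlow API.
#include <cmath>
#include <cstdio>

int main() {
  double var = 1.0, m = 0.0, v = 0.0;           // parameter and Adam slots
  const double lr = 0.001, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8;
  double beta1_power = 1.0, beta2_power = 1.0;  // passed in as inputs in the kernel

  for (int step = 1; step <= 3; ++step) {
    const double grad = 2.0 * var;              // toy gradient of f(x) = x^2
    beta1_power *= beta1;
    beta2_power *= beta2;
    // Mirrors the XLA expressions in Compile():
    const double alpha = lr * std::sqrt(1.0 - beta2_power) / (1.0 - beta1_power);
    m = m + (grad - m) * (1.0 - beta1);         // m_t
    v = v + (grad * grad - v) * (1.0 - beta2);
    var = var - m * alpha / (std::sqrt(v) + epsilon);
    std::printf("step %d: var = %.6f\n", step, var);
  }
  return 0;
}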
#include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { static void TestGradAndIndicesErrorHandling(const ShapeInferenceTestOp& op, string shape_spec_middle, const string& shape_spec_end = "") { auto shape_spec = [&shape_spec_middle, shape_spec_end]( const char* var_spec, const char* grad_indices_spec) { return strings::StrCat(var_spec, ";", shape_spec_middle, ";", grad_indices_spec, shape_spec_end); }; INFER_ERROR("Dimension 1 in both shapes must be equal", op, shape_spec("[?,1]", "[?,2];[?]")); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, shape_spec("?", "[2,?];[1]")); INFER_ERROR("must be equal rank", op, shape_spec("[1]", "[?,2];[?]")); INFER_ERROR("Shape must be rank 1 but is rank 2", op, shape_spec("[?]", "[?];[1,2]")); } TEST(TrainingOpsTest, ApplyGradientDescent_ShapeFn) { ShapeInferenceTestOp op("ApplyGradientDescent"); INFER_OK(op, "[1,?];[];[?,2]", "[d0_0,d2_1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?];?"); } TEST(TrainingOpsTest, ApplyProximalGradientDescent_ShapeFn) { ShapeInferenceTestOp op("ApplyProximalGradientDescent"); INFER_OK(op, "[1,?];[];[];[];[?,2]", "[d0_0,d4_1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[];[];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?"); } TEST(TrainingOpsTest, SparseApplyProximalGradientDescent_ShapeFn) { ShapeInferenceTestOp op("SparseApplyProximalGradientDescent"); INFER_OK(op, "[1,?];[];[];[];[?,2];[3]", "[d0_0,d4_1]"); TestGradAndIndicesErrorHandling(op, "[];[];[]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?"); } TEST(TrainingOpsTest, ApplyAdadelta_ShapeFn) { ShapeInferenceTestOp op("ApplyAdadelta"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[?,?,?,4]", "[d0_0,d1_1,d2_2,d6_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[2];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[1];[];[];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?"); } TEST(TrainingOpsTest, SparseApplyAdadelta_ShapeFn) { ShapeInferenceTestOp op("SparseApplyAdadelta"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[?,?,?,4];?", "[d0_0,d1_1,d2_2,d6_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[];[];[];[1];?"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[2];[];[];[];[1];?"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op, "[?,1];[?,1];[?,1];[];[];[];[?,2];?"); TestGradAndIndicesErrorHandling(op, "?;?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, 
"?;?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?"); } TEST(TrainingOpsTest, ApplyAdagrad_ShapeFn) { ShapeInferenceTestOp op("ApplyAdagrad"); INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3]", "[d0_0,d1_1,d3_2]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?"); } TEST(TrainingOpsTest, SparseApplyAdagrad_ShapeFn) { ShapeInferenceTestOp op("SparseApplyAdagrad"); INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3];?", "[d0_0,d1_1,d3_2]"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op, "[?,1];[?,2];[];[?,1];?"); INFER_ERROR("Shapes must be equal rank, but are 2 and 3", op, "[?,1];[?,1];[];[?,?,2];?"); TestGradAndIndicesErrorHandling(op, "?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?"); } TEST(TrainingOpsTest, ApplyProximalAdagrad_ShapeFn) { ShapeInferenceTestOp op("ApplyProximalAdagrad"); INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[?,?,3]", "[d0_0,d1_1,d5_2]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[];[];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?"); } TEST(TrainingOpsTest, SparseApplyProximalAdagrad_ShapeFn) { ShapeInferenceTestOp op("SparseApplyProximalAdagrad"); INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[?,?,3];?", "[d0_0,d1_1,d5_2]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[];[];[];[?,1];?"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op, "[?,1];[?,1];[];[];[];[?,2];?"); TestGradAndIndicesErrorHandling(op, "?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?"); } TEST(TrainingOpsTest, ApplyFtrl_ShapeFn) { ShapeInferenceTestOp op("ApplyFtrl"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[?,?,?,4];[];[];[];[]", "[d0_0,d1_1,d2_2,d3_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[1];[];[];[];[]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[2];[1];[];[];[];[]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[1];[2];[];[];[];[]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;?;[?]"); } TEST(TrainingOpsTest, SparseApplyFtrl_ShapeFn) { ShapeInferenceTestOp op("SparseApplyFtrl"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[?,?,?,4];?;[];[];[];[]", "[d0_0,d1_1,d2_2,d3_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[?,1];?;[];[];[];[]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[2];[?,1];?;[];[];[];[]"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", 
op, "[?,1];[?,1];[?,1];[?,2];?;[];[];[];[]"); TestGradAndIndicesErrorHandling(op, "?;?", ";?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;?;[?];?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;?;?;[?]"); } TEST(TrainingOpsTest, ApplyMomentum_ShapeFn) { ShapeInferenceTestOp op("ApplyMomentum"); INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3];[]", "[d0_0,d1_1,d3_2]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[];[1];[]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[];[2];[]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?]"); } TEST(TrainingOpsTest, SparseApplyMomentum_ShapeFn) { ShapeInferenceTestOp op("SparseApplyMomentum"); INFER_OK(op, "[1,?,?];[?,2,?];[];[?,?,3];?;[]", "[d0_0,d1_1,d3_2]"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op, "[?,1];[?,2];[];[?,1];?;[]"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op, "[?,1];[?,1];[];[?,2];?;[]"); TestGradAndIndicesErrorHandling(op, "?;?", ";?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?]"); } TEST(TrainingOpsTest, ApplyAdam_ShapeFn) { ShapeInferenceTestOp op("ApplyAdam"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[];[];[];[?,?,?,4]", "[d0_0,d1_1,d2_2,d9_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[];[];[];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[2];[];[];[];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[1];[];[];[];[];[];[];[2]"); const char err[] = "Shape must be rank 0 but is rank 1"; INFER_ERROR(err, op, "?;?;?;[?];?;?;?;?;?;?"); INFER_ERROR(err, op, "?;?;?;?;[?];?;?;?;?;?"); INFER_ERROR(err, op, "?;?;?;?;?;[?];?;?;?;?"); INFER_ERROR(err, op, "?;?;?;?;?;?;[?];?;?;?"); INFER_ERROR(err, op, "?;?;?;?;?;?;?;[?];?;?"); INFER_ERROR(err, op, "?;?;?;?;?;?;?;?;[?];?"); } TEST(TrainingOpsTest, ApplyRMSProp_ShapeFn) { ShapeInferenceTestOp op("ApplyRMSProp"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[];[?,?,?,4]", "[d0_0,d1_1,d2_2,d7_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[2];[];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[1];[];[];[];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?"); } TEST(TrainingOpsTest, SparseApplyRMSProp_ShapeFn) { ShapeInferenceTestOp op("SparseApplyRMSProp"); INFER_OK(op, "[1,?,?,?];[?,2,?,?];[?,?,3,?];[];[];[];[];[?,?,?,4];?", "[d0_0,d1_1,d2_2,d7_3]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[1];[];[];[];[];[1];?"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", 
op, "[1];[1];[2];[];[];[];[];[1];?"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 1 and 2", op, "[?,1];[?,1];[?,1];[];[];[];[];[?,2];?"); TestGradAndIndicesErrorHandling(op, "?;?;?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;?;[?];?;?"); } TEST(TrainingOpsTest, ApplyAddSign_ShapeFn) { ShapeInferenceTestOp op("ApplyAddSign"); INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[];[?,?,2]", "[d0_0,d1_1,d6_2]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[];[];[];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?"); } TEST(TrainingOpsTest, ApplyPowerSign_ShapeFn) { ShapeInferenceTestOp op("ApplyPowerSign"); INFER_OK(op, "[1,?,?];[?,2,?];[];[];[];[];[?,?,2]", "[d0_0,d1_1,d6_2]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[2];[];[];[];[];[1]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op, "[1];[1];[];[];[];[];[2]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[?];?;?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;[?];?;?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;[?];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;?;?;?;[?];?"); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/training_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/training_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
524d7df9-40ce-40ac-b402-546c47ddbe12
cpp
tensorflow/tensorflow
ragged_range_op
tensorflow/core/kernels/ragged_range_op.cc
tensorflow/core/kernels/ragged_range_op_test.cc
#include <cstdint> #include <limits> #include <memory> #include <string> #include <type_traits> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tsl/platform/errors.h" namespace tensorflow { using errors::InvalidArgument; template <typename T, typename SPLITS_TYPE> class RaggedRangeOp : public OpKernel { public: using OpKernel::OpKernel; void Compute(OpKernelContext* context) override { const Tensor& starts_in = context->input(0); const Tensor& limits_in = context->input(1); const Tensor& deltas_in = context->input(2); OP_REQUIRES(context, starts_in.shape().dims() <= 1, InvalidArgument("starts must be a scalar or vector")); OP_REQUIRES(context, limits_in.shape().dims() <= 1, InvalidArgument("limits must be a scalar or vector")); OP_REQUIRES(context, deltas_in.shape().dims() <= 1, InvalidArgument("deltas must be a scalar or vector")); bool broadcast_starts = starts_in.shape().dims() == 0; bool broadcast_limits = limits_in.shape().dims() == 0; bool broadcast_deltas = deltas_in.shape().dims() == 0; std::vector<int> in_sizes; if (!broadcast_starts) in_sizes.push_back(starts_in.shape().dim_size(0)); if (!broadcast_limits) in_sizes.push_back(limits_in.shape().dim_size(0)); if (!broadcast_deltas) in_sizes.push_back(deltas_in.shape().dim_size(0)); for (int i = 1; i < in_sizes.size(); ++i) { OP_REQUIRES(context, in_sizes[i] == in_sizes[i - 1], InvalidArgument("starts, limits, and deltas must have the " "same shape")); } SPLITS_TYPE nrows = in_sizes.empty() ? 1 : in_sizes[0]; const auto& starts = starts_in.flat<T>(); const auto& limits = limits_in.flat<T>(); const auto& deltas = deltas_in.flat<T>(); Tensor* rt_nested_splits_out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({nrows + 1}), &rt_nested_splits_out)); auto rt_nested_splits = rt_nested_splits_out->flat<SPLITS_TYPE>(); rt_nested_splits(0) = 0; for (int row = 0; row < nrows; ++row) { T start = broadcast_starts ? starts(0) : starts(row); T limit = broadcast_limits ? limits(0) : limits(row); T delta = broadcast_deltas ? deltas(0) : deltas(row); OP_REQUIRES(context, delta != 0, InvalidArgument("Requires delta != 0")); SPLITS_TYPE size; if (((delta > 0) && (limit < start)) || ((delta < 0) && (limit > start))) { size = 0; } else if constexpr (std::is_integral<T>::value) { size = Eigen::divup(Eigen::numext::abs(limit - start), Eigen::numext::abs(delta)); } else { auto size_auto = Eigen::numext::ceil(Eigen::numext::abs((limit - start) / delta)); OP_REQUIRES( context, size_auto <= std::numeric_limits<int64_t>::max(), errors::InvalidArgument("Requires ((limit - start) / delta) <= ", std::numeric_limits<int64_t>::max())); size = static_cast<SPLITS_TYPE>(size_auto); } OP_REQUIRES(context, size >= 0, InvalidArgument("Requires size >= 0")); OP_REQUIRES( context, size <= std::numeric_limits<SPLITS_TYPE>::max() - rt_nested_splits(row), InvalidArgument("The total range size overflowed. 
Consider using " "int64 instead of int32 for row_splits_dtype.")); rt_nested_splits(row + 1) = rt_nested_splits(row) + size; } SPLITS_TYPE nvals = rt_nested_splits(nrows); Tensor* rt_dense_values_out = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape({nvals}), &rt_dense_values_out)); auto rt_dense_values = rt_dense_values_out->flat<T>(); int value_index = 0; for (int row = 0; row < nrows; ++row) { SPLITS_TYPE row_size = rt_nested_splits(row + 1) - rt_nested_splits(row); T value = broadcast_starts ? starts(0) : starts(row); T delta = broadcast_deltas ? deltas(0) : deltas(row); for (SPLITS_TYPE i = 0; i < row_size; ++i) { rt_dense_values(value_index++) = T(value); value += delta; } } } }; #define REGISTER_CPU_KERNEL(TYPE) \ REGISTER_KERNEL_BUILDER(Name("RaggedRange") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint<int32>("Tsplits"), \ RaggedRangeOp<TYPE, int32>); \ REGISTER_KERNEL_BUILDER(Name("RaggedRange") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T") \ .TypeConstraint<int64_t>("Tsplits"), \ RaggedRangeOp<TYPE, int64>); TF_CALL_float(REGISTER_CPU_KERNEL); TF_CALL_double(REGISTER_CPU_KERNEL); TF_CALL_int32(REGISTER_CPU_KERNEL); TF_CALL_int64(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL }
#include <gtest/gtest.h> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class RaggedRangeOpTest : public ::tensorflow::OpsTestBase { protected: static constexpr int kSplitsOutput = 0; static constexpr int kValuesOutput = 1; template <typename T> void BuildRaggedRangeGraph() { const auto& dtype = DataTypeToEnum<T>::v(); TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedRange") .Input(FakeInput(dtype)) .Input(FakeInput(dtype)) .Input(FakeInput(dtype)) .Attr("T", dtype) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(RaggedRangeOpTest, IntValues) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5}); AddInputFromArray<int>(TensorShape({4}), {8, 7, 8, 1}); AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 4, 6, 6, 10})); test::ExpectTensorEqual<int>( *GetOutput(kValuesOutput), test::AsTensor<int>({0, 2, 4, 6, 5, 6, 5, 4, 3, 2})); } TEST_F(RaggedRangeOpTest, FloatValues) { BuildRaggedRangeGraph<float>(); AddInputFromArray<float>(TensorShape({4}), {0, 5, 8, 5}); AddInputFromArray<float>(TensorShape({4}), {8, 7, 8, 1}); AddInputFromArray<float>(TensorShape({4}), {2, 1, 1, -1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 4, 6, 6, 10})); test::ExpectTensorNear<float>( *GetOutput(kValuesOutput), test::AsTensor<float>({0, 2, 4, 6, 5, 6, 5, 4, 3, 2}), 0.1); } TEST_F(RaggedRangeOpTest, RangeSizeOverflow) { BuildRaggedRangeGraph<float>(); AddInputFromArray<float>(TensorShape({2}), {1.1, 0.1}); AddInputFromArray<float>(TensorShape({2}), {10.0, 1e10}); AddInputFromArray<float>(TensorShape({2}), {1, 1e-10}); EXPECT_EQ(absl::StrCat("Requires ((limit - start) / delta) <= ", std::numeric_limits<int64_t>::max()), RunOpKernel().message()); } TEST_F(RaggedRangeOpTest, BroadcastDeltas) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({3}), {0, 5, 8}); AddInputFromArray<int>(TensorShape({3}), {8, 7, 8}); AddInputFromArray<int>(TensorShape({}), {1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 8, 10, 10})); test::ExpectTensorEqual<int>( *GetOutput(kValuesOutput), test::AsTensor<int>({0, 1, 2, 3, 4, 5, 6, 7, 5, 6})); } TEST_F(RaggedRangeOpTest, BroadcastLimitsAndDeltas) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({}), {0}); AddInputFromArray<int>(TensorShape({3}), {3, 0, 2}); AddInputFromArray<int>(TensorShape({}), {1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 3, 3, 5})); test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput), test::AsTensor<int>({0, 1, 2, 0, 1})); } TEST_F(RaggedRangeOpTest, BroadcastStartsAndLimits) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({}), {0}); AddInputFromArray<int>(TensorShape({}), {12}); AddInputFromArray<int>(TensorShape({3}), {3, 4, 
5}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 4, 7, 10})); test::ExpectTensorEqual<int>( *GetOutput(kValuesOutput), test::AsTensor<int>({0, 3, 6, 9, 0, 4, 8, 0, 5, 10})); } TEST_F(RaggedRangeOpTest, AllScalarInputs) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({}), {0}); AddInputFromArray<int>(TensorShape({}), {5}); AddInputFromArray<int>(TensorShape({}), {1}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 5})); test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput), test::AsTensor<int>({0, 1, 2, 3, 4})); } TEST_F(RaggedRangeOpTest, InvalidArgsStarts) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({4, 1}), {0, 5, 8, 5}); AddInputFromArray<int>(TensorShape({4}), {8, 7, 8, 1}); AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1}); EXPECT_EQ("starts must be a scalar or vector", RunOpKernel().message()); } TEST_F(RaggedRangeOpTest, InvalidArgsLimits) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5}); AddInputFromArray<int>(TensorShape({4, 1}), {8, 7, 8, 1}); AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1}); EXPECT_EQ("limits must be a scalar or vector", RunOpKernel().message()); } TEST_F(RaggedRangeOpTest, InvalidArgsDeltas) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5}); AddInputFromArray<int>(TensorShape({4}), {8, 7, 8, 1}); AddInputFromArray<int>(TensorShape({4, 1}), {2, 1, 1, -1}); EXPECT_EQ("deltas must be a scalar or vector", RunOpKernel().message()); } TEST_F(RaggedRangeOpTest, InvalidArgsShapeMismatch) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5}); AddInputFromArray<int>(TensorShape({3}), {7, 8, 1}); AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1}); EXPECT_EQ("starts, limits, and deltas must have the same shape", RunOpKernel().message()); } TEST_F(RaggedRangeOpTest, InvalidArgsZeroDelta) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5}); AddInputFromArray<int>(TensorShape({4}), {7, 8, 8, 1}); AddInputFromArray<int>(TensorShape({4}), {2, 1, 0, -1}); EXPECT_EQ("Requires delta != 0", RunOpKernel().message()); } TEST_F(RaggedRangeOpTest, EmptyRangePositiveDelta) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({2}), {0, 5}); AddInputFromArray<int>(TensorShape({2}), {5, 0}); AddInputFromArray<int>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 3, 3})); test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput), test::AsTensor<int>({0, 2, 4})); } TEST_F(RaggedRangeOpTest, EmptyRangeNegativeDelta) { BuildRaggedRangeGraph<int>(); AddInputFromArray<int>(TensorShape({2}), {0, 5}); AddInputFromArray<int>(TensorShape({2}), {5, 0}); AddInputFromArray<int>(TensorShape({}), {-2}); TF_ASSERT_OK(RunOpKernel()); test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput), test::AsTensor<int64_t>({0, 0, 3})); test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput), test::AsTensor<int>({5, 3, 1})); } TEST_F(RaggedRangeOpTest, ShapeFn) { ShapeInferenceTestOp op("RaggedRange"); INFER_OK(op, "?;?;?", "[?];[?]"); INFER_OK(op, "[3];[3];[3]", "[4];[?]"); INFER_OK(op, "[3];[3];[]", "[4];[?]"); INFER_OK(op, "[3];[];[3]", "[4];[?]"); INFER_OK(op, "[];[3];[3]", "[4];[?]"); INFER_OK(op, "[];[];[]", "[2];[?]"); INFER_ERROR("Shape must be at most rank 1 but is 
rank 2", op, "[5,5];[5];[5]"); INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[5];[5,5];[5]"); INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[5];[5];[5,5]"); INFER_ERROR("Dimensions must be equal, but are 4 and 3", op, "[3];[4];[3]"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_range_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_range_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
877ed332-87ba-4632-88e8-c712cc99cafa
cpp
tensorflow/tensorflow
unary_ops_composition
tensorflow/compiler/tf2xla/kernels/unary_ops_composition.cc
tensorflow/compiler/tests/unary_ops_composition_test.cc
#include <functional> #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/strings/string_view.h" #include "tensorflow/compiler/tf2xla/kernels/elu_op.h" #include "tensorflow/compiler/tf2xla/kernels/relu_op.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/math.h" #include "xla/hlo/builder/xla_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { using XlaUnaryOpGenerator = std::function<xla::XlaOp(xla::XlaOp)>; using XlaOpGeneratorMap = absl::flat_hash_map<string, XlaUnaryOpGenerator>; void PopulateXlaOpGeneratorMap(XlaOpGeneratorMap* op_generator_map) { auto add_xla_op_generator = [&](std::string name, XlaUnaryOpGenerator xla_op_generator) { CHECK(op_generator_map->insert({name, xla_op_generator}).second); }; #define ADD_XLA_OP_GENERATOR(Name) add_xla_op_generator(#Name, xla::Name); ADD_XLA_OP_GENERATOR(Abs); ADD_XLA_OP_GENERATOR(Acos); ADD_XLA_OP_GENERATOR(Acosh); ADD_XLA_OP_GENERATOR(Asin); ADD_XLA_OP_GENERATOR(Asinh); ADD_XLA_OP_GENERATOR(Atan); ADD_XLA_OP_GENERATOR(Atanh); ADD_XLA_OP_GENERATOR(Ceil); ADD_XLA_OP_GENERATOR(Cos); ADD_XLA_OP_GENERATOR(Cosh); ADD_XLA_OP_GENERATOR(Expm1); ADD_XLA_OP_GENERATOR(Exp); ADD_XLA_OP_GENERATOR(Floor); add_xla_op_generator( "Inv", [](xla::XlaOp x) { return xla::ScalarLike(x, 1.0) / x; }); ADD_XLA_OP_GENERATOR(Log); ADD_XLA_OP_GENERATOR(Log1p); ADD_XLA_OP_GENERATOR(Neg); ADD_XLA_OP_GENERATOR(Reciprocal); add_xla_op_generator("Rint", xla::RoundToEven); ADD_XLA_OP_GENERATOR(Round); ADD_XLA_OP_GENERATOR(Rsqrt); add_xla_op_generator("Sigmoid", xla::Logistic); ADD_XLA_OP_GENERATOR(Sin); ADD_XLA_OP_GENERATOR(Sinh); ADD_XLA_OP_GENERATOR(Sqrt); ADD_XLA_OP_GENERATOR(Square); ADD_XLA_OP_GENERATOR(Tan); ADD_XLA_OP_GENERATOR(Tanh); ADD_XLA_OP_GENERATOR(Elu); ADD_XLA_OP_GENERATOR(Relu); ADD_XLA_OP_GENERATOR(Relu6); ADD_XLA_OP_GENERATOR(Selu); #undef ADD_XLA_OP_GENERATOR } const XlaOpGeneratorMap& GetXlaOpGeneratorMap() { static XlaOpGeneratorMap* result = []() { auto* result = new XlaOpGeneratorMap; PopulateXlaOpGeneratorMap(result); return result; }(); return *result; } class UnaryOpsCompositionOp : public XlaOpKernel { public: explicit UnaryOpsCompositionOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("op_names", &op_names_)); const XlaOpGeneratorMap& op_generator_map = GetXlaOpGeneratorMap(); for (absl::string_view op_name : op_names_) { OP_REQUIRES(ctx, op_generator_map.contains(op_name), errors::Unimplemented( op_name, " not supported in _UnaryOpsComposition")); } } void Compile(XlaOpKernelContext* ctx) override { xla::XlaOp x = ctx->Input(0); const XlaOpGeneratorMap& op_generator_map = GetXlaOpGeneratorMap(); for (absl::string_view op_name : op_names_) { x = op_generator_map.find(op_name)->second(x); } ctx->SetOutput(0, x); } private: std::vector<string> op_names_; }; REGISTER_XLA_OP(Name("_UnaryOpsComposition"), UnaryOpsCompositionOp); } }
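The kernel above is essentially "look the op names up in a generator map, then apply the generators left to right". Here is a plain C++ sketch of that same design with doubles in place of xla::XlaOp; the map contents and names are assumptions for illustration, not the TensorFlow kernel.

// Standalone sketch of the name -> generator composition pattern, illustrative only.
#include <cmath>
#include <cstdio>
#include <functional>
#include <map>
#include <string>
#include <vector>

int main() {
  using Unary = std::function<double(double)>;
  // Name -> generator map, analogous to XlaOpGeneratorMap above.
  const std::map<std::string, Unary> generators = {
      {"Sqrt", [](double x) { return std::sqrt(x); }},
      {"Tanh", [](double x) { return std::tanh(x); }},
      {"Relu", [](double x) { return x > 0.0 ? x : 0.0; }},
  };

  // Apply the named ops in order, feeding each result into the next.
  const std::vector<std::string> op_names = {"Sqrt", "Tanh"};
  double x = 81.0;
  for (const std::string& name : op_names) x = generators.at(name)(x);
  std::printf("result = %f\n", x);  // tanh(sqrt(81.0)) = tanh(9.0)
  return 0;
}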
#include <algorithm> #include <cmath> #include <memory> #include <string> #include <vector> #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/device_factory.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/util/port.h" #include "tsl/platform/status.h" namespace tensorflow { namespace { static bool Initialized = [] { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; return true; }(); class UnaryOpsCompositionTest : public OpsTestBase { protected: template <typename T> void RunComposedOp(const std::vector<string> op_names, T input_scalar_value, T expected_scalar_value) { string xla_device_name = tensorflow::IsGoogleCudaEnabled() ? DEVICE_XLA_GPU : DEVICE_XLA_CPU; SetDevice(DeviceType(xla_device_name), std::unique_ptr<tensorflow::Device>(DeviceFactory::NewDevice( xla_device_name, {}, "/job:a/replica:0/task:0"))); TF_ASSERT_OK(NodeDefBuilder("unary_op_composition", "_UnaryOpsComposition") .Input(FakeInput(DataTypeToEnum<T>::v())) .Attr("T", DataTypeToEnum<T>::v()) .Attr("op_names", op_names) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); TensorShape shape({}); AllocatorAttributes host_alloc_attrs; host_alloc_attrs.set_gpu_compatible(true); host_alloc_attrs.set_on_host(true); Allocator* cpu_allocator = device_->GetAllocator(host_alloc_attrs); DataType dtype = DataTypeToEnum<T>::value; Tensor input_on_host(cpu_allocator, dtype, shape); test::FillValues<T>(&input_on_host, {input_scalar_value}); Tensor* input = AddInput(dtype, shape); DeviceContext* device_context = device_->tensorflow_accelerator_device_info()->default_context; TF_CHECK_OK(device_context->CopyCPUTensorToDeviceSync(&input_on_host, device_, input)); TF_ASSERT_OK(RunOpKernel()); Tensor expected_tensor(cpu_allocator, dtype, shape); test::FillValues<T>(&expected_tensor, {expected_scalar_value}); Tensor* output = GetOutput(0); Tensor output_on_host(cpu_allocator, output->dtype(), output->shape()); TF_CHECK_OK(device_context->CopyDeviceTensorToCPUSync( output, "output 0", device_, &output_on_host)); test::ExpectClose(expected_tensor, output_on_host, 1e-5, 1e-5); } }; TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sqrt_F) { RunComposedOp<float>({"Sqrt", "Sqrt"}, 81.0, 3.0); } TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sqrt_D) { RunComposedOp<double>({"Sqrt", "Sqrt"}, 81.0, 3.0); } TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sin_F) { RunComposedOp<float>({"Sqrt", "Sin"}, 81.0, std::sin(9.0f)); } TEST_F(UnaryOpsCompositionTest, Compose_Cos_Acos_F) { RunComposedOp<float>({"Cos", "Acos"}, 0.5, std::acos(std::cos(0.5f))); } TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu_F) { RunComposedOp<float>({"Tanh", "Relu"}, 0.5, std::max(0.0f, std::tanh(0.5f))); } TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu_D) { RunComposedOp<double>({"Tanh", "Relu"}, 0.5, std::max(0.0, std::tanh(0.5))); } TEST_F(UnaryOpsCompositionTest, 
Compose_Tanh_Relu6_F) { RunComposedOp<float>({"Relu6"}, 11.0f, 6.0f); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/unary_ops_composition.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tests/unary_ops_composition_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
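Illustrative note (not part of the upstream record): the unary_ops_composition kernel above resolves each op name in a generator map and folds the input through the generators in order. The following is a minimal standalone sketch of that composition pattern using plain std::function over double instead of xla::XlaOp; all names and values here are made up for illustration.

#include <algorithm>
#include <cmath>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Stand-in for the op-name -> generator map used by the kernel.
using UnaryFn = std::function<double(double)>;

int main() {
  std::unordered_map<std::string, UnaryFn> generators = {
      {"Sqrt", [](double x) { return std::sqrt(x); }},
      {"Sin", [](double x) { return std::sin(x); }},
      {"Relu", [](double x) { return std::max(0.0, x); }},
  };

  // Mirrors the Compile path: reject unknown names, then apply each
  // generator to the running value in sequence.
  std::vector<std::string> op_names = {"Sqrt", "Sin"};
  double x = 81.0;
  for (const auto& name : op_names) {
    auto it = generators.find(name);
    if (it == generators.end()) {
      std::cerr << name << " not supported\n";
      return 1;
    }
    x = it->second(x);
  }
  std::cout << x << "\n";  // sin(sqrt(81)) == sin(9), as in the test above
  return 0;
}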
8e51eff6-da41-42b0-ae49-1405c1a5da7d
cpp
tensorflow/tensorflow
mfcc_op
tensorflow/core/kernels/mfcc_op.cc
tensorflow/core/kernels/mfcc_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/mfcc.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { class MfccOp : public OpKernel { public: explicit MfccOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("upper_frequency_limit", &upper_frequency_limit_)); OP_REQUIRES_OK(context, context->GetAttr("lower_frequency_limit", &lower_frequency_limit_)); OP_REQUIRES_OK(context, context->GetAttr("filterbank_channel_count", &filterbank_channel_count_)); OP_REQUIRES_OK(context, context->GetAttr("dct_coefficient_count", &dct_coefficient_count_)); } void Compute(OpKernelContext* context) override { const Tensor& spectrogram = context->input(0); OP_REQUIRES(context, spectrogram.dims() == 3, errors::InvalidArgument("spectrogram must be 3-dimensional", spectrogram.shape().DebugString())); const Tensor& sample_rate_tensor = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsScalar(sample_rate_tensor.shape()), errors::InvalidArgument( "Input sample_rate should be a scalar tensor, got ", sample_rate_tensor.shape().DebugString(), " instead.")); const int32_t sample_rate = sample_rate_tensor.scalar<int32>()(); const int spectrogram_channels = spectrogram.dim_size(2); const int spectrogram_samples = spectrogram.dim_size(1); const int audio_channels = spectrogram.dim_size(0); Mfcc mfcc; mfcc.set_upper_frequency_limit(upper_frequency_limit_); mfcc.set_lower_frequency_limit(lower_frequency_limit_); mfcc.set_filterbank_channel_count(filterbank_channel_count_); mfcc.set_dct_coefficient_count(dct_coefficient_count_); OP_REQUIRES( context, mfcc.Initialize(spectrogram_channels, sample_rate), errors::InvalidArgument("Mfcc initialization failed for channel count ", spectrogram_channels, ", sample rate ", sample_rate, " and filterbank_channel_count ", filterbank_channel_count_)); Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({audio_channels, spectrogram_samples, dct_coefficient_count_}), &output_tensor)); const float* spectrogram_flat = spectrogram.flat<float>().data(); float* output_flat = output_tensor->flat<float>().data(); for (int audio_channel = 0; audio_channel < audio_channels; ++audio_channel) { for (int spectrogram_sample = 0; spectrogram_sample < spectrogram_samples; ++spectrogram_sample) { const float* sample_data = spectrogram_flat + (audio_channel * spectrogram_samples * spectrogram_channels) + (spectrogram_sample * spectrogram_channels); std::vector<double> mfcc_input(sample_data, sample_data + spectrogram_channels); std::vector<double> mfcc_output; mfcc.Compute(mfcc_input, &mfcc_output); DCHECK_EQ(dct_coefficient_count_, mfcc_output.size()); float* output_data = output_flat + (audio_channel * spectrogram_samples * dct_coefficient_count_) + (spectrogram_sample * dct_coefficient_count_); for (int i = 0; i < dct_coefficient_count_; ++i) { output_data[i] = mfcc_output[i]; } } } } private: float upper_frequency_limit_; float lower_frequency_limit_; int32 filterbank_channel_count_; int32 dct_coefficient_count_; }; REGISTER_KERNEL_BUILDER(Name("Mfcc").Device(DEVICE_CPU), MfccOp); }
#define EIGEN_USE_THREADS #include <functional> #include <memory> #include <vector> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/ops/audio_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace ops { namespace { TEST(MfccOpTest, SimpleTest) { Scope root = Scope::DisabledShapeInferenceScope(); Tensor spectrogram_tensor(DT_FLOAT, TensorShape({1, 1, 513})); test::FillIota<float>(&spectrogram_tensor, 1.0f); Output spectrogram_const_op = Const(root.WithOpName("spectrogram_const_op"), Input::Initializer(spectrogram_tensor)); Output sample_rate_const_op = Const(root.WithOpName("sample_rate_const_op"), 22050); Mfcc mfcc_op = Mfcc(root.WithOpName("mfcc_op"), spectrogram_const_op, sample_rate_const_op); TF_ASSERT_OK(root.status()); ClientSession session(root); std::vector<Tensor> outputs; TF_EXPECT_OK( session.Run(ClientSession::FeedType(), {mfcc_op.output}, &outputs)); const Tensor& mfcc_tensor = outputs[0]; EXPECT_EQ(3, mfcc_tensor.dims()); EXPECT_EQ(13, mfcc_tensor.dim_size(2)); EXPECT_EQ(1, mfcc_tensor.dim_size(1)); EXPECT_EQ(1, mfcc_tensor.dim_size(0)); test::ExpectTensorNear<float>( mfcc_tensor, test::AsTensor<float>( {29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878, -0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029, -0.0769791, -0.10806114, -0.06047613}, TensorShape({1, 1, 13})), 1e-3); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
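Illustrative note (not part of the upstream record): MfccOp::Compute walks a flattened [audio_channels, spectrogram_samples, spectrogram_channels] buffer with explicit stride arithmetic and writes a [audio_channels, samples, dct_coefficient_count] output the same way. The sketch below shows only that indexing; the per-frame transform is a placeholder average, not the real MFCC math, and all sizes are invented.

#include <iostream>
#include <vector>

int main() {
  // Illustrative sizes; the real op reads them from the input tensor shape.
  const int audio_channels = 2, samples = 3, bins = 4, coeffs = 2;
  std::vector<float> spectrogram(audio_channels * samples * bins, 1.0f);
  std::vector<float> output(audio_channels * samples * coeffs, 0.0f);

  for (int c = 0; c < audio_channels; ++c) {
    for (int s = 0; s < samples; ++s) {
      // Same row-major offsets the kernel computes from the flat pointers.
      const float* in = spectrogram.data() + (c * samples * bins) + (s * bins);
      float* out = output.data() + (c * samples * coeffs) + (s * coeffs);
      for (int k = 0; k < coeffs; ++k) {
        float acc = 0.0f;  // Placeholder reduction instead of mfcc.Compute().
        for (int b = 0; b < bins; ++b) acc += in[b];
        out[k] = acc / bins;
      }
    }
  }
  std::cout << output[0] << "\n";  // 1.0 for the all-ones input
  return 0;
}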
dfecc821-d7b7-48f6-9ad7-d9bbaf890ca2
cpp
tensorflow/tensorflow
slice_op
tensorflow/compiler/tf2xla/kernels/slice_op.cc
tensorflow/core/kernels/slice_op_test.cc
#include <vector> #include "absl/container/inlined_vector.h" #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/dynamic_shaped_ops.h" #include "xla/hlo/builder/value_inference.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { class SliceOp : public XlaOpKernel { public: explicit SliceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { const TensorShape input_shape = ctx->InputShape(0); const TensorShape begin_tensor_shape = ctx->InputShape(1); const TensorShape size_tensor_shape = ctx->InputShape(2); const int input_dims = input_shape.dims(); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(begin_tensor_shape) && TensorShapeUtils::IsVector(size_tensor_shape) && begin_tensor_shape.num_elements() == input_dims && size_tensor_shape.num_elements() == input_dims, errors::InvalidArgument( "Expected begin and size arguments to be 1-D tensors of size ", input_dims, ", but got shapes ", begin_tensor_shape.DebugString(), " and ", size_tensor_shape.DebugString(), " instead.")); std::vector<int64_t> begin; std::vector<int64_t> size; const bool all_begins_are_constant = ctx->ConstantInputAsIntVector(1, &begin).ok(); const bool all_sizes_are_constant = ctx->ConstantInputAsIntVector(2, &size).ok(); if (all_begins_are_constant && all_sizes_are_constant) { std::vector<int64_t> wrapped_size(size.size()); for (int i = 0; i < input_dims; ++i) { if (size[i] == -1) { wrapped_size[i] = input_shape.dim_size(i) - begin[i]; } else { wrapped_size[i] = size[i]; } } for (int i = 0; i < input_dims; ++i) { int64_t b = begin[i]; int64_t s = wrapped_size[i]; if (input_shape.dim_size(i) == 0) { OP_REQUIRES(ctx, b == 0 && s == 0, errors::InvalidArgument( "Expected begin[", i, "] == 0 (got ", b, ") and size[", i, "] == 0 ", "(got ", s, ") when ", "input_shape.dim_size(", i, ") == 0")); } else { OP_REQUIRES(ctx, 0 <= b && b <= input_shape.dim_size(i), errors::InvalidArgument("Expected begin[", i, "] in [0, ", input_shape.dim_size(i), "], but got ", b)); OP_REQUIRES(ctx, 0 <= s && b + s <= input_shape.dim_size(i), errors::InvalidArgument("Expected size[", i, "] in [0, ", input_shape.dim_size(i) - b, "], but ", "got ", s)); } } std::vector<int64_t> limits; limits.reserve(begin.size()); for (int i = 0; i < begin.size(); ++i) { limits.push_back(begin[i] + wrapped_size[i]); } std::vector<int64_t> strides(begin.size(), 1); auto slice = xla::Slice(ctx->Input(0), begin, limits, strides); std::vector<bool> size_is_dynamic; OP_REQUIRES_OK( ctx, ctx->ResolveInputDynamismIntoPredVector(2, &size_is_dynamic)); for (int64_t i = 0; i < size.size(); ++i) { if (size_is_dynamic[i]) { if (size[i] != -1) { auto dynamic_size = xla::Reshape(xla::Slice(ctx->Input(2), {i}, {i + 1}, {1}), {}); slice = xla::SetDimensionSize(slice, dynamic_size, i); } } } ctx->SetOutput(0, slice); } else { bool constant_size_is_minus_one = false; if (all_sizes_are_constant) { for (int i = 0; i < input_dims; ++i) { if (size[i] < 0) { OP_REQUIRES(ctx, size[i] == -1, errors::InvalidArgument( "Negative size of slice operator can only 
be -1")); constant_size_is_minus_one = true; } OP_REQUIRES(ctx, size[i] <= input_shape.dim_size(i), errors::InvalidArgument("Expected size[", i, "] in [0, ", input_shape.dim_size(i), "], but ", "got ", size[i])); } } absl::InlinedVector<xla::XlaOp, 4> begin_indices; begin_indices.reserve(input_dims); xla::XlaOp begin = ctx->Input("begin"); for (int i = 0; i < input_dims; i++) { begin_indices.push_back( xla::Reshape(xla::Slice(begin, {i}, {i + 1}, {1}), {})); } if (all_sizes_are_constant && !constant_size_is_minus_one) { xla::XlaOp input = ctx->Input(0); ctx->SetOutput(0, xla::DynamicSlice(input, begin_indices, size)); } else { xla::PaddingConfig padding_config; xla::XlaOp input = ctx->Input(0); for (int64_t i = 0; i < input_dims; ++i) { auto* dims = padding_config.add_dimensions(); dims->set_edge_padding_low(0); dims->set_edge_padding_high(input_shape.dim_size(i)); dims->set_interior_padding(0); input = xla::RemoveDynamicDimension(input, i); } auto padded_input = xla::Pad(input, xla::Zero(ctx->builder(), ctx->input_xla_type(0)), padding_config); auto sliced = xla::DynamicSlice(padded_input, begin_indices, input_shape.dim_sizes()); for (int i = 0; i < input_dims; i++) { xla::XlaOp dynamic_size = xla::Reshape(xla::Slice(ctx->Input(2), {i}, {i + 1}, {1}), {}); if (constant_size_is_minus_one && size[i] == -1) { dynamic_size = xla::ConstantR0<int32>(ctx->builder(), input_shape.dim_size(i)) - begin_indices[i]; } auto constant_size = ctx->value_inference().AnalyzeConstant( dynamic_size, xla::ValueInferenceMode::kValue); OP_REQUIRES_OK(ctx, constant_size.status()); if (constant_size->AllValid()) { sliced = xla::SliceInDim( sliced, 0, constant_size->Get<int32>({}).value(), 1, i); } else { auto status = xla::SetDimensionSizeWithRebound( &ctx->value_inference(), sliced, dynamic_size, i); OP_REQUIRES_OK(ctx, status.status()); sliced = status.value(); } } ctx->SetOutput(0, sliced); } } } }; REGISTER_XLA_OP(Name("Slice") .CompileTimeConstantInput("begin") .CompileTimeConstantInput("size"), SliceOp); } }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { template <typename T> static void SliceHelper(::testing::benchmark::State& state) { const int size = state.range(0); Graph* g = new Graph(OpRegistry::Global()); DataType dt = DataTypeToEnum<T>::v(); int kDim = 100; int kMaxSize = 15000; CHECK_LT(size, kMaxSize); Tensor begin(DT_INT32, TensorShape({2})); begin.flat<int32>()(0) = 10; begin.flat<int32>()(1) = 10; Tensor sizes(DT_INT32, TensorShape({2})); sizes.flat<int32>()(0) = kDim; sizes.flat<int32>()(1) = size; Tensor input(dt, TensorShape({2 * kDim, kMaxSize})); input.flat<T>().setRandom(); Node* node; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Slice") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, begin)) .Input(test::graph::Constant(g, sizes)) .Attr("T", dt) .Finalize(g, &node)); FixupSourceAndSinkEdges(g); test::Benchmark("cpu", g, nullptr, nullptr, nullptr, "SINGLE_THREADED_EXECUTOR", false) .Run(state); state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim * size * sizeof(T)); } void BM_SliceFloat(::testing::benchmark::State& state) { SliceHelper<float>(state); } BENCHMARK(BM_SliceFloat)->UseRealTime()->Arg(100)->Arg(1000)->Arg(10000); void BM_SliceBFloat16(::testing::benchmark::State& state) { SliceHelper<bfloat16>(state); } BENCHMARK(BM_SliceBFloat16)->UseRealTime()->Arg(100)->Arg(1000)->Arg(10000); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/slice_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/slice_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
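Illustrative note (not part of the upstream record): when begin and size are compile-time constants, the XLA SliceOp first rewrites size == -1 as "to the end of that dimension" and then requires 0 <= begin and begin + size <= dim. A plain C++ sketch of that constant-path validation, detached from XLA; the dimension values in main are made up.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Returns false if any (begin, size) pair is out of range; resolves size == -1
// to "everything from begin to the end of the dimension", as the kernel does.
bool ValidateSlice(const std::vector<int64_t>& dims,
                   const std::vector<int64_t>& begin,
                   const std::vector<int64_t>& size,
                   std::vector<int64_t>* wrapped) {
  wrapped->resize(dims.size());
  for (std::size_t i = 0; i < dims.size(); ++i) {
    const int64_t s = (size[i] == -1) ? dims[i] - begin[i] : size[i];
    if (begin[i] < 0 || begin[i] > dims[i]) return false;
    if (s < 0 || begin[i] + s > dims[i]) return false;
    (*wrapped)[i] = s;
  }
  return true;
}

int main() {
  std::vector<int64_t> wrapped;
  // Slice rows 10..110 and columns 10..end of a hypothetical [200, 15000] input.
  const bool ok = ValidateSlice({200, 15000}, {10, 10}, {100, -1}, &wrapped);
  std::cout << ok << " " << wrapped[1] << "\n";  // 1 14990
  return 0;
}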
fe2113da-8f9e-41d6-a72e-7ca1a449fdba
cpp
tensorflow/tensorflow
debug_ops
tensorflow/core/ops/debug_ops.cc
tensorflow/core/kernels/debug_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { REGISTER_OP("Copy") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("tensor_name: string = ''") .Attr("debug_ops_spec: list(string) = []") .SetAllowsUninitializedInput() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("CopyHost") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("tensor_name: string = ''") .Attr("debug_ops_spec: list(string) = []") .SetAllowsUninitializedInput() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("DebugIdentity") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("device_name: string = ''") .Attr("tensor_name: string = ''") .Attr("debug_urls: list(string) = []") .Attr("gated_grpc: bool = false") .SetAllowsUninitializedInput() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("DebugIdentityV3") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("device_name: string = ''") .Attr("tensor_name: string = ''") .Attr("io_of_node: string = ''") .Attr("is_input: bool = false") .Attr("io_index: int = -1") .SetIsStateful() .Attr("debug_urls: list(string) = []") .Attr("gated_grpc: bool = false") .SetAllowsUninitializedInput() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("DebugNanCount") .Input("input: T") .Output("output: int64") .Attr("T: type") .Attr("device_name: string = ''") .Attr("tensor_name: string = ''") .Attr("debug_urls: list(string) = []") .Attr("gated_grpc: bool = false") .SetAllowsUninitializedInput() .SetShapeFn(shape_inference::ScalarShape); REGISTER_OP("DebugNumericSummary") .Input("input: T") .Output("output: double") .Attr("T: type") .Attr("device_name: string = ''") .Attr("tensor_name: string = ''") .Attr("debug_urls: list(string) = []") .Attr("lower_bound: float = -inf") .Attr("upper_bound: float = inf") .Attr("mute_if_healthy: bool = false") .Attr("gated_grpc: bool = false") .SetAllowsUninitializedInput() .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("DebugIdentityV2") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("tfdbg_context_id: string = ''") .Attr("op_name: string = ''") .Attr("output_slot: int = -1") .Attr("tensor_debug_mode: int = -1") .Attr("debug_urls: list(string) = []") .Attr("circular_buffer_size: int = 1000") .Attr("tfdbg_run_id: string = ''") .SetIsStateful() .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("DebugNumericSummaryV2") .Input("input: T") .Output("output: output_dtype") .Attr("output_dtype: {float32, float64} = DT_FLOAT") .Attr("T: type") .Attr("tensor_debug_mode: int = -1") .Attr("tensor_id: int = -1") .SetShapeFn(shape_inference::UnknownShape); }
#include <string.h> #include <fstream> #include <vector> #include "tensorflow/core/debug/debug_io_utils.h" #include "tensorflow/core/debug/debug_node_key.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/util/event.pb.h" namespace tensorflow { class DebugIdentityOpTest : public OpsTestBase { protected: Status Init(DataType input_type, const std::vector<string>& debug_urls) { env_ = Env::Default(); TF_CHECK_OK(NodeDefBuilder("op", "DebugIdentity") .Input(FakeInput(input_type)) .Attr("tensor_name", "FakeTensor:0") .Attr("debug_urls", debug_urls) .Finalize(node_def())); return InitOp(); } Status Init(DataType input_type) { std::vector<string> empty_debug_urls; return Init(input_type, empty_debug_urls); } Env* env_; }; TEST_F(DebugIdentityOpTest, Int32Success_6) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(DebugIdentityOpTest, Int32Success_6_FileURLs) { const int kNumDumpDirs = 3; const string tmp_dir = testing::TmpDir(); std::vector<string> dump_roots; std::vector<string> debug_urls; for (int i = 0; i < kNumDumpDirs; ++i) { const string dump_root = strings::StrCat(tmp_dir, "_", i); dump_roots.push_back(dump_root); debug_urls.push_back(strings::StrCat("file: } uint64 wall_time = Env::Default()->NowMicros(); TF_ASSERT_OK(Init(DT_INT32, debug_urls)); AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({6})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); for (int i = 0; i < kNumDumpDirs; ++i) { ASSERT_TRUE(env_->FileExists(dump_roots[i]).ok()); ASSERT_TRUE(env_->IsDirectory(dump_roots[i]).ok()); std::vector<string> device_roots; FileSystem* fs = nullptr; TF_ASSERT_OK(Env::Default()->GetFileSystemForFile(dump_roots[i], &fs)); std::vector<string> children; TF_ASSERT_OK(fs->GetChildren(dump_roots[i], &children)); const string kDeviceDirPrefix = strings::StrCat( DebugNodeKey::kMetadataFilePrefix, DebugNodeKey::kDeviceTag); for (const string child : children) { if (!strncmp(child.c_str(), kDeviceDirPrefix.c_str(), kDeviceDirPrefix.size())) { device_roots.push_back(io::JoinPath(dump_roots[i], child)); } } ASSERT_EQ(1, device_roots.size()); const string& device_root = device_roots[0]; TF_ASSERT_OK(Env::Default()->GetFileSystemForFile(device_root, &fs)); TF_ASSERT_OK(fs->GetChildren(device_root, &children)); int dump_files_found = 0; for (const string child : children) { dump_files_found++; const string dump_file_path = io::JoinPath(device_root, child); std::fstream ifs(dump_file_path, std::ios::in | std::ios::binary); Event event; 
event.ParseFromIstream(&ifs); ifs.close(); ASSERT_GE(event.wall_time(), wall_time); ASSERT_EQ(1, event.summary().value().size()); ASSERT_EQ(strings::StrCat("FakeTensor", ":", 0, ":", "DebugIdentity"), event.summary().value(0).node_name()); Tensor tensor_prime(DT_INT32); ASSERT_TRUE(tensor_prime.FromProto(event.summary().value(0).tensor())); ASSERT_EQ(TensorShape({6}), tensor_prime.shape()); for (int j = 0; j < 6; ++j) { ASSERT_EQ(j + 1, tensor_prime.flat<int32>()(j)); } } ASSERT_EQ(1, dump_files_found); int64_t undeleted_files = 0; int64_t undeleted_dirs = 0; ASSERT_TRUE(env_->DeleteRecursively(dump_roots[i], &undeleted_files, &undeleted_dirs) .ok()); ASSERT_EQ(0, undeleted_files); ASSERT_EQ(0, undeleted_dirs); } } TEST_F(DebugIdentityOpTest, Int32Success_2_3) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2, 3})); test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6}); test::ExpectTensorEqual<int32>(expected, *GetOutput(0)); } TEST_F(DebugIdentityOpTest, StringSuccess) { TF_ASSERT_OK(Init(DT_STRING)); AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({6})); test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } class DebugNanCountOpTest : public OpsTestBase { protected: Status Init(DataType input_type) { TF_CHECK_OK(NodeDefBuilder("op", "DebugNanCount") .Input(FakeInput(input_type)) .Attr("tensor_name", "FakeTensor:0") .Finalize(node_def())); return InitOp(); } }; TEST_F(DebugNanCountOpTest, Float_has_NaNs) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>(TensorShape({6}), {1.1, std::numeric_limits<float>::quiet_NaN(), 3.3, std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), 6.6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1})); test::FillValues<int64_t>(&expected_nan_count, {3}); test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0)); } TEST_F(DebugNanCountOpTest, Float_no_NaNs) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>( TensorShape({6}), {1.1, 2.2, 3.3, std::numeric_limits<float>::infinity(), 5.5, 6.6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1})); test::FillValues<int64_t>(&expected_nan_count, {0}); test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0)); } TEST_F(DebugNanCountOpTest, Double_has_NaNs) { TF_ASSERT_OK(Init(DT_DOUBLE)); AddInputFromArray<double>(TensorShape({6}), {1.1, std::numeric_limits<double>::quiet_NaN(), 3.3, std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN(), 6.6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1})); test::FillValues<int64_t>(&expected_nan_count, {3}); test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0)); } TEST_F(DebugNanCountOpTest, Double_no_NaNs) { TF_ASSERT_OK(Init(DT_DOUBLE)); AddInputFromArray<double>( TensorShape({6}), {1.1, 2.2, 3.3, std::numeric_limits<double>::infinity(), 5.5, 6.6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1})); test::FillValues<int64_t>(&expected_nan_count, {0}); test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0)); } class DebugNumericSummaryOpTest : public OpsTestBase { 
protected: Status Init(DataType input_type) { TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary") .Input(FakeInput(input_type)) .Attr("tensor_name", "FakeTensor:0") .Finalize(node_def())); return InitOp(); } Status InitGated(DataType input_type, const std::vector<string>& debug_urls) { TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary") .Input(FakeInput(input_type)) .Attr("tensor_name", "FakeTensor:0") .Attr("gated_grpc", true) .Attr("debug_urls", debug_urls) .Finalize(node_def())); return InitOp(); } #if defined(PLATFORM_GOOGLE) void ClearEnabledWatchKeys() { DebugGrpcIO::ClearEnabledWatchKeys(); } #endif }; TEST_F(DebugNumericSummaryOpTest, Float_full_house) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>( TensorShape({18}), {std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f, 0.0f, -1.0f, -3.0f, 3.0f, 7.0f, -std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({15})); test::FillValues<double>( &expected, {1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714285714, 8.97959183673, static_cast<double>(DT_FLOAT), 1.0, 18.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Double_full_house) { TF_ASSERT_OK(Init(DT_DOUBLE)); AddInputFromArray<double>( TensorShape({18}), {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN(), 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -std::numeric_limits<double>::infinity(), -std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity(), std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({15})); test::FillValues<double>( &expected, {1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714285714, 8.97959183673, static_cast<double>(DT_DOUBLE), 1.0, 18.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Float_only_valid_values) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>(TensorShape({2, 3}), {0.0f, 0.0f, -1.0f, 3.0f, 3.0f, 7.0f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({16})); test::FillValues<double>( &expected, {1.0, 6.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.0, -1.0, 7.0, 2.0, 7.33333333333, static_cast<double>(DT_FLOAT), 2.0, 2.0, 3.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Float_all_Inf_or_NaN) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>(TensorShape({3, 3}), {std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), -std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()}); TF_ASSERT_OK(RunOpKernel()); Tensor output_tensor 
= *GetOutput(0); const double* output = output_tensor.template flat<double>().data(); ASSERT_NEAR(1.0, output[0], 1e-8); ASSERT_NEAR(9.0, output[1], 1e-8); ASSERT_NEAR(4.0, output[2], 1e-8); ASSERT_NEAR(2.0, output[3], 1e-8); ASSERT_NEAR(0.0, output[4], 1e-8); ASSERT_NEAR(0.0, output[5], 1e-8); ASSERT_NEAR(0.0, output[6], 1e-8); ASSERT_NEAR(3.0, output[7], 1e-8); ASSERT_EQ(std::numeric_limits<float>::infinity(), output[8]); ASSERT_EQ(-std::numeric_limits<float>::infinity(), output[9]); ASSERT_TRUE(Eigen::numext::isnan(output[10])); ASSERT_TRUE(Eigen::numext::isnan(output[11])); ASSERT_EQ(static_cast<double>(DT_FLOAT), output[12]); ASSERT_EQ(2.0, output[13]); ASSERT_EQ(3.0, output[14]); ASSERT_EQ(3.0, output[15]); } TEST_F(DebugNumericSummaryOpTest, Many_dimensions_tensor_shape) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>(TensorShape({1, 3, 1, 1, 1, 1, 1}), {std::numeric_limits<float>::quiet_NaN(), -std::numeric_limits<float>::infinity(), -8.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({21})); test::FillValues<double>(&expected, {1.0, 3.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, -8.0, -8.0, -8.0, 0.0, static_cast<double>(DT_FLOAT), 7.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Scalar_tensor_shape) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>(TensorShape({}), {42.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({14})); test::FillValues<double>(&expected, {1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 42.0, 42.0, 42.0, 0.0, static_cast<double>(DT_FLOAT), 0.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Int16Success) { TF_ASSERT_OK(Init(DT_INT16)); AddInputFromArray<int16>(TensorShape({4, 1}), {-1, -3, 3, 7}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({16})); test::FillValues<double>(&expected, {1.0, 4.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, -3.0, 7.0, 1.5, 14.75, static_cast<double>(DT_INT16), 2.0, 4.0, 1.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Int32Success) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({2, 3}), {0, 0, -1, 3, 3, 7}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({16})); test::FillValues<double>( &expected, {1.0, 6.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.0, -1.0, 7.0, 2.0, 7.33333333333, static_cast<double>(DT_INT32), 2.0, 2.0, 3.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, Int64Success) { TF_ASSERT_OK(Init(DT_INT64)); AddInputFromArray<int64_t>(TensorShape({2, 2, 2}), {0, 0, -1, 3, 3, 7, 0, 0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({17})); test::FillValues<double>(&expected, {1.0, 8.0, 0.0, 0.0, 1.0, 4.0, 3.0, 0.0, -1.0, 7.0, 1.5, 6.25, static_cast<double>(DT_INT64), 3.0, 2.0, 2.0, 2.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, UInt8Success) { TF_ASSERT_OK(Init(DT_UINT8)); AddInputFromArray<uint8>(TensorShape({1, 5}), {0, 10, 30, 30, 70}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({16})); test::FillValues<double>(&expected, {1.0, 5.0, 0.0, 0.0, 0.0, 1.0, 4.0, 0.0, 0.0, 70.0, 28.0, 576.0, static_cast<double>(DT_UINT8), 2.0, 1.0, 5.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } 
TEST_F(DebugNumericSummaryOpTest, BoolSuccess) { TF_ASSERT_OK(Init(DT_BOOL)); AddInputFromArray<bool>(TensorShape({2, 3}), {false, false, true, true, true, false}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({16})); test::FillValues<double>(&expected, {1.0, 6.0, 0.0, 0.0, 0.0, 3.0, 3.0, 0.0, 0.0, 1.0, 0.5, 0.25, static_cast<double>(DT_BOOL), 2.0, 2.0, 3.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } #if defined(PLATFORM_GOOGLE) TEST_F(DebugNumericSummaryOpTest, DisabledDueToEmptyEnabledSet) { ClearEnabledWatchKeys(); std::vector<string> debug_urls({"grpc: TF_ASSERT_OK(InitGated(DT_FLOAT, debug_urls)); AddInputFromArray<float>(TensorShape({2, 2}), {1.0, 3.0, 3.0, 7.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_disabled(allocator(), DT_DOUBLE, TensorShape({0})); test::ExpectTensorNear<double>(expected_disabled, *GetOutput(0), 1e-8); } TEST_F(DebugNumericSummaryOpTest, DisabledDueToNonMatchingWatchKey) { ClearEnabledWatchKeys(); DebugGrpcIO::SetDebugNodeKeyGrpcState( "grpc: EventReply::DebugOpStateChange::READ_ONLY); std::vector<string> debug_urls({"grpc: TF_ASSERT_OK(InitGated(DT_FLOAT, debug_urls)); AddInputFromArray<float>(TensorShape({2, 2}), {1.0, 3.0, 3.0, 7.0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected_disabled(allocator(), DT_DOUBLE, TensorShape({0})); test::ExpectTensorNear<double>(expected_disabled, *GetOutput(0), 1e-8); } #endif class DebugNumericSummaryOpCustomLowerBoundTest : public OpsTestBase { protected: Status Init(DataType input_type) { TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary") .Input(FakeInput(input_type)) .Attr("tensor_name", "FakeTensor:0") .Attr("lower_bound", -1.2f) .Finalize(node_def())); return InitOp(); } }; TEST_F(DebugNumericSummaryOpCustomLowerBoundTest, Float_full_house) { TF_ASSERT_OK(Init(DT_FLOAT)); AddInputFromArray<float>( TensorShape({18}), {std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f, 0.0f, -1.0f, -3.0f, 3.0f, 7.0f, -std::numeric_limits<float>::infinity(), -std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({15})); test::FillValues<double>( &expected, {1.0, 18.0, 4.0, 3.0, 1.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714285714, 8.97959183673, static_cast<double>(DT_FLOAT), 1.0, 18.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } class DebugNumericSummaryOpCustomLowerUpperBoundsTest : public OpsTestBase { protected: Status Init(DataType input_type) { TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary") .Input(FakeInput(input_type)) .Attr("tensor_name", "FakeTensor:0") .Attr("lower_bound", -0.5f) .Attr("upper_bound", 3.6f) .Finalize(node_def())); return InitOp(); } }; TEST_F(DebugNumericSummaryOpCustomLowerUpperBoundsTest, Int32Success) { TF_ASSERT_OK(Init(DT_INT32)); AddInputFromArray<int32>(TensorShape({2, 3}), {0, 0, -1, 3, 3, 7}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_DOUBLE, TensorShape({16})); test::FillValues<double>( &expected, {1.0, 6.0, 0.0, 1.0, 0.0, 2.0, 2.0, 1.0, -1.0, 7.0, 2.0, 7.33333333333, static_cast<double>(DT_INT32), 2.0, 2.0, 3.0}); test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/debug_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/debug_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
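Illustrative note (not part of the upstream record): the DebugNumericSummary values checked in the tests above are counts of NaN, -Inf, negative, zero, positive and +Inf elements followed by min, max, mean and variance of the finite values. The sketch below shows that bucketing over a flat float buffer; the output layout is deliberately simplified and does not reproduce the op's exact 14+rank element format.

#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

int main() {
  std::vector<float> values = {1.1f, std::numeric_limits<float>::quiet_NaN(),
                               -3.0f, 0.0f,
                               std::numeric_limits<float>::infinity(), 7.0f};
  long nan = 0, neg_inf = 0, neg = 0, zero = 0, pos = 0, pos_inf = 0;
  long finite = 0;
  double sum = 0.0, sum_sq = 0.0;
  double mn = std::numeric_limits<double>::infinity();
  double mx = -std::numeric_limits<double>::infinity();

  for (float v : values) {
    if (std::isnan(v)) { ++nan; continue; }
    if (std::isinf(v)) { (v < 0 ? ++neg_inf : ++pos_inf); continue; }
    if (v < 0) ++neg; else if (v == 0) ++zero; else ++pos;
    ++finite;
    sum += v;
    sum_sq += static_cast<double>(v) * v;
    mn = std::min<double>(mn, v);
    mx = std::max<double>(mx, v);
  }
  const double mean = finite ? sum / finite : std::nan("");
  const double var = finite ? sum_sq / finite - mean * mean : std::nan("");
  std::cout << nan << " " << neg_inf << " " << neg << " " << zero << " " << pos
            << " " << pos_inf << " " << mn << " " << mx << " " << mean << " "
            << var << "\n";
  return 0;
}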
3237f930-4e86-4d74-98a8-7753c2534621
cpp
tensorflow/tensorflow
summary_audio_op
tensorflow/core/kernels/summary_audio_op.cc
tensorflow/core/kernels/summary_audio_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/wav/wav_io.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { class SummaryAudioOp : public OpKernel { public: explicit SummaryAudioOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_outputs", &max_outputs_)); OP_REQUIRES(context, max_outputs_ > 0, errors::InvalidArgument("max_outputs must be > 0")); has_sample_rate_attr_ = context->GetAttr("sample_rate", &sample_rate_attr_).ok(); } void Compute(OpKernelContext* c) override { const Tensor& tag = c->input(0); const Tensor& tensor = c->input(1); OP_REQUIRES(c, TensorShapeUtils::IsScalar(tag.shape()), errors::InvalidArgument("Tag must be a scalar")); OP_REQUIRES(c, tensor.dims() >= 2 && tensor.dims() <= 3, errors::InvalidArgument("Tensor must be 3-D or 2-D, got: ", tensor.shape().DebugString())); const string& base_tag = tag.scalar<tstring>()(); float sample_rate = sample_rate_attr_; if (!has_sample_rate_attr_) { const Tensor& sample_rate_tensor = c->input(2); OP_REQUIRES(c, sample_rate_tensor.IsAligned() && sample_rate_tensor.NumElements() == 1, errors::InvalidArgument( "sample_rate must be rank-0 or contain a single value")); sample_rate = sample_rate_tensor.scalar<float>()(); } OP_REQUIRES(c, sample_rate > 0.0f, errors::InvalidArgument("sample_rate must be > 0")); const int batch_size = tensor.dim_size(0); const int64_t length_frames = tensor.dim_size(1); const int64_t num_channels = tensor.dims() == 2 ? 1 : tensor.dim_size(tensor.dims() - 1); Summary s; const int N = std::min<int>(max_outputs_, batch_size); for (int i = 0; i < N; ++i) { Summary::Value* v = s.add_value(); if (max_outputs_ > 1) { v->set_tag(strings::StrCat(base_tag, "/audio/", i)); } else { v->set_tag(strings::StrCat(base_tag, "/audio")); } Summary::Audio* sa = v->mutable_audio(); sa->set_sample_rate(sample_rate); sa->set_num_channels(num_channels); sa->set_length_frames(length_frames); sa->set_content_type("audio/wav"); auto values = tensor.shaped<float, 3>({batch_size, length_frames, num_channels}); const float* data = tensor.NumElements() == 0 ? nullptr : &values(i, 0, 0); size_t sample_rate_truncated = lrintf(sample_rate); if (sample_rate_truncated == 0) { sample_rate_truncated = 1; } OP_REQUIRES_OK(c, wav::EncodeAudioAsS16LEWav( data, sample_rate_truncated, num_channels, length_frames, sa->mutable_encoded_audio_string())); } Tensor* summary_tensor = nullptr; OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor)); CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()())); } private: int max_outputs_; bool has_sample_rate_attr_; float sample_rate_attr_; }; REGISTER_KERNEL_BUILDER(Name("AudioSummaryV2").Device(DEVICE_CPU), SummaryAudioOp); REGISTER_KERNEL_BUILDER(Name("AudioSummary").Device(DEVICE_CPU), SummaryAudioOp); }
#include <functional> #include <memory> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/histogram/histogram.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { static void EXPECT_SummaryMatches(const Summary& actual, const string& expected_str) { Summary expected; CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected)); EXPECT_EQ(expected.DebugString(), actual.DebugString()); } class SummaryAudioOpTest : public OpsTestBase { protected: void MakeOp(const int max_outputs) { TF_ASSERT_OK(NodeDefBuilder("myop", "AudioSummaryV2") .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput()) .Attr("max_outputs", max_outputs) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } void CheckAndRemoveEncodedAudio(Summary* summary) { for (int i = 0; i < summary->value_size(); ++i) { Summary::Value* value = summary->mutable_value(i); ASSERT_TRUE(value->has_audio()) << "No audio for value: " << value->tag(); ASSERT_FALSE(value->audio().encoded_audio_string().empty()) << "No encoded_audio_string for value: " << value->tag(); if (VLOG_IS_ON(2)) { TF_CHECK_OK(WriteStringToFile( Env::Default(), strings::StrCat("/tmp/", value->tag(), ".wav"), value->audio().encoded_audio_string())); } value->mutable_audio()->clear_encoded_audio_string(); } } }; TEST_F(SummaryAudioOpTest, Basic3D) { const float kSampleRate = 44100.0f; const int kMaxOutputs = 3; MakeOp(kMaxOutputs); AddInputFromArray<tstring>(TensorShape({}), {"tag"}); AddInputFromArray<float>(TensorShape({4, 2, 2}), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}); AddInputFromArray<float>(TensorShape({}), {kSampleRate}); TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); CheckAndRemoveEncodedAudio(&summary); EXPECT_SummaryMatches(summary, R"( value { tag: 'tag/audio/0' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2 length_frames: 2 } } value { tag: 'tag/audio/1' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2 length_frames: 2 } } value { tag: 'tag/audio/2' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2 length_frames: 2 } } )"); } TEST_F(SummaryAudioOpTest, Basic2D) { const float kSampleRate = 44100.0f; const int kMaxOutputs = 3; MakeOp(kMaxOutputs); AddInputFromArray<tstring>(TensorShape({}), {"tag"}); AddInputFromArray<float>(TensorShape({4, 4}), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}); AddInputFromArray<float>(TensorShape({}), {kSampleRate}); TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); CheckAndRemoveEncodedAudio(&summary); EXPECT_SummaryMatches(summary, R"( value { tag: 
'tag/audio/0' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 length_frames: 4 } } value { tag: 'tag/audio/1' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 length_frames: 4 } } value { tag: 'tag/audio/2' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 length_frames: 4 } } )"); } TEST_F(SummaryAudioOpTest, ZeroLength) { const float kSampleRate = 44100.0f; const int kMaxOutputs = 3; MakeOp(kMaxOutputs); AddInputFromArray<tstring>(TensorShape({}), {"tag"}); AddInputFromArray<float>(TensorShape({4, 0}), {}); AddInputFromArray<float>(TensorShape({}), {kSampleRate}); TF_ASSERT_OK(RunOpKernel()); Tensor* out_tensor = GetOutput(0); ASSERT_EQ(0, out_tensor->dims()); Summary summary; ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()()); CheckAndRemoveEncodedAudio(&summary); EXPECT_SummaryMatches(summary, R"( value { tag: 'tag/audio/0' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 length_frames: 0 } } value { tag: 'tag/audio/1' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 length_frames: 0 } } value { tag: 'tag/audio/2' audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 length_frames: 0 } } )"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_audio_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_audio_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
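Illustrative note (not part of the upstream record): SummaryAudioOp clamps the number of emitted clips to min(max_outputs, batch_size) and appends an index to the tag only when more than one output is allowed, which is why the test expects tags tag/audio/0 through tag/audio/2. A tiny sketch of that tag logic, with strings::StrCat replaced by std::string concatenation.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Mirrors the tag naming in the kernel: "tag/audio" when a single clip is
// emitted, "tag/audio/<i>" when several are.
std::vector<std::string> AudioTags(const std::string& base_tag, int max_outputs,
                                   int batch_size) {
  std::vector<std::string> tags;
  const int n = std::min(max_outputs, batch_size);
  for (int i = 0; i < n; ++i) {
    tags.push_back(max_outputs > 1 ? base_tag + "/audio/" + std::to_string(i)
                                   : base_tag + "/audio");
  }
  return tags;
}

int main() {
  for (const auto& t : AudioTags("tag", 3, 4)) std::cout << t << "\n";
  // Prints tag/audio/0, tag/audio/1, tag/audio/2, matching the test above.
  return 0;
}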
8f1fc3e6-a0f2-4449-b1a7-b42f92f2e61b
cpp
tensorflow/tensorflow
xent_op
tensorflow/core/kernels/xent_op.cc
tensorflow/core/kernels/xent_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/xent_op.h" #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/util/bcast.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/env_var.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T> class SoftmaxXentWithLogitsOp : public OpKernel { public: explicit SoftmaxXentWithLogitsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& logits_in = context->input(0); const Tensor& labels_in = context->input(1); TensorShape shape_in = logits_in.shape(); BCast bcast(BCast::FromShape(logits_in.shape()), BCast::FromShape(labels_in.shape()), false); if (!logits_in.IsSameSize(labels_in)) { OP_REQUIRES(context, bcast.IsValid(), errors::InvalidArgument( "logits and labels must be broadcastable: logits_size=", logits_in.shape().DebugString(), " labels_size=", labels_in.shape().DebugString())); shape_in = BCast::ToShape(bcast.output_shape()); } OP_REQUIRES(context, TensorShapeUtils::IsMatrix(shape_in), errors::InvalidArgument("logits and labels must be either " "2-dimensional, or broadcasted to be " "2-dimensional")); if (std::is_same<Device, GPUDevice>::value) { OP_REQUIRES(context, !OpDeterminismRequired(), errors::Unimplemented( "The GPU implementation of SoftmaxCrossEntropyWithLogits" " that would have been executed is not deterministic." " Note that the Python API uses an alternative," " deterministic, GPU-accelerated path when determinism is" " enabled.")); } Tensor scratch; OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, TensorShape({shape_in.dim_size(0), 1}), &scratch)); Tensor* loss_out = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({shape_in.dim_size(0)}), &loss_out)); Tensor* back_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 1, shape_in, &back_out)); if (shape_in.dim_size(0) > 0) { functor::XentFunctor<Device, T> functor; functor(context->eigen_device<Device>(), shape_in.AsEigenDSizes<2>(), BCast::ToIndexArray<2>(bcast.x_bcast()), BCast::ToIndexArray<2>(bcast.y_bcast()), logits_in.template shaped<T, 2>(bcast.x_reshape()), labels_in.template shaped<T, 2>(bcast.y_reshape()), scratch.matrix<T>(), loss_out->vec<T>(), back_out->matrix<T>()); } } }; namespace functor { template <typename Device, typename T> struct XentFunctorBase { void operator()(const Device& d, const Eigen::DSizes<Eigen::DenseIndex, 2>& shape, const Eigen::array<Eigen::DenseIndex, 2>& logits_bcast, const Eigen::array<Eigen::DenseIndex, 2>& labels_bcast, typename TTypes<T>::ConstMatrix logits, typename TTypes<T>::ConstMatrix labels, typename TTypes<T>::Matrix scratch, typename TTypes<T>::Vec loss, typename TTypes<T>::Matrix backprop) { if (shape[0] > 0) { XentEigenImpl<Device, T>::Compute(d, shape, logits_bcast, labels_bcast, logits, labels, scratch, loss, backprop); } } }; template <typename T> struct XentFunctor<CPUDevice, T> : XentFunctorBase<CPUDevice, T> {}; } #define REGISTER_CPU(T) \ REGISTER_KERNEL_BUILDER(Name("SoftmaxCrossEntropyWithLogits") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ SoftmaxXentWithLogitsOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU); 
TF_CALL_float(REGISTER_CPU); TF_CALL_double(REGISTER_CPU); TF_CALL_bfloat16(REGISTER_CPU); #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) #define REGISTER_GPU(T) \ REGISTER_KERNEL_BUILDER(Name("SoftmaxCrossEntropyWithLogits") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T"), \ SoftmaxXentWithLogitsOp<GPUDevice, T>); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU); #endif }
#include "tensorflow/core/kernels/xent_op.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { template <class T> static Graph* Xent(int batch_size, int num_classes, DataType type) { Graph* g = new Graph(OpRegistry::Global()); Tensor logits(type, TensorShape({batch_size, num_classes})); logits.flat<T>().setRandom(); Tensor labels(type, TensorShape({batch_size, num_classes})); labels.flat<T>().setRandom(); test::graph::Binary(g, "SoftmaxCrossEntropyWithLogits", test::graph::Constant(g, logits), test::graph::Constant(g, labels)); return g; } #define BM_XentDev(BATCH, CLASS, DEVICE, C_TYPE, TF_TYPE) \ static void BM_Xent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, Xent<C_TYPE>(BATCH, CLASS, TF_TYPE), \ false) \ .Run(state); \ const int64_t tot = \ static_cast<int64_t>(state.iterations()) * BATCH * CLASS; \ state.SetItemsProcessed(tot); \ state.SetBytesProcessed(tot * sizeof(C_TYPE)); \ } \ BENCHMARK(BM_Xent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE)->UseRealTime() #ifdef GOOGLE_CUDA BM_XentDev(16, 10000, gpu, float, DT_FLOAT); BM_XentDev(16, 30000, gpu, float, DT_FLOAT); BM_XentDev(16, 100000, gpu, float, DT_FLOAT); BM_XentDev(32, 10000, gpu, float, DT_FLOAT); BM_XentDev(32, 30000, gpu, float, DT_FLOAT); BM_XentDev(32, 100000, gpu, float, DT_FLOAT); BM_XentDev(64, 10000, gpu, float, DT_FLOAT); BM_XentDev(64, 30000, gpu, float, DT_FLOAT); BM_XentDev(64, 100000, gpu, float, DT_FLOAT); #endif #define BM_XentDev_CPU(C_TYPE, TF_TYPE) \ BM_XentDev(1, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(2, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(4, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(8, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(16, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(32, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(64, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(128, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(256, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(512, 10000, cpu, C_TYPE, TF_TYPE); \ BM_XentDev(1024, 10000, cpu, C_TYPE, TF_TYPE) BM_XentDev_CPU(float, DT_FLOAT); BM_XentDev_CPU(bfloat16, DT_BFLOAT16); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/xent_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/xent_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
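Illustrative note (not part of the upstream record): the CPU path of SoftmaxCrossEntropyWithLogits reduces each row to a scalar loss and a backprop row. The sketch below works through the math for one row in plain C++ (subtract the row max, form log-sum-exp, then loss = sum(label * (log_sum_exp - shifted logit)) and backprop = softmax - label); it mirrors the formula, not the Eigen implementation, and the logits in main are invented.

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// One row of softmax cross-entropy with logits, written for clarity.
double XentRow(const std::vector<double>& logits,
               const std::vector<double>& labels,
               std::vector<double>* backprop) {
  const double max_logit = *std::max_element(logits.begin(), logits.end());
  double sum_exp = 0.0;
  for (double l : logits) sum_exp += std::exp(l - max_logit);
  const double log_sum_exp = std::log(sum_exp);

  double loss = 0.0;
  backprop->resize(logits.size());
  for (std::size_t i = 0; i < logits.size(); ++i) {
    const double shifted = logits[i] - max_logit;
    loss += labels[i] * (log_sum_exp - shifted);
    (*backprop)[i] = std::exp(shifted) / sum_exp - labels[i];
  }
  return loss;
}

int main() {
  std::vector<double> backprop;
  // One-hot label on the second class; loss is -log softmax(2.0) ~= 1.4076.
  std::cout << XentRow({1.0, 2.0, 3.0}, {0.0, 1.0, 0.0}, &backprop) << "\n";
  return 0;
}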
7d0c65d5-2cb4-4db6-a272-128a033d924c
cpp
tensorflow/tensorflow
dynamic_partition_op
tensorflow/compiler/tf2xla/kernels/dynamic_partition_op.cc
tensorflow/core/kernels/dynamic_partition_op_test.cc
#include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/comparison_util.h" #include "xla/hlo/builder/lib/arithmetic.h" #include "xla/hlo/builder/lib/comparators.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/ops_util.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { class DynamicPartitionOp : public XlaOpKernel { public: explicit DynamicPartitionOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("num_partitions", &num_partitions_)); } xla::XlaOp CountS32(XlaOpKernelContext* ctx, xla::XlaOp input, int64_t target) { xla::XlaOp equal_dim = xla::Compare(input, xla::ConstantR0<int32>(ctx->builder(), target), {}, xla::ComparisonDirection::kEq); xla::XlaOp casted = xla::ConvertElementType(equal_dim, xla::S32); return xla::ReduceAll( casted, xla::Zero(ctx->builder(), xla::S32), xla::CreateScalarAddComputation(xla::S32, ctx->builder())); } std::pair<std::vector<xla::XlaOp>, std::vector<xla::XlaOp>> DynamicPartition1D(XlaOpKernelContext* ctx, xla::XlaOp data_1d, xla::XlaOp partitions_1d, const xla::Shape& data_1d_shape, const xla::Shape& partition_1d_shape) { int64_t input_count = data_1d_shape.dimensions(0); std::vector<xla::XlaOp> to_sort = {partitions_1d, data_1d}; std::vector<xla::PrimitiveType> types_to_sort = { partition_1d_shape.element_type(), data_1d_shape.element_type()}; xla::XlaOp sorted = xla::Sort( to_sort, xla::CreateScalarLtComputation(types_to_sort, ctx->builder()), 0, true); xla::XlaOp sorted_partitions = xla::GetTupleElement(sorted, 0); xla::XlaOp sorted_data = xla::GetTupleElement(sorted, 1); std::vector<xla::XlaOp> partition_length(num_partitions_); std::vector<xla::XlaOp> partition_start(num_partitions_); xla::XlaOp count_so_far = xla::Zero(ctx->builder(), xla::S32); for (int64_t i = 0; i < num_partitions_; ++i) { xla::XlaOp count = CountS32(ctx, sorted_partitions, i); partition_length[i] = count; partition_start[i] = count_so_far; count_so_far = xla::Add(count_so_far, count); } xla::PaddingConfig padding_config; auto* dims = padding_config.add_dimensions(); dims->set_edge_padding_low(0); dims->set_edge_padding_high(input_count); dims->set_interior_padding(0); auto padded_data = xla::Pad(sorted_data, xla::Zero(ctx->builder(), ctx->input_xla_type(0)), padding_config); std::vector<xla::XlaOp> output(num_partitions_); for (int64_t i = 0; i < num_partitions_; ++i) { padded_data = xla::RemoveDynamicDimension(padded_data, 0); auto sliced = xla::DynamicSlice(padded_data, {partition_start[i]}, {input_count}); output[i] = sliced; } return {output, partition_length}; } void Compile(XlaOpKernelContext* ctx) override { xla::Shape data_shape = ctx->InputXlaShape(0).value(); xla::Shape partition_shape = ctx->InputXlaShape(1).value(); xla::XlaOp data = ctx->Input(0); xla::XlaOp partitions = ctx->Input(1); std::vector<int64_t> partitions_static; bool 
partitions_are_static = ctx->ConstantInputReshapedToIntVector(1, &partitions_static).ok(); if (data_shape.rank() > partition_shape.rank()) { std::vector<int64_t> broadcasted_dims; auto rank = partition_shape.rank(); broadcasted_dims.reserve(rank); for (int64_t i = 0; i < rank; ++i) { broadcasted_dims.push_back(i); } partitions = xla::BroadcastInDim(partitions, data_shape.dimensions(), broadcasted_dims); } std::vector<int64_t> output_shape_bound_dims; output_shape_bound_dims.push_back( xla::ShapeUtil::ElementsIn(partition_shape)); int64_t count_diff = 1; for (int64_t i = partition_shape.rank(); i < data_shape.rank(); ++i) { output_shape_bound_dims.push_back(data_shape.dimensions(i)); count_diff *= data_shape.dimensions(i); } int64_t input_count = xla::ShapeUtil::ElementsIn(data_shape); auto data_1d = xla::Reshape(data, {input_count}); auto partitions_1d = xla::Reshape(partitions, {input_count}); xla::Shape data_1d_shape = xla::ShapeUtil::MakeShape(data_shape.element_type(), {input_count}); xla::Shape partitions_1d_shape = xla::ShapeUtil::MakeShape( partition_shape.element_type(), {input_count}); std::vector<xla::XlaOp> output, partition_length; std::tie(output, partition_length) = DynamicPartition1D( ctx, data_1d, partitions_1d, data_1d_shape, partitions_1d_shape); for (int64_t i = 0; i < num_partitions_; ++i) { auto reshape = xla::Reshape(output[i], output_shape_bound_dims); if (partitions_are_static) { int64_t size = absl::c_count(partitions_static, i); ctx->SetOutput(i, xla::SliceInDim(reshape, 0, size, 1, 0)); } else { xla::XlaOp length; if (count_diff != 0) { length = xla::Div(partition_length[i], xla::ConstantR0<int32>(ctx->builder(), count_diff)); } else { length = CountS32(ctx, ctx->Input(1), i); } ctx->SetOutput(i, xla::SetDimensionSize(reshape, length, 0)); } } } private: int64_t num_partitions_; }; REGISTER_XLA_OP(Name("DynamicPartition"), DynamicPartitionOp); } }
#include <functional> #include <memory> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class DynamicPartitionOpTest : public OpsTestBase { protected: void MakeOp() { TF_ASSERT_OK(NodeDefBuilder("myop", "DynamicPartition") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("num_partitions", 4) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(DynamicPartitionOpTest, Simple_OneD) { MakeOp(); AddInputFromArray<float>(TensorShape({6}), {0, 13, 2, 39, 4, 17}); AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 3, 2, 1}); TF_ASSERT_OK(RunOpKernel()); { Tensor expected(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected, {0, 13}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({1})); test::FillValues<float>(&expected, {17}); test::ExpectTensorEqual<float>(expected, *GetOutput(1)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected, {2, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(2)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({1})); test::FillValues<float>(&expected, {39}); test::ExpectTensorEqual<float>(expected, *GetOutput(3)); } } TEST_F(DynamicPartitionOpTest, Simple_TwoD) { MakeOp(); AddInputFromArray<float>( TensorShape({6, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}); AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 3, 2, 1}); TF_ASSERT_OK(RunOpKernel()); { Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {0, 1, 2, 3, 4, 5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3})); test::FillValues<float>(&expected, {15, 16, 17}); test::ExpectTensorEqual<float>(expected, *GetOutput(1)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {6, 7, 8, 12, 13, 14}); test::ExpectTensorEqual<float>(expected, *GetOutput(2)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3})); test::FillValues<float>(&expected, {9, 10, 11}); test::ExpectTensorEqual<float>(expected, *GetOutput(3)); } } TEST_F(DynamicPartitionOpTest, SomeOutputsEmpty) { MakeOp(); AddInputFromArray<float>(TensorShape({6}), {0, 13, 2, 39, 4, 17}); AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 2, 0, 2}); TF_ASSERT_OK(RunOpKernel()); TensorShape empty_one_dim; empty_one_dim.AddDim(0); Tensor expected_empty(allocator(), DT_FLOAT, empty_one_dim); { Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {0, 13, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } { 
test::ExpectTensorEqual<float>(expected_empty, *GetOutput(1)); } { Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {2, 39, 17}); test::ExpectTensorEqual<float>(expected, *GetOutput(2)); } { test::ExpectTensorEqual<float>(expected_empty, *GetOutput(3)); } } TEST_F(DynamicPartitionOpTest, Error_IndexOutOfRange) { MakeOp(); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({5}), {0, 2, 99, 2, 2}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "partitions[2] = 99 is not in [0, 4)")) << s; } Node* DynamicPartitionNode(Graph* g, Node* in0, Node* in1, int num_partitions) { Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "DynamicPartition") .Input(in0) .Input(in1) .Attr("num_partitions", num_partitions) .Finalize(g, &ret)); return ret; } template <typename T> static Graph* DynamicPartition(int num_partitions, int dim) { Graph* g = new Graph(OpRegistry::Global()); const int kRows = ((128 << 20) / sizeof(T)) / dim; Tensor data(DataTypeToEnum<T>::value, TensorShape({kRows, dim})); data.flat<T>().setRandom(); random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); Tensor partitions(DT_INT32, TensorShape({kRows})); for (int i = 0; i < kRows; i++) { partitions.flat<int32>()(i) = rnd.Uniform(num_partitions); } DynamicPartitionNode(g, test::graph::Constant(g, data), test::graph::Constant(g, partitions), num_partitions); return g; } #define BM_DYNAMIC_PARTITION(DEVICE, T, num) \ static void BM_##DEVICE##_dynpart_##T##_##num( \ ::testing::benchmark::State& state) { \ const int dim = state.range(0); \ \ const int64_t items = ((128 << 20) / sizeof(T)); \ test::Benchmark(#DEVICE, DynamicPartition<T>(num, dim), \ false) \ .Run(state); \ const int64_t tot = static_cast<int64_t>(state.iterations()) * items; \ state.SetItemsProcessed(tot); \ } \ BENCHMARK(BM_##DEVICE##_dynpart_##T##_##num)->UseRealTime()->Arg(1)->Arg(256) BM_DYNAMIC_PARTITION(cpu, float, 2); BM_DYNAMIC_PARTITION(cpu, float, 100); BM_DYNAMIC_PARTITION(cpu, double, 2); BM_DYNAMIC_PARTITION(cpu, double, 100); BM_DYNAMIC_PARTITION(cpu, complex64, 2); BM_DYNAMIC_PARTITION(cpu, complex64, 100); BM_DYNAMIC_PARTITION(gpu, int32, 2); BM_DYNAMIC_PARTITION(gpu, int32, 100); BM_DYNAMIC_PARTITION(gpu, int64, 2); BM_DYNAMIC_PARTITION(gpu, int64, 100); BM_DYNAMIC_PARTITION(gpu, float, 2); BM_DYNAMIC_PARTITION(gpu, float, 100); BM_DYNAMIC_PARTITION(gpu, double, 2); BM_DYNAMIC_PARTITION(gpu, double, 100); BM_DYNAMIC_PARTITION(gpu, complex64, 2); BM_DYNAMIC_PARTITION(gpu, complex64, 100); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/dynamic_partition_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/dynamic_partition_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
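For reference, a minimal stand-alone sketch (plain C++ over std::vector, not TensorFlow code) of the DynamicPartition semantics that the record above implements and tests: element data[i] is routed to output partitions[i], preserving relative order within each partition. PartitionByIndex is a name invented for this example.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Routes data[i] into outputs[partitions[i]], keeping relative order within
// each partition, which is what the Simple_OneD test above checks.
std::vector<std::vector<float>> PartitionByIndex(
    const std::vector<float>& data, const std::vector<int32_t>& partitions,
    int num_partitions) {
  std::vector<std::vector<float>> outputs(num_partitions);
  for (std::size_t i = 0; i < data.size(); ++i) {
    outputs[partitions[i]].push_back(data[i]);
  }
  return outputs;
}

int main() {
  // Same inputs as Simple_OneD: data {0,13,2,39,4,17}, partitions {0,0,2,3,2,1}.
  auto outputs = PartitionByIndex({0, 13, 2, 39, 4, 17}, {0, 0, 2, 3, 2, 1}, 4);
  for (std::size_t p = 0; p < outputs.size(); ++p) {
    std::cout << "partition " << p << ":";
    for (float v : outputs[p]) std::cout << ' ' << v;
    std::cout << '\n';  // expected: {0 13}, {17}, {2 4}, {39}
  }
  return 0;
}

The XLA kernel cannot build ragged outputs directly, which is why the source above instead sorts (partition, value) pairs, counts per-partition lengths with CountS32, pads, and slices dynamically sized windows.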
bed7f3c8-e0f0-471f-aa7c-1b5fc1bac020
cpp
tensorflow/tensorflow
checkpoint_callback_manager
tensorflow/core/kernels/checkpoint_callback_manager.cc
tensorflow/core/kernels/checkpoint_callback_manager_test.cc
#include "tensorflow/core/kernels/checkpoint_callback_manager.h" #include <string> #include <utility> #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/stringpiece.h" #include "tsl/platform/regexp.h" namespace tensorflow { namespace checkpoint { const absl::string_view kCheckpointCallbackManagerResourceName = "checkpoint_callback_manager"; namespace { const absl::string_view kCheckpointFileRegex = "^part-[0-9]*-of-[0-9]*"; const absl::string_view kCheckpointTempDirRegex = "-[0-9]*_temp$"; const absl::string_view kCheckpointDirRegex = "-[0-9]*$"; const absl::string_view kCheckpointTempDirSuffix = "_temp"; void TriggerSaveCallbackIfFileNotExist(absl::string_view checkpoint_id, absl::string_view checkpoint_dir, absl::string_view file_extension, SaveCallback callback) { const std::string file_path = io::JoinPath( checkpoint_dir, absl::StrCat(checkpoint_id, ".", file_extension)); if (Env::Default()->FileExists(file_path).ok()) { return; } LOG(INFO) << "Calling a save callback: file_extension = " << file_extension << ", checkpoint_id = " << checkpoint_id; absl::StatusOr<std::string> save_content = callback(checkpoint_id); if (!save_content.ok()) { LOG(WARNING) << save_content.status(); return; } if (save_content->empty()) { return; } Status write_status = WriteStringToFile(Env::Default(), file_path, *save_content); if (!write_status.ok()) { LOG(WARNING) << write_status; } else { LOG(INFO) << "A CheckpointCallbackManager has been written to " << file_path; } } void TriggerRestoreCallbackIfFileExists(absl::string_view checkpoint_id, absl::string_view checkpoint_dir, absl::string_view file_extension, RestoreCallback callback) { const std::string file_path = io::JoinPath( checkpoint_dir, absl::StrCat(checkpoint_id, ".", file_extension)); if (!Env::Default()->FileExists(file_path).ok()) { return; } std::string payload; Status read_status = ReadFileToString(Env::Default(), file_path, &payload); if (!read_status.ok()) { LOG(WARNING) << "Failed to read: " << read_status; return; } LOG(INFO) << "Calling a restore callback: file_extension = " << file_extension << ", checkpoint_id = " << checkpoint_id; Status callback_status = callback(checkpoint_id, payload); if (!callback_status.ok()) { LOG(WARNING) << callback_status; } } } absl::StatusOr<std::pair<std::string, std::string>> CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix( absl::string_view prefix) { for (absl::string_view path = prefix;; path = io::Dirname(path)) { absl::string_view basename = io::Basename(path); if (basename.empty()) break; if (RE2::PartialMatch(basename, kCheckpointFileRegex)) continue; if (RE2::PartialMatch(basename, kCheckpointTempDirRegex)) { return std::make_pair( std::string(basename.substr( 0, basename.length() - kCheckpointTempDirSuffix.length())), std::string(io::Dirname(path))); } if (RE2::PartialMatch(basename, kCheckpointDirRegex)) { return std::make_pair(std::string(basename), std::string(io::Dirname(path))); } } return errors::NotFound( absl::StrCat("Failed to find a checkpoint id. 
prefix = ", prefix)); } Status CheckpointCallbackManager::RegisterSaveCallback( absl::string_view file_extension, SaveCallback callback) { SaveCallback lazy_callback = nullptr; std::string checkpoint_id; std::string checkpoint_dir; { mutex_lock l(mu_); if (!save_callbacks_.try_emplace(file_extension, std::move(callback)) .second) { return errors::AlreadyExists("A callback already exists."); } if (!last_saved_checkpoint_id_and_dir_.first.empty()) { lazy_callback = save_callbacks_[file_extension]; checkpoint_id = last_saved_checkpoint_id_and_dir_.first; checkpoint_dir = last_saved_checkpoint_id_and_dir_.second; } } if (lazy_callback != nullptr) { TriggerSaveCallbackIfFileNotExist(checkpoint_id, checkpoint_dir, file_extension, lazy_callback); } return absl::OkStatus(); } bool CheckpointCallbackManager::DoesSaveCallbackExist( absl::string_view file_extension) { tf_shared_lock l(mu_); return save_callbacks_.contains(file_extension); } Status CheckpointCallbackManager::RegisterRestoreCallback( absl::string_view file_extension, RestoreCallback callback) { RestoreCallback lazy_callback = nullptr; std::string checkpoint_id; std::string checkpoint_dir; { mutex_lock l(mu_); if (!restore_callbacks_.try_emplace(file_extension, std::move(callback)) .second) { return errors::AlreadyExists("A callback already exists."); } if (!last_restored_checkpoint_id_and_dir_.first.empty()) { lazy_callback = restore_callbacks_[file_extension]; checkpoint_id = last_restored_checkpoint_id_and_dir_.first; checkpoint_dir = last_restored_checkpoint_id_and_dir_.second; } } if (lazy_callback != nullptr) { TriggerRestoreCallbackIfFileExists(checkpoint_id, checkpoint_dir, file_extension, lazy_callback); } return absl::OkStatus(); } bool CheckpointCallbackManager::DoesRestoreCallbackExist( absl::string_view file_extension) { tf_shared_lock l(mu_); return restore_callbacks_.contains(file_extension); } void CheckpointCallbackManager::Save(absl::string_view prefix) { absl::StatusOr<std::pair<std::string, std::string>> id_and_dir = GetCheckpointIdAndPathFromPrefix(prefix); if (!id_and_dir.ok()) { return; } absl::flat_hash_map<std::string, SaveCallback> copy_of_save_callbacks; { mutex_lock l(mu_); last_saved_checkpoint_id_and_dir_ = *id_and_dir; copy_of_save_callbacks = save_callbacks_; } for (const auto& name_and_callback : copy_of_save_callbacks) { TriggerSaveCallbackIfFileNotExist(id_and_dir->first, id_and_dir->second, name_and_callback.first, name_and_callback.second); } } void CheckpointCallbackManager::Restore(absl::string_view prefix) { absl::StatusOr<std::pair<std::string, std::string>> id_and_dir = GetCheckpointIdAndPathFromPrefix(prefix); if (!id_and_dir.ok()) { return; } absl::flat_hash_map<std::string, RestoreCallback> copy_of_restore_callbacks; { mutex_lock l(mu_); if (*id_and_dir == last_restored_checkpoint_id_and_dir_) { return; } last_restored_checkpoint_id_and_dir_ = *id_and_dir; copy_of_restore_callbacks = restore_callbacks_; } for (const auto& name_and_callback : copy_of_restore_callbacks) { TriggerRestoreCallbackIfFileExists(id_and_dir->first, id_and_dir->second, name_and_callback.first, name_and_callback.second); } } } }
#include "tensorflow/core/kernels/checkpoint_callback_manager.h" #include <string> #include <utility> #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/resource_handle.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace checkpoint { namespace { class CheckpointCallbackManagerTest : public ::testing::Test { protected: void SetUp() override { checkpoint_callback_manager_ = new CheckpointCallbackManager(); handle_ = ResourceHandle::MakeRefCountingHandle( checkpoint_callback_manager_, "cpu", {}, {}); } CheckpointCallbackManager* checkpoint_callback_manager_; ResourceHandle handle_; }; TEST_F(CheckpointCallbackManagerTest, GetCheckpointIdAndPathFromPrefixWithTempDir) { absl::StatusOr<std::pair<std::string, std::string>> pair = CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix( "/foo/bar/model.ckpt-5_temp/part-00000-of-00001"); TF_ASSERT_OK(pair.status()); EXPECT_EQ(pair->first, "model.ckpt-5"); EXPECT_EQ(pair->second, "/foo/bar"); } TEST_F(CheckpointCallbackManagerTest, GetCheckpointIdAndPathFromPrefixWithPartFile) { absl::StatusOr<std::pair<std::string, std::string>> pair = CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix( "/foo/bar/model.ckpt-5/part-00000-of-00001"); TF_ASSERT_OK(pair.status()); EXPECT_EQ(pair->first, "model.ckpt-5"); EXPECT_EQ(pair->second, "/foo/bar"); } TEST_F(CheckpointCallbackManagerTest, GetCheckpointIdAndPathFromPrefixWithoutPartFile) { absl::StatusOr<std::pair<std::string, std::string>> pair = CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix( "/foo/bar/model.ckpt-5"); TF_ASSERT_OK(pair.status()); EXPECT_EQ(pair->first, "model.ckpt-5"); EXPECT_EQ(pair->second, "/foo/bar"); } TEST_F(CheckpointCallbackManagerTest, GetCheckpointIdAndPathFromPrefixForLongerPartName) { absl::StatusOr<std::pair<std::string, std::string>> pair = CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix( "/foo/bar/ckpt-tensor-1_temp/part-00000-of-00002_dev-0-of-2"); TF_ASSERT_OK(pair.status()); EXPECT_EQ(pair->first, "ckpt-tensor-1"); EXPECT_EQ(pair->second, "/foo/bar"); } TEST_F(CheckpointCallbackManagerTest, GetCheckpointIdAndPathFromPrefixUnrecognized) { EXPECT_FALSE( CheckpointCallbackManager::GetCheckpointIdAndPathFromPrefix("/foo/bar") .ok()); } TEST_F(CheckpointCallbackManagerTest, RegisterSaveCallbackTwice) { SaveCallback first_callback = [](absl::string_view checkpoint_id) { return std::string("MockString"); }; SaveCallback second_callback = [](absl::string_view checkpoint_id) { return std::string("MockString"); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "foo", std::move(first_callback))); EXPECT_FALSE(checkpoint_callback_manager_ ->RegisterSaveCallback("foo", std::move(second_callback)) .ok()); } TEST_F(CheckpointCallbackManagerTest, RegisterRestoreCallbackTwice) { RestoreCallback first_callback = [](absl::string_view checkpoint_id, absl::string_view str) { return absl::OkStatus(); }; RestoreCallback second_callback = [](absl::string_view checkpoint_id, absl::string_view str) { return absl::OkStatus(); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback( "foo", std::move(first_callback))); EXPECT_FALSE(checkpoint_callback_manager_ 
->RegisterRestoreCallback("foo", std::move(second_callback)) .ok()); } TEST_F(CheckpointCallbackManagerTest, DoesSaveCallbackExist) { SaveCallback first_callback = [](absl::string_view checkpoint_id) { return std::string("MockString"); }; SaveCallback second_callback = [](absl::string_view checkpoint_id) { return std::string("MockString"); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "foo", std::move(first_callback))); TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "bar", std::move(second_callback))); EXPECT_TRUE(checkpoint_callback_manager_->DoesSaveCallbackExist("foo")); EXPECT_TRUE(checkpoint_callback_manager_->DoesSaveCallbackExist("bar")); EXPECT_FALSE( checkpoint_callback_manager_->DoesSaveCallbackExist("not_exist")); } TEST_F(CheckpointCallbackManagerTest, DoesRestoreCallbackExist) { RestoreCallback first_callback = [](absl::string_view checkpoint_id, absl::string_view str) { return absl::OkStatus(); }; RestoreCallback second_callback = [](absl::string_view checkpoint_id, absl::string_view str) { return absl::OkStatus(); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback( "foo", std::move(first_callback))); TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback( "bar", std::move(second_callback))); EXPECT_TRUE(checkpoint_callback_manager_->DoesRestoreCallbackExist("foo")); EXPECT_TRUE(checkpoint_callback_manager_->DoesRestoreCallbackExist("bar")); EXPECT_FALSE( checkpoint_callback_manager_->DoesRestoreCallbackExist("not_exist")); } TEST_F(CheckpointCallbackManagerTest, SaveTwoCallbacks) { SaveCallback save_callback1 = [](absl::string_view checkpoint_id) { return absl::StrCat("MockContent1::", checkpoint_id); }; SaveCallback save_callback2 = [](absl::string_view checkpoint_id) { return absl::StrCat("MockContent2::", checkpoint_id); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "foo", std::move(save_callback1))); TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "bar", std::move(save_callback2))); checkpoint_callback_manager_->Save(io::JoinPath( testing::TmpDir(), "model.ckpt-123_temp/part-00000-of-00001")); std::string file_content1; TF_EXPECT_OK(ReadFileToString( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-123.foo"), &file_content1)); EXPECT_EQ(file_content1, "MockContent1::model.ckpt-123"); std::string file_content2; TF_EXPECT_OK(ReadFileToString( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-123.bar"), &file_content2)); EXPECT_EQ(file_content2, "MockContent2::model.ckpt-123"); } TEST_F(CheckpointCallbackManagerTest, SaveMultipleTimes) { SaveCallback save_callback = [](absl::string_view checkpoint_id) { return absl::StrCat("MockContent::", checkpoint_id); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "foo", std::move(save_callback))); checkpoint_callback_manager_->Save(io::JoinPath( testing::TmpDir(), "model.ckpt-100_temp/part-00000-of-00001")); checkpoint_callback_manager_->Save(io::JoinPath( testing::TmpDir(), "model.ckpt-100_temp/part-00000-of-00001")); checkpoint_callback_manager_->Save(io::JoinPath( testing::TmpDir(), "model.ckpt-200_temp/part-00000-of-00001")); std::string file_content; TF_EXPECT_OK(ReadFileToString( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-100.foo"), &file_content)); EXPECT_EQ(file_content, "MockContent::model.ckpt-100"); TF_EXPECT_OK(ReadFileToString( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-200.foo"), &file_content)); EXPECT_EQ(file_content, 
"MockContent::model.ckpt-200"); } TEST_F(CheckpointCallbackManagerTest, Restore) { int callback_call_count = 0; RestoreCallback restore_callback = [&callback_call_count]( absl::string_view checkpoint_id, absl::string_view str) { EXPECT_EQ(checkpoint_id, "model.ckpt-100"); EXPECT_EQ(str, "Apple"); ++callback_call_count; return absl::OkStatus(); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback( "foo", std::move(restore_callback))); TF_EXPECT_OK(WriteStringToFile( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-100.foo"), "Apple")); EXPECT_EQ(callback_call_count, 0); checkpoint_callback_manager_->Restore( io::JoinPath(testing::TmpDir(), "model.ckpt-100")); EXPECT_EQ(callback_call_count, 1); checkpoint_callback_manager_->Restore( io::JoinPath(testing::TmpDir(), "model.ckpt-100")); EXPECT_EQ(callback_call_count, 1); } TEST_F(CheckpointCallbackManagerTest, SaveAndRestore) { SaveCallback save_callback = [](absl::string_view checkpoint_id) { return std::string("Apple"); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "foo", std::move(save_callback))); int restore_callback_count = 0; RestoreCallback restore_callback = [&restore_callback_count]( absl::string_view checkpoint_id, absl::string_view str) { EXPECT_EQ(checkpoint_id, "model.ckpt-500"); EXPECT_EQ(str, "Apple"); ++restore_callback_count; return absl::OkStatus(); }; TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback( "foo", std::move(restore_callback))); checkpoint_callback_manager_->Save(io::JoinPath( testing::TmpDir(), "model.ckpt-500_temp/part-00000-of-00001")); EXPECT_EQ(restore_callback_count, 0); checkpoint_callback_manager_->Restore( io::JoinPath(testing::TmpDir(), "model.ckpt-500")); EXPECT_EQ(restore_callback_count, 1); } TEST_F(CheckpointCallbackManagerTest, SaveLazyCallback) { SaveCallback save_callback = [](absl::string_view checkpoint_id) { return absl::StrCat("MockContent::", checkpoint_id); }; checkpoint_callback_manager_->Save(io::JoinPath( testing::TmpDir(), "model.ckpt-456_temp/part-00000-of-00001")); TF_ASSERT_OK(checkpoint_callback_manager_->RegisterSaveCallback( "foo", std::move(save_callback))); std::string file_content; TF_EXPECT_OK(ReadFileToString( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-456.foo"), &file_content)); EXPECT_EQ(file_content, "MockContent::model.ckpt-456"); } TEST_F(CheckpointCallbackManagerTest, RestoreLazyCallback) { int callback_call_count = 0; RestoreCallback restore_callback = [&callback_call_count]( absl::string_view checkpoint_id, absl::string_view str) { EXPECT_EQ(checkpoint_id, "model.ckpt-100"); EXPECT_EQ(str, "Apple"); ++callback_call_count; return absl::OkStatus(); }; TF_EXPECT_OK(WriteStringToFile( Env::Default(), io::JoinPath(testing::TmpDir(), "model.ckpt-100.foo"), "Apple")); EXPECT_EQ(callback_call_count, 0); checkpoint_callback_manager_->Restore( io::JoinPath(testing::TmpDir(), "model.ckpt-100")); EXPECT_EQ(callback_call_count, 0); TF_ASSERT_OK(checkpoint_callback_manager_->RegisterRestoreCallback( "foo", std::move(restore_callback))); EXPECT_EQ(callback_call_count, 1); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/checkpoint_callback_manager.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/checkpoint_callback_manager_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
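As a rough illustration of how GetCheckpointIdAndPathFromPrefix walks up a save prefix, here is a self-contained approximation in plain C++. The real implementation above uses RE2 patterns (kCheckpointFileRegex, kCheckpointTempDirRegex, kCheckpointDirRegex); this sketch replaces them with simple string checks, and ParsePrefix, Basename, Dirname, EndsWith are names invented for the example.

#include <iostream>
#include <optional>
#include <string>
#include <utility>

namespace {
std::string Basename(const std::string& p) {
  auto pos = p.find_last_of('/');
  return pos == std::string::npos ? p : p.substr(pos + 1);
}
std::string Dirname(const std::string& p) {
  auto pos = p.find_last_of('/');
  return pos == std::string::npos ? "" : p.substr(0, pos);
}
bool EndsWith(const std::string& s, const std::string& suffix) {
  return s.size() >= suffix.size() &&
         s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}
}  // namespace

// Returns {checkpoint_id, checkpoint_dir}, or nullopt when no id is found.
std::optional<std::pair<std::string, std::string>> ParsePrefix(std::string path) {
  while (!path.empty()) {
    std::string base = Basename(path);
    if (base.rfind("part-", 0) == 0) {  // skip shard files like part-00000-of-00001
      path = Dirname(path);
      continue;
    }
    if (EndsWith(base, "_temp")) {  // model.ckpt-5_temp -> model.ckpt-5
      return std::make_pair(base.substr(0, base.size() - 5), Dirname(path));
    }
    // Otherwise treat a basename ending in "-<number>" as the checkpoint id.
    auto dash = base.find_last_of('-');
    if (dash != std::string::npos && dash + 1 < base.size() &&
        base.find_first_not_of("0123456789", dash + 1) == std::string::npos) {
      return std::make_pair(base, Dirname(path));
    }
    path = Dirname(path);
  }
  return std::nullopt;
}

int main() {
  auto r = ParsePrefix("/foo/bar/model.ckpt-5_temp/part-00000-of-00001");
  if (r) std::cout << r->first << " in " << r->second << '\n';  // model.ckpt-5 in /foo/bar
  return 0;
}

The parsed id is also what the SaveLazyCallback and RestoreLazyCallback tests rely on: the manager remembers the last id it saw so that callbacks registered afterwards can still be triggered.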
daa75eb7-1f73-4fa1-80c7-c050d257e75f
cpp
tensorflow/tensorflow
string_format_op
tensorflow/core/kernels/string_format_op.cc
tensorflow/core/kernels/string_format_op_test.cc
#include <iostream> #include "absl/strings/str_split.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { class StringFormatOp : public OpKernel { public: explicit StringFormatOp(OpKernelConstruction* ctx) : OpKernel(ctx) { string template_; OP_REQUIRES_OK(ctx, ctx->GetAttr("template", &template_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("placeholder", &placeholder_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("summarize", &summarize_)); split_template_ = absl::StrSplit(template_, placeholder_); int64_t num_placeholders = split_template_.size() - 1; OP_REQUIRES(ctx, ctx->num_inputs() == num_placeholders, errors::InvalidArgument(strings::StrCat( "num placeholders in template and num inputs must match: ", num_placeholders, " vs. ", ctx->num_inputs()))); } void Compute(OpKernelContext* ctx) override { Tensor* formatted_string = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &formatted_string)); string msg; strings::StrAppend(&msg, split_template_[0].c_str()); for (int i = 0; i < ctx->num_inputs(); ++i) { strings::StrAppend(&msg, ctx->input(i).SummarizeValue(summarize_, true)); strings::StrAppend(&msg, split_template_[i + 1].c_str()); } formatted_string->scalar<tstring>()() = std::move(msg); } private: int32 summarize_ = 0; string placeholder_; std::vector<std::string> split_template_; }; REGISTER_KERNEL_BUILDER(Name("StringFormat").Device(DEVICE_CPU), StringFormatOp); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" namespace tensorflow { namespace { class StringFormatGraphTest : public OpsTestBase { protected: Status Init(int num_inputs, DataType input_type, const string& template_ = "%s", const string& placeholder = "%s", int summarize = 3) { TF_CHECK_OK(NodeDefBuilder("op", "StringFormat") .Input(FakeInput(num_inputs, input_type)) .Attr("template", template_) .Attr("placeholder", placeholder) .Attr("summarize", summarize) .Finalize(node_def())); return InitOp(); } }; TEST_F(StringFormatGraphTest, Int32Success_7) { TF_ASSERT_OK(Init(1, DT_INT32, "First tensor: %s")); AddInputFromArray<int32>(TensorShape({7}), {1, 2, 3, 4, 5, 6, 7}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({})); test::FillValues<tstring>(&expected, {"First tensor: [1 2 3 ... 5 6 7]"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } TEST_F(StringFormatGraphTest, Int32Success_3_3) { TF_ASSERT_OK(Init(1, DT_INT32, "First tensor: %s", "%s", 1)); AddInputFromArray<int32>(TensorShape({3, 3}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_STRING, TensorShape({})); test::FillValues<tstring>(&expected, {"First tensor: [[1 ... 3]\n ..." "\n [7 ... 9]]"}); test::ExpectTensorEqual<tstring>(expected, *GetOutput(0)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/string_format_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/string_format_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
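A small stand-alone sketch of the two steps StringFormatOp performs: splitting the template on the placeholder at construction time (absl::StrSplit in the kernel) and interleaving the pieces with the summarized inputs at run time. SplitTemplate and FormatInputs are names made up for this example; the kernel requires exactly one input per placeholder, which FormatInputs assumes.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Splits tmpl on every occurrence of placeholder; pieces.size() - 1 is the
// number of placeholders, which the kernel checks against num_inputs().
std::vector<std::string> SplitTemplate(const std::string& tmpl,
                                       const std::string& placeholder) {
  std::vector<std::string> pieces;
  std::string::size_type start = 0, pos;
  while ((pos = tmpl.find(placeholder, start)) != std::string::npos) {
    pieces.push_back(tmpl.substr(start, pos - start));
    start = pos + placeholder.size();
  }
  pieces.push_back(tmpl.substr(start));
  return pieces;
}

// Interleaves the template pieces with one rendered input per placeholder.
std::string FormatInputs(const std::vector<std::string>& pieces,
                         const std::vector<std::string>& rendered_inputs) {
  std::string out = pieces[0];
  for (std::size_t i = 0; i < rendered_inputs.size(); ++i) {
    out += rendered_inputs[i];
    out += pieces[i + 1];
  }
  return out;
}

int main() {
  auto pieces = SplitTemplate("First tensor: %s", "%s");
  std::cout << FormatInputs(pieces, {"[1 2 3 ... 5 6 7]"}) << '\n';
  // Prints: First tensor: [1 2 3 ... 5 6 7], matching the Int32Success_7 test.
  return 0;
}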
4e13516c-178e-4cf2-b24e-2d400d9d8e92
cpp
tensorflow/tensorflow
sparse_dense_binary_op_shared
tensorflow/core/kernels/sparse_dense_binary_op_shared.cc
tensorflow/core/kernels/sparse_dense_binary_op_shared_test.cc
#define EIGEN_USE_THREADS #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/cwise_ops.h" #include "tensorflow/core/kernels/cwise_ops_common.h" #include "tensorflow/core/util/bcast.h" using Eigen::TensorRef; using tensorflow::gtl::ArraySlice; namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename Device, typename T, typename Functor> class SparseDenseBinaryOpShared : public OpKernel { public: explicit SparseDenseBinaryOpShared(OpKernelConstruction *ctx) : OpKernel(ctx) {} void Compute(OpKernelContext *ctx) override { const Tensor *indices_t, *values_t, *shape_t, *dense_t; OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t)); OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t)); OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t)); OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()), errors::InvalidArgument( "Input sp_indices should be a matrix but received shape: ", indices_t->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values_t->shape()) && TensorShapeUtils::IsVector(shape_t->shape()), errors::InvalidArgument( "Inputs sp_values and sp_shape should be vectors " "but received shapes: ", values_t->shape().DebugString(), " and ", shape_t->shape().DebugString())); OP_REQUIRES( ctx, TensorShapeUtils::IsVector(shape_t->shape()), errors::InvalidArgument("Input sp_shape must be a vector. Got: ", shape_t->shape().DebugString())); OP_REQUIRES( ctx, values_t->dim_size(0) == indices_t->dim_size(0), errors::InvalidArgument( "The first dimension of values and indices should match. (", values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")")); OP_REQUIRES( ctx, shape_t->shape().dim_size(0) == indices_t->shape().dim_size(1), errors::InvalidArgument( "Number of dimensions must match second dimension of indices. ", "Got ", shape_t->shape().dim_size(0), " dimensions, indices shape: ", indices_t->shape().DebugString())); OP_REQUIRES(ctx, shape_t->NumElements() > 0, errors::InvalidArgument( "The shape argument requires at least one element.")); const auto indices_mat = indices_t->matrix<int64_t>(); const auto shape_vec = shape_t->vec<int64_t>(); TensorShape lhs_shape; OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(shape_vec, &lhs_shape)); const auto lhs_dims = BCast::FromShape(lhs_shape); const auto rhs_dims = BCast::FromShape(dense_t->shape()); BCast b(lhs_dims, rhs_dims, false); auto VecGreaterEq = [](absl::Span<const int64_t> lhs, absl::Span<const int64_t> rhs) { if (lhs.size() < rhs.size()) return false; for (size_t i = 0; i < rhs.size(); ++i) { if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false; } return true; }; OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(), errors::InvalidArgument( "SparseDenseBinaryOpShared broadcasts dense to sparse " "only; got incompatible shapes: [", absl::StrJoin(lhs_dims, ","), "] vs. 
[", absl::StrJoin(rhs_dims, ","), "]")); Tensor *output_values = nullptr; Tensor dense_gathered; const int64_t nnz = indices_t->dim_size(0); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({nnz}), &output_values)); OP_REQUIRES_OK( ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}), &dense_gathered)); bool op_is_div = false; if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) { op_is_div = true; } auto dense_gathered_flat = dense_gathered.flat<T>(); const int ndims = lhs_dims.size(); switch (ndims) { #define CASE(NDIM) \ case NDIM: { \ TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \ dense_t->shaped<T, NDIM>(b.y_reshape()) \ .broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \ Eigen::array<Eigen::DenseIndex, NDIM> idx; \ bool indices_valid = true; \ for (int i = 0; i < nnz; ++i) { \ for (int d = 0; d < NDIM; ++d) { \ idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \ if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \ indices_valid = false; \ } \ } \ OP_REQUIRES( \ ctx, indices_valid, \ errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \ "dense side with broadcasted shape")); \ dense_gathered_flat(i) = rhs_ref.coeff(idx); \ if (op_is_div) { \ OP_REQUIRES(ctx, dense_gathered_flat(i) != T{0}, \ errors::InvalidArgument( \ "SparseDenseCwiseDiv cannot divide by zero," \ "but input dense tensor contains zero ")); \ } \ } \ break; \ } CASE(1); CASE(2); CASE(3); CASE(4); CASE(5); default: OP_REQUIRES( ctx, false, errors::InvalidArgument("Only tensors with ranks between 1 and 5 " "are currently supported. Tensor rank: ", ndims)); #undef CASE } output_values->flat<T>().device(ctx->eigen_device<Device>()) = values_t->flat<T>().binaryExpr(dense_gathered_flat, typename Functor::func()); } }; #define REGISTER_KERNELS(T) \ REGISTER_KERNEL_BUILDER( \ Name("SparseDenseCwiseMul").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ SparseDenseBinaryOpShared<CPUDevice, T, functor::mul<T>>) \ \ REGISTER_KERNEL_BUILDER( \ Name("SparseDenseCwiseDiv").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ SparseDenseBinaryOpShared<CPUDevice, T, functor::div<T>>) \ REGISTER_KERNEL_BUILDER( \ Name("SparseDenseCwiseAdd").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ SparseDenseBinaryOpShared<CPUDevice, T, functor::add<T>>) TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNELS); #undef REGISTER_KERNELS }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { static void ExpectHasSubstr(StringPiece s, StringPiece expected) { EXPECT_TRUE(absl::StrContains(s, expected)) << "'" << s << "' does not contain '" << expected << "'"; } class SparseDenseCDivTest : public OpsTestBase { protected: template <typename T> void MakeOp() { DataType value_type = tensorflow::DataTypeToEnum<T>::value; TF_ASSERT_OK(NodeDefBuilder("cdiv", "SparseDenseCwiseDiv") .Input(FakeInput(DT_INT64)) .Input(FakeInput(value_type)) .Input(FakeInput(DT_INT64)) .Input(FakeInput(value_type)) .Attr("T", value_type) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; class SparseDenseCMulTest : public OpsTestBase { protected: template <typename T> void MakeOp() { DataType value_type = tensorflow::DataTypeToEnum<T>::value; TF_ASSERT_OK(NodeDefBuilder("cmul", "SparseDenseCwiseMul") .Input(FakeInput(DT_INT64)) .Input(FakeInput(value_type)) .Input(FakeInput(DT_INT64)) .Input(FakeInput(value_type)) .Attr("T", value_type) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SparseDenseCDivTest, DoNotBroadcastSparse_FewerDims) { MakeOp<float>(); AddInputFromArray<int64_t>(TensorShape({1, 1}), {0}); AddInputFromArray<float>(TensorShape({1}), {1618}); AddInputFromArray<int64_t>(TensorShape({1}), {1}); AddInputFromArray<float>(TensorShape({2, 1}), {17, 19}); ExpectHasSubstr(RunOpKernel().ToString(), "broadcasts dense to sparse only"); } TEST_F(SparseDenseCDivTest, DoNotBroadcastSparse_SameDims) { MakeOp<float>(); AddInputFromArray<int64_t>(TensorShape({1, 2}), {0, 0}); AddInputFromArray<float>(TensorShape({1}), {1618}); AddInputFromArray<int64_t>(TensorShape({2}), {1, 1}); AddInputFromArray<float>(TensorShape({2, 1}), {17, 19}); ExpectHasSubstr(RunOpKernel().ToString(), "broadcasts dense to sparse only"); } TEST_F(SparseDenseCDivTest, SameShape) { MakeOp<float>(); const auto indices_shape = TensorShape({4, 2}); std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; const absl::Span<const int64_t> indices(in); std::initializer_list<int64_t> sh{3, 2}; const absl::Span<const int64_t> shape(sh); Tensor dense(DT_FLOAT, TensorShape(shape)); auto dense_flat = dense.flat<float>(); dense_flat.setConstant(1.); AddInputFromArray<int64_t>(indices_shape, indices); AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<int64_t>(TensorShape({2}), shape); AddInputFromArray<float>(TensorShape(shape), dense_flat); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4})); test::FillValues<float>(&expected, {1, 2, 3, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseDenseCDivTest, BroadcastDenseSameDims) { MakeOp<float>(); const auto indices_shape = TensorShape({4, 2}); std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; const 
absl::Span<const int64_t> indices(in); std::initializer_list<int64_t> sh{3, 2}; const absl::Span<const int64_t> shape(sh); Tensor dense(DT_FLOAT, TensorShape({3, 1})); auto dense_flat = dense.flat<float>(); dense_flat.setConstant(1.); AddInputFromArray<int64_t>(indices_shape, indices); AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<int64_t>(TensorShape({2}), shape); AddInputFromArray<float>(TensorShape({3, 1}), dense_flat); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4})); test::FillValues<float>(&expected, {1, 2, 3, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseDenseCDivTest, BroadcastDenseFewerDims) { MakeOp<float>(); const auto indices_shape = TensorShape({4, 2}); std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; const absl::Span<const int64_t> indices(in); std::initializer_list<int64_t> sh{3, 2}; const absl::Span<const int64_t> shape(sh); Tensor dense(DT_FLOAT, TensorShape({2})); auto dense_flat = dense.flat<float>(); dense_flat.setConstant(1.); AddInputFromArray<int64_t>(indices_shape, indices); AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<int64_t>(TensorShape({2}), shape); AddInputFromArray<float>(TensorShape({2}), dense_flat); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4})); test::FillValues<float>(&expected, {1, 2, 3, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(SparseDenseCMulTest, BroadcastDense) { MakeOp<float>(); const auto indices_shape = TensorShape({4, 2}); std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; const absl::Span<const int64_t> indices(in); std::initializer_list<int64_t> sh{3, 2}; const absl::Span<const int64_t> shape(sh); Tensor dense(DT_FLOAT, TensorShape({2})); auto dense_flat = dense.flat<float>(); dense_flat(0) = 0.5; dense_flat(1) = 0; AddInputFromArray<int64_t>(indices_shape, indices); AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); AddInputFromArray<int64_t>(TensorShape({2}), shape); AddInputFromArray<float>(TensorShape({2}), dense_flat); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4})); test::FillValues<float>(&expected, {0, 1, 1.5, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } static Graph* SparseMatCMulDenseMat(Graph* g, Node* sp_indices, Node* sp_vals, Node* sp_shape, Node* dense) { Node* ret; TF_CHECK_OK( NodeBuilder(g->NewName("SparseDenseCwiseMul"), "SparseDenseCwiseMul") .Input(sp_indices) .Input(sp_vals) .Input(sp_shape) .Input(dense) .Finalize(g, &ret)); return g; } static Node* MakeTensor(Graph* g, int B, int M, int N) { Tensor data(DT_FLOAT, TensorShape({B, M, N})); data.flat<float>().setRandom(); return test::graph::Constant(g, data); } struct ST { Node* indices; Node* vals; Node* shape; }; static ST MakeSparseTensor(Graph* g, int B, int M, int N, int nnz_inner) { const int total_nnz = B * M * nnz_inner; const int kNumDims = 3; Tensor indices(DT_INT64, TensorShape({total_nnz, kNumDims})); Tensor vals(DT_FLOAT, TensorShape({total_nnz})); Tensor shape(DT_INT64, TensorShape({kNumDims})); vals.flat<float>().setRandom(); test::FillValues(&shape, absl::Span<const int64_t>({B, M, N})); auto indices_mat = indices.matrix<int64_t>(); int nnz_cnt = 0; std::unordered_set<int> picked; std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dist(0, N - 1); for (int i = 0; i < B; ++i) { for (int j = 0; j < M; ++j) { for (int k = 0; k < nnz_inner; ++k) { 
indices_mat(nnz_cnt, 0) = i; indices_mat(nnz_cnt, 1) = j; int inner = dist(gen); while (picked.count(inner) == 1) { inner = dist(gen); } picked.insert(inner); indices_mat(nnz_cnt, 2) = inner; ++nnz_cnt; } } } return ST{test::graph::Constant(g, indices), test::graph::Constant(g, vals), test::graph::Constant(g, shape)}; } #define BM_SparseMatCMulDenseMatArgs(N, NNZ_INNER) \ static void BM_SparseMatCMulDenseMat_##N##_##NNZ_INNER( \ ::testing::benchmark::State& state) { \ Graph* g = new Graph(OpRegistry::Global()); \ Node* dense = MakeTensor(g, 8, 4, N); \ ST sp = MakeSparseTensor(g, 8, 4, N, NNZ_INNER); \ \ test::Benchmark( \ "cpu", SparseMatCMulDenseMat(g, sp.indices, sp.vals, sp.shape, dense), \ false) \ .Run(state); \ state.SetItemsProcessed( \ static_cast<int64_t>(state.iterations() * 8 * 4 * N * 2)); \ } \ BENCHMARK(BM_SparseMatCMulDenseMat_##N##_##NNZ_INNER) BM_SparseMatCMulDenseMatArgs(1048576, 1); BM_SparseMatCMulDenseMatArgs(1048576, 8); BM_SparseMatCMulDenseMatArgs(1048576, 32); BM_SparseMatCMulDenseMatArgs(262144, 1); BM_SparseMatCMulDenseMatArgs(262144, 8); BM_SparseMatCMulDenseMatArgs(262144, 32); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_dense_binary_op_shared.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_dense_binary_op_shared_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
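To make the gather-then-apply structure of SparseDenseBinaryOpShared easier to follow, here is a minimal rank-2 sketch in plain C++: for every nonzero of the sparse operand, the dense value at the same (broadcast) coordinate is looked up, then combined with the sparse value. Sparse2D and SparseDenseCwise are illustrative names only; the kernel handles ranks 1 through 5 and derives the broadcast via BCast, whereas this sketch models the rank-1 dense input of the BroadcastDense test as a [1, 2] layout.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <utility>
#include <vector>

struct Sparse2D {
  std::vector<std::pair<int64_t, int64_t>> indices;  // one (row, col) per nonzero
  std::vector<float> values;
};

// dense has shape [dense_rows, dense_cols]; a dimension of size 1 is broadcast.
std::vector<float> SparseDenseCwise(const Sparse2D& sp,
                                    const std::vector<float>& dense,
                                    int64_t dense_rows, int64_t dense_cols,
                                    const std::function<float(float, float)>& op) {
  std::vector<float> out(sp.values.size());
  for (std::size_t n = 0; n < sp.values.size(); ++n) {
    int64_t r = dense_rows == 1 ? 0 : sp.indices[n].first;   // broadcast rows
    int64_t c = dense_cols == 1 ? 0 : sp.indices[n].second;  // broadcast cols
    out[n] = op(sp.values[n], dense[r * dense_cols + c]);
  }
  return out;
}

int main() {
  // Mirrors SparseDenseCMulTest.BroadcastDense: values {1,2,3,4}, dense {0.5, 0}.
  Sparse2D sp{{{0, 1}, {1, 0}, {2, 0}, {2, 1}}, {1, 2, 3, 4}};
  auto out = SparseDenseCwise(sp, {0.5f, 0.0f}, 1, 2,
                              [](float a, float b) { return a * b; });
  for (float v : out) std::cout << v << ' ';  // 0 1 1.5 0
  std::cout << '\n';
  return 0;
}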
4f18fb34-6236-41ee-9f97-e4b07328de32
cpp
tensorflow/tensorflow
gather_nd_op
tensorflow/core/kernels/gather_nd_op.cc
tensorflow/core/kernels/gather_nd_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/gather_nd_op.h" #include <string> #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mem.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/bad_indices_policy.h" namespace tensorflow { namespace { constexpr char kBadIndicesPolicyAtrr[] = "bad_indices_policy"; } typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename Device, typename T, typename Index> class GatherNdOp : public OpKernel { public: explicit GatherNdOp(OpKernelConstruction* c) : OpKernel(c) { const DataType dt = DataTypeToEnum<T>::v(); const DataType index_t = DataTypeToEnum<Index>::v(); OP_REQUIRES_OK(c, c->MatchSignature({dt, index_t}, {dt})); if (c->HasAttr(kBadIndicesPolicyAtrr)) { std::string bad_indices_policy_str; OP_REQUIRES_OK( c, c->GetAttr(kBadIndicesPolicyAtrr, &bad_indices_policy_str)); absl::StatusOr<BadIndicesPolicy> bad_indices_policy = BadIndicesPolicyFromString(bad_indices_policy_str); OP_REQUIRES_OK(c, bad_indices_policy.status()); bad_indices_policy_ = *bad_indices_policy; } } void Compute(OpKernelContext* c) override { const Tensor& params = c->input(0); const Tensor& indices = c->input(1); Tensor out; OP_REQUIRES_OK(c, functor::DoGatherNd<Device, T, Index>( c, params, indices, &out, bad_indices_policy_)); c->set_output(0, out); } private: BadIndicesPolicy bad_indices_policy_ = BadIndicesPolicy::kDefault; }; #define REGISTER_GATHER_ND_FULL(dev, type, index_type) \ REGISTER_KERNEL_BUILDER( \ Name("GatherNd") \ .Device(DEVICE_##dev) \ .TypeConstraint<type>("Tparams") \ .TypeConstraint<index_type>("Tindices") \ .AttrConstraint<std::string>( \ "bad_indices_policy", \ {"", "DEFAULT", "ERROR", "IGNORE"}), \ GatherNdOp<dev##Device, type, index_type>) #define REGISTER_GATHER_ND_CPU(type) \ REGISTER_GATHER_ND_FULL(CPU, type, int16); \ REGISTER_GATHER_ND_FULL(CPU, type, int32); \ REGISTER_GATHER_ND_FULL(CPU, type, int64_t) TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU); TF_CALL_QUANTIZED_TYPES(REGISTER_GATHER_ND_CPU); TF_CALL_float8_e5m2(REGISTER_GATHER_ND_CPU); TF_CALL_float8_e4m3fn(REGISTER_GATHER_ND_CPU); #undef REGISTER_GATHER_ND_CPU #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace functor { #define DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, NDIM) \ template <> \ Index GatherNdSlice<GPUDevice, T, Index, NDIM>::operator()( \ const GPUDevice& d, const Index slice_size, \ typename TTypes<int32>::Scalar Tscratch, \ typename TTypes<T, NDIM + 1>::ConstTensor Tparams, \ typename TTypes<Index>::ConstMatrix Tindices, \ typename TTypes<T>::Matrix Tout); \ extern template struct GatherNdSlice<GPUDevice, T, Index, NDIM>; #define DECLARE_GPU_SPECS_INDEX(T, Index) \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 0); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 1); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 2); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 3); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 4); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 5); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 6); \ DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 7); #define DECLARE_GPU_SPECS(T) \ DECLARE_GPU_SPECS_INDEX(T, int32); \ DECLARE_GPU_SPECS_INDEX(T, int64_t) TF_CALL_int32(DECLARE_GPU_SPECS); TF_CALL_int64(DECLARE_GPU_SPECS); TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS); TF_CALL_COMPLEX_TYPES(DECLARE_GPU_SPECS); #undef 
DECLARE_GPU_SPECS #undef DECLARE_GPU_SPECS_INDEX } #undef REGISTER_GATHER_ND_FULL #define REGISTER_GATHER_ND_FULL(dev, type, index_type) \ REGISTER_KERNEL_BUILDER( \ Name("GatherNd") \ .Device(DEVICE_##dev) \ .TypeConstraint<type>("Tparams") \ .TypeConstraint<index_type>("Tindices") \ .AttrConstraint<std::string>("bad_indices_policy", \ {"", "DEFAULT", "IGNORE"}), \ GatherNdOp<dev##Device, type, index_type>) #define REGISTER_GATHER_ND_GPU(type) \ REGISTER_GATHER_ND_FULL(GPU, type, int32); \ REGISTER_GATHER_ND_FULL(GPU, type, int64_t) TF_CALL_int32(REGISTER_GATHER_ND_GPU); TF_CALL_int64(REGISTER_GATHER_ND_GPU); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GATHER_ND_GPU); TF_CALL_COMPLEX_TYPES(REGISTER_GATHER_ND_GPU); #undef REGISTER_GATHER_ND_GPU #endif #undef REGISTER_GATHER_ND_FULL }
#include <functional> #include <memory> #include <vector> #include "absl/strings/match.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace test { namespace graph { class Node* GatherNd(Graph* g, class Node* in0, class Node* in1) { class Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "GatherNd") .Input(in0) .Input(in1) .Finalize(g, &ret)); return ret; } } } namespace { class GatherNdOpTest : public OpsTestBase { protected: void MakeOp(DataType param_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd") .Input(FakeInput(param_type)) .Input(FakeInput(index_type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(GatherNdOpTest, Simple) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4}); AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2})); test::FillValues<float>(&expected, {8, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherNdOpTest, Error_OutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4}); AddInputFromArray<int32>(TensorShape({2, 1}), {3, 5}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.message(), "indices[1] = [5] does not index into param shape [5]")) << s.message(); } TEST_F(GatherNdOpTest, Quantized_UINT8) { MakeOp(DT_QUINT8, DT_INT32); AddInputFromArray<quint8>(TensorShape({5}), {0, 1, 2, 8, 4}); AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QUINT8, TensorShape({2})); test::FillValues<quint8>(&expected, {8, 4}); test::ExpectTensorEqual<quint8>(expected, *GetOutput(0)); } TEST_F(GatherNdOpTest, Quantized_INT8) { MakeOp(DT_QINT8, DT_INT32); AddInputFromArray<qint8>(TensorShape({5}), {0, 1, 2, 8, 4}); AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2})); test::FillValues<qint8>(&expected, {8, 4}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } class GatherNdOpIgnoreBadIndicesTest : public OpsTestBase { protected: void MakeOp(DataType param_type, DataType index_type) { TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd") .Input(FakeInput(param_type)) .Input(FakeInput(index_type)) .Attr("bad_indices_policy", "IGNORE") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(GatherNdOpIgnoreBadIndicesTest, IgnoreOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {9, 1, 2, 8, 4}); 
AddInputFromArray<int32>(TensorShape({3, 1}), {3, 5, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({3})); test::FillValues<float>(&expected, {8, 0, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } class GatherNdOpConstructionTest : public OpsTestBase {}; TEST_F(GatherNdOpConstructionTest, Error_BadIndicesPolicyInvalid) { TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY") .Finalize(node_def())); EXPECT_NE(InitOp(), absl::OkStatus()); } constexpr int kLookups = 2000; template <typename Index> static Graph* GatherNd(int dim) { Graph* g = new Graph(OpRegistry::Global()); Tensor params(DT_FLOAT, TensorShape({dim, 8, 16, 32})); params.flat<float>().setRandom(); random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); Tensor indices(DataTypeToEnum<Index>::value, TensorShape({kLookups, 4})); auto indices_mat = indices.matrix<Index>(); for (int i = 0; i < kLookups; i++) { indices_mat(i, 0) = rnd.Uniform(dim); indices_mat(i, 1) = rnd.Uniform(8); indices_mat(i, 2) = rnd.Uniform(16); indices_mat(i, 3) = rnd.Uniform(32); } test::graph::GatherNd(g, test::graph::Constant(g, params), test::graph::Constant(g, indices)); return g; } #define BM_GATHER_ND(DEVICE, INDEX) \ static void BM_##DEVICE##_gather_nd_##INDEX( \ ::testing::benchmark::State& state) { \ const int dim = state.range(0); \ test::Benchmark(#DEVICE, GatherNd<INDEX>(dim), \ false) \ .Run(state); \ const int64_t tot = \ static_cast<int64_t>(state.iterations()) * kLookups * 4; \ state.SetItemsProcessed(tot); \ state.SetBytesProcessed(tot * sizeof(float)); \ } \ BENCHMARK(BM_##DEVICE##_gather_nd_##INDEX) \ ->UseRealTime() \ ->Arg(10) \ ->Arg(100) \ ->Arg(1000) \ ->Arg(10000) BM_GATHER_ND(cpu, int32); BM_GATHER_ND(gpu, int32); BM_GATHER_ND(cpu, int64_t); BM_GATHER_ND(gpu, int64_t); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/gather_nd_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/gather_nd_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
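The tests above exercise both the default out-of-range error and the bad_indices_policy="IGNORE" path. The following stand-alone sketch shows the indexing rule they check for the simplest case of rank-1 params and [N, 1] indices; GatherNd1D is a made-up name, and this is not the TF kernel, which dispatches to functor::DoGatherNd.

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Returns one params element per index row; an out-of-range index either
// aborts the whole gather (default) or contributes a zero (IGNORE policy),
// which is what the IgnoreOutOfRange test expects.
std::optional<std::vector<float>> GatherNd1D(const std::vector<float>& params,
                                             const std::vector<int32_t>& indices,
                                             bool ignore_bad_indices) {
  std::vector<float> out;
  out.reserve(indices.size());
  for (int32_t idx : indices) {
    if (idx < 0 || idx >= static_cast<int32_t>(params.size())) {
      if (ignore_bad_indices) {
        out.push_back(0.0f);  // IGNORE policy: bad index yields a zero
        continue;
      }
      return std::nullopt;  // default policy: report the bad index as an error
    }
    out.push_back(params[idx]);
  }
  return out;
}

int main() {
  std::vector<float> params = {9, 1, 2, 8, 4};
  auto ok = GatherNd1D(params, {3, 5, 1}, /*ignore_bad_indices=*/true);
  for (float v : *ok) std::cout << v << ' ';  // 8 0 1, as in IgnoreOutOfRange
  std::cout << '\n';
  return 0;
}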
f6d26ff3-c6e3-4c5f-9e01-90b52242a5cb
cpp
tensorflow/tensorflow
batch_norm_op
tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc
tensorflow/core/kernels/batch_norm_op_test.cc
#include <algorithm> #include <numeric> #include <string> #include <vector> #include "tensorflow/compiler/tf2xla/kernels/relu_op.h" #include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/math.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/util.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { namespace { class FusedBatchNormOp : public XlaOpKernel { public: explicit FusedBatchNormOp(OpKernelConstruction* ctx) : FusedBatchNormOp(ctx, false) {} FusedBatchNormOp(OpKernelConstruction* ctx, bool is_batch_norm_ex) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("epsilon", &epsilon_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("is_training", &is_training_)); OP_REQUIRES_OK( ctx, ctx->GetAttr("exponential_avg_factor", &exponential_avg_factor_)); string data_format_str; OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str)); OP_REQUIRES( ctx, FormatFromString(data_format_str, &data_format_), errors::InvalidArgument("Invalid data format: ", data_format_str)); if (is_batch_norm_ex) { int num_side_inputs; OP_REQUIRES_OK(ctx, ctx->GetAttr("num_side_inputs", &num_side_inputs)); OP_REQUIRES(ctx, num_side_inputs >= 0 && num_side_inputs <= 1, errors::InvalidArgument( "FusedBatchNormEx supports at most 1 side input.")); add_side_input_ = (num_side_inputs == 1); string activation_mode; OP_REQUIRES_OK(ctx, ctx->GetAttr("activation_mode", &activation_mode)); OP_REQUIRES(ctx, activation_mode == "Identity" || activation_mode == "Relu", errors::InvalidArgument( "Unsupported FusedBatchNormEx activation mode: ", activation_mode)); apply_relu_ = (activation_mode == "Relu"); } else { add_side_input_ = false; apply_relu_ = false; } is_on_gpu_ = ctx->device_type().type_string() == DEVICE_GPU_XLA_JIT; } void Compile(XlaOpKernelContext* ctx) override { CompileImpl(ctx); } protected: virtual void CompileImpl(XlaOpKernelContext* ctx) { xla::XlaBuilder* const b = ctx->builder(); xla::PrimitiveType input_type; OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(ctx->input_type(0), &input_type)); xla::PrimitiveType scale_type; OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(ctx->input_type(1), &scale_type)); xla::XlaOp input = ctx->Input(0); TensorShape input_shape = ctx->InputShape(0); int feature_index = GetTensorFeatureDimIndex(input_shape.dims(), data_format_); input = xla::ConvertElementType(input, scale_type); if (is_training_) { xla::XlaOp output = xla::BatchNormTraining( input, ctx->Input(1), ctx->Input(2), epsilon_, feature_index); xla::XlaOp converted = xla::ConvertElementType(xla::GetTupleElement(output, 0), input_type); if (add_side_input_ && apply_relu_) { ctx->SetOutput(0, xla::Relu(xla::Add(ctx->Input(5), converted))); } else if (apply_relu_) { ctx->SetOutput(0, xla::Relu(converted)); } else { ctx->SetOutput(0, converted); } xla::XlaOp variance = xla::GetTupleElement(output, 2); int total_input_size = ctx->InputShape(0).num_elements(); int total_scale_size = ctx->InputShape(1).num_elements(); int sample_size = total_scale_size > 0 ? 
total_input_size / total_scale_size : 0; int sample_size_minus_one = std::max(1, sample_size - 1); double factor = static_cast<double>(sample_size) / static_cast<double>(sample_size_minus_one); constexpr int kVarianceOutputIndex = 2; xla::XlaOp corrected = xla::Mul(variance, xla::ScalarLike(variance, factor)); if (input_shape.num_elements() == 0) { auto status_or_output_shape = b->GetShape(corrected); OP_REQUIRES_OK(ctx, status_or_output_shape.status()); ctx->SetOutput(1, xla::GetTupleElement(output, 1)); ctx->SetOutput( kVarianceOutputIndex, xla::Broadcast( xla::NanValue(b, ctx->output_xla_type(kVarianceOutputIndex)), status_or_output_shape.value().dimensions())); } else { if (exponential_avg_factor_ == 1.0f) { ctx->SetOutput(1, xla::GetTupleElement(output, 1)); ctx->SetOutput(2, corrected); } else { xla::XlaOp old_mean = ctx->Input(3); xla::XlaOp alpha = xla::ScalarLike(old_mean, 1.0f - exponential_avg_factor_); xla::XlaOp beta = xla::ScalarLike(old_mean, exponential_avg_factor_); xla::XlaOp new_running_mean = xla::Add(xla::Mul(old_mean, alpha), xla::Mul(xla::GetTupleElement(output, 1), beta)); ctx->SetOutput(1, new_running_mean); xla::XlaOp old_variance = ctx->Input(4); xla::XlaOp new_running_variance = xla::Add( xla::Mul(old_variance, alpha), xla::Mul(corrected, beta)); ctx->SetOutput(2, new_running_variance); } } ctx->SetOutput(3, xla::GetTupleElement(output, 1)); if (is_on_gpu_) { ctx->SetOutput(4, xla::Rsqrt(xla::Add( variance, xla::ScalarLike(variance, epsilon_)))); } else { ctx->SetOutput(4, variance); } } else { xla::XlaOp output = xla::BatchNormInference( input, ctx->Input(1), ctx->Input(2), ctx->Input(3), ctx->Input(4), epsilon_, feature_index); xla::XlaOp converted = xla::ConvertElementType(output, input_type); if (add_side_input_ && apply_relu_) { ctx->SetOutput(0, xla::Relu(xla::Add(ctx->Input(5), converted))); } else if (apply_relu_) { ctx->SetOutput(0, xla::Relu(converted)); } else { ctx->SetOutput(0, converted); } ctx->SetOutput(1, ctx->Input(3)); ctx->SetOutput(2, ctx->Input(4)); ctx->SetOutput(3, ctx->Input(3)); ctx->SetOutput(4, ctx->Input(4)); } } private: float epsilon_; TensorFormat data_format_; bool is_training_; float exponential_avg_factor_; bool add_side_input_; bool apply_relu_; bool is_on_gpu_; }; class FusedBatchNormOpV3 : public FusedBatchNormOp { public: explicit FusedBatchNormOpV3(OpKernelConstruction* ctx) : FusedBatchNormOp(ctx) {} void Compile(XlaOpKernelContext* ctx) override { FusedBatchNormOp::CompileImpl(ctx); if (!ctx->status().ok()) { return; } ctx->SetConstantOutput(5, Tensor()); } }; class FusedBatchNormOpEx : public FusedBatchNormOp { public: explicit FusedBatchNormOpEx(OpKernelConstruction* ctx) : FusedBatchNormOp(ctx, true) {} void Compile(XlaOpKernelContext* ctx) override { FusedBatchNormOp::CompileImpl(ctx); if (!ctx->status().ok()) { return; } ctx->SetConstantOutput(5, Tensor()); } }; REGISTER_XLA_OP(Name("FusedBatchNorm"), FusedBatchNormOp); REGISTER_XLA_OP(Name("FusedBatchNormV2"), FusedBatchNormOp); REGISTER_XLA_OP(Name("FusedBatchNormV3"), MlirXlaOpKernel); REGISTER_XLA_OP(Name("_FusedBatchNormEx"), FusedBatchNormOpEx); class FusedBatchNormGradOp : public XlaOpKernel { public: explicit FusedBatchNormGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("epsilon", &epsilon_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("is_training", &is_training_)); string data_format_str; OP_REQUIRES_OK(ctx, ctx->GetAttr("data_format", &data_format_str)); OP_REQUIRES( ctx, FormatFromString(data_format_str, &data_format_), 
errors::InvalidArgument("Invalid data format: ", data_format_str)); is_on_gpu_ = ctx->device_type().type_string() == DEVICE_GPU_XLA_JIT; } void Compile(XlaOpKernelContext* ctx) override { xla::XlaBuilder* const b = ctx->builder(); DataType input_dtype = ctx->input_type(0); DataType scale_dtype = ctx->input_type(2); auto grad_backprop = XlaHelpers::ConvertElementType(ctx->Input(0), scale_dtype); auto activations = XlaHelpers::ConvertElementType(ctx->Input(1), scale_dtype); auto scale = ctx->Input(2); auto mean = ctx->Input(3); auto var = ctx->Input(4); const int input_dims = ctx->InputShape(0).dims(); const int feature_index = GetTensorFeatureDimIndex(input_dims, data_format_); xla::XlaOp x_backprop; xla::XlaOp scale_backprop; xla::XlaOp offset_backprop; if (is_training_) { if (is_on_gpu_) { xla::XlaOp one = xla::ScalarLike(var, 1.0f); xla::XlaOp epsilon = xla::ScalarLike(var, epsilon_); var = xla::Sub(one / (var * var), epsilon); } xla::XlaOp output = xla::BatchNormGrad(activations, scale, mean, var, grad_backprop, epsilon_, feature_index); x_backprop = xla::GetTupleElement(output, 0); scale_backprop = xla::GetTupleElement(output, 1); offset_backprop = xla::GetTupleElement(output, 2); } else { std::vector<int64_t> reduction_dims(input_dims - 1); std::iota(reduction_dims.begin(), reduction_dims.begin() + feature_index, 0); std::iota(reduction_dims.begin() + feature_index, reduction_dims.end(), feature_index + 1); const DataType accumulation_type = XlaHelpers::SumAccumulationType(scale_dtype); auto converted = XlaHelpers::ConvertElementType(grad_backprop, accumulation_type); auto reduce = xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type), *ctx->GetOrCreateAdd(accumulation_type), reduction_dims); offset_backprop = XlaHelpers::ConvertElementType(reduce, scale_dtype); auto epsilon = XlaHelpers::FloatLiteral(b, scale_dtype, epsilon_); auto scratch1 = xla::Rsqrt(xla::Add(var, epsilon)); auto mul = xla::Mul(grad_backprop, xla::Sub(activations, mean, {feature_index})); converted = XlaHelpers::ConvertElementType(mul, accumulation_type); reduce = xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type), *ctx->GetOrCreateAdd(accumulation_type), reduction_dims); auto scratch2 = XlaHelpers::ConvertElementType(reduce, scale_dtype); x_backprop = xla::Mul(grad_backprop, xla::Mul(scratch1, scale), {feature_index}); scale_backprop = xla::Mul(scratch1, scratch2); } ctx->SetOutput(0, XlaHelpers::ConvertElementType(x_backprop, input_dtype)); ctx->SetOutput(1, scale_backprop); ctx->SetOutput(2, offset_backprop); ctx->SetConstantOutput(3, Tensor()); ctx->SetConstantOutput(4, Tensor()); } private: TensorFormat data_format_; float epsilon_; bool is_training_; bool is_on_gpu_; }; REGISTER_XLA_OP(Name("FusedBatchNormGrad"), FusedBatchNormGradOp); REGISTER_XLA_OP(Name("FusedBatchNormGradV2"), FusedBatchNormGradOp); REGISTER_XLA_OP(Name("FusedBatchNormGradV3"), MlirXlaOpKernel); } }
#include <vector> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { template <typename T> struct BatchNormOpTest : public OpsTestBase { static constexpr auto TValueType = DataTypeToEnum<T>::value; void run_me() { TF_EXPECT_OK( NodeDefBuilder("batch_norm_op", "BatchNormWithGlobalNormalization") .Input(FakeInput(TValueType)) .Input(FakeInput(TValueType)) .Input(FakeInput(TValueType)) .Input(FakeInput(TValueType)) .Input(FakeInput(TValueType)) .Attr("scale_after_normalization", false) .Attr("variance_epsilon", 0.001) .Finalize(node_def())); TF_EXPECT_OK(InitOpWithGraphVersion(8)); AddInputFromList<T>(TensorShape({1, 1, 6, 2}), {1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6}); AddInputFromList<T>(TensorShape({2}), {10, 20}); AddInputFromList<T>(TensorShape({2}), {0.25, 0.5}); AddInputFromList<T>(TensorShape({2}), {0.1, 0.6}); AddInputFromList<T>(TensorShape({2}), {0.0, 0.0}); TF_ASSERT_OK(RunOpKernel()); double atol = TValueType == DT_FLOAT ? 0.01 : 0.1; Tensor expected(allocator(), TValueType, TensorShape({1, 1, 6, 2})); test::FillValues<T>(&expected, {-17.86f, -22.00f, -15.87f, -20.59f, -13.87f, -19.18f, -21.86f, -33.31f, -23.85f, -34.72f, -25.85f, -36.13f}); test::ExpectTensorNear<T>(expected, *GetOutput(0), atol); } }; TYPED_TEST_SUITE_P(BatchNormOpTest); TYPED_TEST_P(BatchNormOpTest, Simple) { this->run_me(); } REGISTER_TYPED_TEST_SUITE_P(BatchNormOpTest, Simple); using DataTypes = ::testing::Types<float, Eigen::half>; INSTANTIATE_TYPED_TEST_SUITE_P(Test, BatchNormOpTest, DataTypes); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/batch_norm_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batch_norm_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
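Note on the record above: in the training path of FusedBatchNormOp, the batch variance is rescaled by sample_size / (sample_size - 1) before being folded into the running statistics, and old and new statistics are blended with exponential_avg_factor. The standalone C++ sketch below (not the XLA kernel; all numeric values are made up for illustration) spells out just those two steps.

// Minimal sketch of the Bessel correction and moving-average update used by
// the training path of the FusedBatchNormOp record above. Illustrative only.
#include <algorithm>
#include <cstdio>

int main() {
  const int total_input_size = 1 * 1 * 6 * 2;   // e.g. an NHWC input of shape 1x1x6x2
  const int total_scale_size = 2;               // one scale per channel
  const int sample_size =
      total_scale_size > 0 ? total_input_size / total_scale_size : 0;
  const int sample_size_minus_one = std::max(1, sample_size - 1);
  // Batch variance uses a 1/N divisor; the stored running variance is unbiased
  // (1/(N-1)), hence this correction factor.
  const double factor = static_cast<double>(sample_size) /
                        static_cast<double>(sample_size_minus_one);

  const double batch_mean = 0.5, batch_var = 2.0;  // made-up batch statistics
  const double old_mean = 0.1, old_var = 1.0;      // previous running statistics
  const double exponential_avg_factor = 0.01;      // assumed attribute value

  const double corrected_var = batch_var * factor;
  const double new_mean = old_mean * (1.0 - exponential_avg_factor) +
                          batch_mean * exponential_avg_factor;
  const double new_var = old_var * (1.0 - exponential_avg_factor) +
                         corrected_var * exponential_avg_factor;
  std::printf("corrected_var=%f new_mean=%f new_var=%f\n",
              corrected_var, new_mean, new_var);
  return 0;
}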
9522902e-b0d0-42a7-bb94-a283261c7776
cpp
tensorflow/tensorflow
gather_op
tensorflow/compiler/tf2xla/kernels/gather_op.cc
tensorflow/core/kernels/gather_op_test.cc
#include <algorithm> #include <optional> #include <vector> #include "absl/types/optional.h" #include "tensorflow/compiler/tf2xla/kernels/gather_op_helpers.h" #include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_context.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/slicing.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/status_macros.h" #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { Status XlaGather(const xla::XlaOp& input, const TensorShape& input_shape, const xla::XlaOp& indices, const TensorShape& indices_shape, int64_t axis, bool indices_are_nd, DataType dtype, DataType index_type, xla::XlaBuilder* builder, xla::XlaOp* gather_output) { CHECK(!indices_are_nd || axis == 0); int64_t num_index_dims; int64_t num_indices = 1; if (indices_are_nd) { CHECK_GE(indices_shape.dims(), 1); num_index_dims = indices_shape.dim_size(indices_shape.dims() - 1); for (int64_t i = 0, e = indices_shape.dims() - 1; i < e; i++) { num_indices *= indices_shape.dim_size(i); } } else { num_index_dims = 1; for (int64_t i = 0, e = indices_shape.dims(); i < e; i++) { num_indices *= indices_shape.dim_size(i); } } if (num_indices == 0) { TensorShape input_shape_pre_axis{input_shape}; input_shape_pre_axis.RemoveDimRange(axis, input_shape.dims()); TensorShape input_shape_post_axis{input_shape}; input_shape_post_axis.RemoveDimRange(0, axis + num_index_dims); TensorShape indices_shape_no_index_vectors{indices_shape}; if (indices_are_nd) { indices_shape_no_index_vectors.RemoveLastDims(1); } TensorShape out_shape; out_shape.AppendShape(input_shape_pre_axis); out_shape.AppendShape(indices_shape_no_index_vectors); out_shape.AppendShape(input_shape_post_axis); *gather_output = xla::Broadcast(XlaHelpers::Zero(builder, dtype), out_shape.dim_sizes()); return absl::OkStatus(); } for (int64_t i = 0; i < num_index_dims; ++i) { if (input_shape.dim_size(axis + i) == 0) { auto slice_sizes = input_shape.dim_sizes(); slice_sizes.erase(slice_sizes.begin() + axis); *gather_output = xla::Broadcast(XlaHelpers::Zero(builder, dtype), slice_sizes); return absl::OkStatus(); } } xla::GatherDimensionNumbers dim_numbers; std::vector<int64_t> slice_sizes; slice_sizes.reserve(input_shape.dims()); for (int64_t i = 0; i < input_shape.dims(); i++) { int64_t window_bound; if (axis <= i && i < (axis + num_index_dims)) { dim_numbers.add_collapsed_slice_dims(i); window_bound = 1; } else { window_bound = input_shape.dim_size(i); } slice_sizes.push_back(window_bound); if (i < axis) { dim_numbers.add_offset_dims(i); } else if (i >= (axis + num_index_dims)) { int64_t indices_rank = indices_are_nd ? (indices_shape.dims() - 1) : indices_shape.dims(); dim_numbers.add_offset_dims(i + indices_rank - num_index_dims); } } dim_numbers.set_index_vector_dim(indices_are_nd ? 
(indices_shape.dims() - 1) : indices_shape.dims()); for (int64_t i = axis; i < axis + num_index_dims; i++) { dim_numbers.add_start_index_map(i); } *gather_output = xla::Gather(input, indices, dim_numbers, slice_sizes); return absl::OkStatus(); } Status XlaGatherWithBatchDimsOpImpl(XlaOpKernelContext* context, const xla::XlaOp input, const TensorShape& input_shape, int batch_dims, xla::XlaOp* gather_output) { auto indices = context->Input(1); auto indices_shape = context->InputShape(1); std::optional<int64_t> axis; if (context->num_inputs() == 3) { const TensorShape axis_shape = context->InputShape(2); if (!TensorShapeUtils::IsScalar(axis_shape)) { return errors::InvalidArgument("axis must be scalar"); } DataType axis_type = context->input_type(2); if (axis_type != DT_INT32 && axis_type != DT_INT64) { return errors::InvalidArgument("axis must be int32 or int64"); } int64_t axis_input; TF_RETURN_IF_ERROR(context->ConstantInputAsIntScalar(2, &axis_input)); const auto params_dims = input_shape.dims(); if (-params_dims > axis_input || axis_input >= params_dims) { const auto min_params_rank = axis_input < 0 ? -axis_input : axis_input + 1; return errors::InvalidArgument("Shape must be at least rank ", min_params_rank, " but is rank ", params_dims); } if (axis_input < 0) { axis_input += params_dims; } axis = axis_input; } if (batch_dims != 0) { if (batch_dims < 0) { batch_dims = indices_shape.dims() + batch_dims; } axis = axis.value_or(batch_dims); if (batch_dims < -indices_shape.dims() || batch_dims > indices_shape.dims()) { return errors::InvalidArgument( "Expected batch_dims in the range [", -indices_shape.dims(), ", ", indices_shape.dims(), "], but got ", batch_dims); } if (batch_dims >= input_shape.dims()) { return errors::InvalidArgument("batch_dims (", batch_dims, ") must be less than rank(input) (", input_shape.dims(), ")."); } if (*axis < batch_dims) { return errors::InvalidArgument("batch_dims (", batch_dims, ") must be less than or equal to ", "axis (", *axis, ")."); } } axis = axis.value_or(0); DataType index_type = context->input_type(1); if (index_type != DT_INT16 && index_type != DT_INT32 && index_type != DT_INT64) { return errors::InvalidArgument("indices must be int16, int32, or int64"); } xla::XlaOp gather; if (batch_dims > 0) { *gather_output = xla::TorchIndexSelect(input, indices, *axis, batch_dims); } else { TF_RETURN_IF_ERROR( XlaGather(input, input_shape, indices, indices_shape, *axis, false, context->expected_output_dtype(0), index_type, context->builder(), gather_output)); } return absl::OkStatus(); } class GatherOp : public XlaOpKernel { public: explicit GatherOp(OpKernelConstruction* context) : XlaOpKernel(context) { if (context->HasAttr("batch_dims")) { OP_REQUIRES_OK(context, context->GetAttr("batch_dims", &batch_dims_)); } else { batch_dims_ = 0; } } void Compile(XlaOpKernelContext* context) override { auto input = context->Input(0); auto input_shape = context->InputShape(0); xla::XlaOp gather; OP_REQUIRES_OK(context, XlaGatherWithBatchDimsOpImpl(context, input, input_shape, batch_dims_, &gather)); context->SetOutput(0, gather); } private: GatherOp(const GatherOp&) = delete; void operator=(const GatherOp&) = delete; int32 batch_dims_ = 0; }; REGISTER_XLA_OP(Name("Gather"), MlirXlaOpKernel); REGISTER_XLA_OP(Name("GatherV2").CompileTimeConstantInput("axis"), GatherOp); class GatherNdOp : public XlaOpKernel { public: explicit GatherNdOp(OpKernelConstruction* context) : XlaOpKernel(context) {} void Compile(XlaOpKernelContext* context) override { DataType params_type = 
context->input_type(0); DataType indices_type = context->input_type(1); TensorShape params_shape = context->InputShape(0); TensorShape indices_shape = context->InputShape(1); OP_REQUIRES(context, TensorShapeUtils::IsVectorOrHigher(params_shape), errors::InvalidArgument("params must be at least a vector")); OP_REQUIRES(context, TensorShapeUtils::IsVectorOrHigher(indices_shape), errors::InvalidArgument("indices must be at least a vector")); const int64_t num_index_dims = indices_shape.dim_size(indices_shape.dims() - 1); OP_REQUIRES( context, num_index_dims <= params_shape.dims(), errors::InvalidArgument( "index innermost dimension length must be <= params rank; saw: ", indices_shape.dim_size(indices_shape.dims() - 1), " vs. ", params_shape.dims())); xla::XlaBuilder* builder = context->builder(); auto params = context->Input(0); auto indices = context->Input(1); xla::XlaOp gather; OP_REQUIRES_OK(context, XlaGather(params, params_shape, indices, indices_shape, 0, true, params_type, indices_type, builder, &gather)); context->SetOutput(0, gather); } }; REGISTER_XLA_OP(Name("GatherNd"), GatherNdOp); }
#include <functional> #include <memory> #include <vector> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class GatherOpTest : public OpsTestBase { protected: void MakeOp(DataType data_type, DataType index_type, int batch_dims = 0) { TF_ASSERT_OK(NodeDefBuilder("myop", "GatherV2") .Input(FakeInput(data_type)) .Input(FakeInput(index_type)) .Input(FakeInput(index_type)) .Attr("batch_dims", batch_dims) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(GatherOpTest, ScalarIndices) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 3, 4}); AddInputFromArray<int32>(TensorShape({}), {3}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({})); test::FillValues<float>(&expected, {3}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, ScalarIndices_Complex) { MakeOp(DT_COMPLEX64, DT_INT32); AddInputFromArray<std::complex<float>>( TensorShape({5}), {std::complex<float>(0, 10), std::complex<float>(1, 11), std::complex<float>(2, 12), std::complex<float>(3, 13), std::complex<float>(4, 14)}); AddInputFromArray<int32>(TensorShape({}), {3}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_COMPLEX64, TensorShape({})); test::FillValues<std::complex<float>>(&expected, {std::complex<float>(3, 13)}); test::ExpectTensorEqual<std::complex<float>>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, Simple_TwoD32_Axis0) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({4}), {0, 4, 0, 2}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 3})); test::FillValues<float>(&expected, {0, 1, 2, 12, 13, 14, 0, 1, 2, 6, 7, 8}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, InvalidInputShape_TwoD32) { MakeOp(DT_FLOAT, DT_INT32); AddInput<float>(TensorShape({0, 3}), [](int) -> float { return 0.f; }); AddInputFromArray<int32>(TensorShape({4}), {0, 4, 0, 2}); AddInputFromArray<int32>(TensorShape({}), {0}); auto s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "indices[0] = 0 is not in [0, 0)")) << s; } TEST_F(GatherOpTest, Simple_TwoD32_Axis1) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({4}), {0, 1, 0, 2}); AddInputFromArray<int32>(TensorShape({}), {1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, 
TensorShape({5, 4})); test::FillValues<float>(&expected, {0, 1, 0, 2, 3, 4, 3, 5, 6, 7, 6, 8, 9, 10, 9, 11, 12, 13, 12, 14}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, ZeroSize_TwoD32) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 0}), {}); AddInputFromArray<int32>(TensorShape({4}), {0, 4, 0, 2}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 0})); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, Simple_TwoD64) { MakeOp(DT_FLOAT, DT_INT64); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int64_t>(TensorShape({4}), {0, 4, 0, 2}); AddInputFromArray<int64_t>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 3})); test::FillValues<float>(&expected, {0, 1, 2, 12, 13, 14, 0, 1, 2, 6, 7, 8}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, HighRank) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({4}), {0, 1, 2, 3}); AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 0, 2, 3, 0}); AddInputFromArray<int32>(TensorShape({}), {0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {1, 2, 0, 2, 3, 0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(GatherOpTest, Error_IndexOutOfRange) { MakeOp(DT_FLOAT, DT_INT32); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({4}), {0, 4, 99, 2}); AddInputFromArray<int32>(TensorShape({}), {0}); Status s = RunOpKernel(); EXPECT_TRUE( absl::StrContains(s.ToString(), "indices[2] = 99 is not in [0, 5)")) << s; } TEST_F(GatherOpTest, Error_BatchDimsOutOfRange) { MakeOp(DT_FLOAT, DT_INT32, 10); AddInputFromArray<float>(TensorShape({5, 3}), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}); AddInputFromArray<int32>(TensorShape({4}), {0, 4, 99, 2}); AddInputFromArray<int32>(TensorShape({}), {0}); Status s = RunOpKernel(); EXPECT_TRUE(absl::StrContains( s.ToString(), "Expected batch_dims in the range [-1, 1], but got 10")) << s; } constexpr int kLookups = 2000; template <typename Index> static Graph* Gather(int dim) { Graph* g = new Graph(OpRegistry::Global()); const int kRows = ((512 << 20) / sizeof(float)) / dim; Tensor params(DT_FLOAT, TensorShape({kRows, dim})); params.flat<float>().setRandom(); random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); std::vector<Index> indices_vec; indices_vec.reserve(kLookups); for (int i = 0; i < kLookups; i++) { indices_vec.push_back(rnd.Uniform(kRows)); } Tensor indices(DataTypeToEnum<Index>::value, TensorShape({kLookups})); for (int i = 0; i < indices_vec.size(); i++) { indices.flat<Index>()(i) = indices_vec[i]; } Tensor axis(DataTypeToEnum<Index>::value, TensorShape({})); axis.scalar<Index>()() = 0; test::graph::Gather(g, test::graph::Constant(g, params), test::graph::Constant(g, indices), test::graph::HostConstant(g, axis)); return g; } #define BM_GATHER(DEVICE, INDEX) \ static void BM_##DEVICE##_gather_##INDEX( \ ::testing::benchmark::State& state) { \ const int dim = state.range(0); \ test::Benchmark(#DEVICE, Gather<INDEX>(dim), false) \ .Run(state); \ const int64_t tot = \ static_cast<int64_t>(state.iterations()) * kLookups * dim; \ state.SetItemsProcessed(tot); \ 
state.SetBytesProcessed(tot * sizeof(float)); \ } \ BENCHMARK(BM_##DEVICE##_gather_##INDEX) \ ->UseRealTime() \ ->Arg(1) \ ->Arg(10) \ ->Arg(20) \ ->Arg(64) \ ->Arg(100) \ ->Arg(200) \ ->Arg(1000) BM_GATHER(cpu, int32); BM_GATHER(gpu, int32); BM_GATHER(cpu, int64_t); BM_GATHER(gpu, int64_t); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/gather_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/gather_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
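Note on the record above: the XlaGather helper turns an axis gather into xla::GatherDimensionNumbers plus per-dimension slice sizes. As a minimal sketch, the plain C++ program below (no XLA dependency; the vector names only mirror the proto fields) reproduces that bookkeeping for the Simple_TwoD32_Axis0 test case: params of shape {5, 3}, indices of shape {4}, axis 0.

// Sketch of the gather dimension-number bookkeeping from the record above.
#include <cstdint>
#include <cstdio>
#include <vector>

namespace {
void Print(const char* name, const std::vector<int64_t>& v) {
  std::printf("%s =", name);
  for (int64_t x : v) std::printf(" %lld", static_cast<long long>(x));
  std::printf("\n");
}
}  // namespace

int main() {
  const std::vector<int64_t> input_shape = {5, 3};  // params shape in the test
  const std::vector<int64_t> indices_shape = {4};
  const int64_t axis = 0;
  const int64_t num_index_dims = 1;                 // indices are not N-D
  const int64_t indices_rank = static_cast<int64_t>(indices_shape.size());

  std::vector<int64_t> collapsed_slice_dims, offset_dims, start_index_map, slice_sizes;
  for (int64_t i = 0; i < static_cast<int64_t>(input_shape.size()); ++i) {
    if (axis <= i && i < axis + num_index_dims) {
      collapsed_slice_dims.push_back(i);            // the gathered dim collapses away
      slice_sizes.push_back(1);
    } else {
      slice_sizes.push_back(input_shape[i]);        // full slice along untouched dims
    }
    if (i < axis) {
      offset_dims.push_back(i);
    } else if (i >= axis + num_index_dims) {
      offset_dims.push_back(i + indices_rank - num_index_dims);
    }
  }
  for (int64_t i = axis; i < axis + num_index_dims; ++i) start_index_map.push_back(i);

  Print("collapsed_slice_dims", collapsed_slice_dims);  // {0}
  Print("offset_dims", offset_dims);                    // {1}
  Print("start_index_map", start_index_map);            // {0}
  Print("slice_sizes", slice_sizes);                    // {1, 3}
  std::printf("index_vector_dim = %lld\n",
              static_cast<long long>(indices_rank));    // 1 (trailing, 1-D indices)
  return 0;
}

With these numbers the gather produces an output of shape {4, 3}, which is the expected shape in the Simple_TwoD32_Axis0 test.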
af191e4b-a942-466b-910d-1b9f8555aaf2
cpp
tensorflow/tensorflow
diag_op
tensorflow/compiler/tf2xla/kernels/diag_op.cc
tensorflow/core/kernels/diag_op_test.cc
#include <algorithm> #include <vector> #include "tensorflow/compiler/tf2xla/lib/util.h" #include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/matrix.h" #include "xla/hlo/builder/lib/pooling.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/op_kernel.h" namespace tensorflow { namespace { xla::XlaOp CreateDiagonal(xla::XlaOp input, int64_t last_dim_size, absl::Span<const int64_t> other_dims) { xla::XlaBuilder* builder = input.builder(); xla::XlaOp iota = xla::Iota(builder, xla::S32, last_dim_size); xla::XlaOp iota_broadcast = xla::Broadcast(iota, {last_dim_size}); xla::XlaOp mask = xla::Eq(iota_broadcast, iota, {0}); if (!other_dims.empty()) { mask = xla::Broadcast(mask, other_dims); } std::vector<int64_t> out_dim_sizes(other_dims.begin(), other_dims.end()); out_dim_sizes.push_back(last_dim_size); out_dim_sizes.push_back(last_dim_size); std::vector<int64_t> broadcast_dimensions(other_dims.size() + 1); absl::c_iota(broadcast_dimensions, 0); ++broadcast_dimensions.back(); xla::XlaOp input_broadcast = xla::BroadcastInDim(input, out_dim_sizes, broadcast_dimensions); return xla::Select(mask, input_broadcast, xla::ZerosLike(input_broadcast)); } class DiagOp : public XlaOpKernel { public: explicit DiagOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { OP_REQUIRES(ctx, ctx->num_inputs() >= 1, errors::InvalidArgument("Diag op must have at an input")); const TensorShape input_shape = ctx->InputShape(0); auto dims = input_shape.dim_sizes(); OP_REQUIRES(ctx, !dims.empty(), errors::InvalidArgument("Expected 1 <= dims, got shape ", input_shape.DebugString())); xla::XlaOp input = ctx->Input(0); int64_t size = input_shape.num_elements(); input = xla::Reshape(input, {size}); xla::XlaOp diag = CreateDiagonal(input, size, {}); std::vector<int64_t> new_dims(dims.size() * 2); std::copy(dims.begin(), dims.end(), new_dims.begin()); std::copy(dims.begin(), dims.end(), new_dims.begin() + dims.size()); diag = xla::Reshape(diag, new_dims); ctx->SetOutput(0, diag); } }; REGISTER_XLA_OP(Name("Diag"), DiagOp); REGISTER_XLA_OP(Name("DiagPart"), MlirXlaOpKernel); } }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { template <typename T> static Graph* Diag(int n, DataType type) { Graph* g = new Graph(OpRegistry::Global()); Tensor in(type, TensorShape({n})); in.flat<T>().setRandom(); Node* out = test::graph::Diag(g, test::graph::Constant(g, in), type); test::graph::DiagPart(g, out, type); return g; } #define BM_DiagDev(N, T, TFTYPE, DEVICE) \ static void BM_Diag##_##N##_##TFTYPE##_##DEVICE( \ ::testing::benchmark::State& state) { \ test::Benchmark(#DEVICE, Diag<T>(N, TFTYPE), false) \ .Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * N * N); \ } \ BENCHMARK(BM_Diag##_##N##_##TFTYPE##_##DEVICE); #define BM_Diag(N) \ BM_DiagDev(N, int, DT_INT32, cpu); \ BM_DiagDev(N, float, DT_FLOAT, cpu); \ BM_DiagDev(N, std::complex<float>, DT_COMPLEX64, cpu); \ BM_DiagDev(N, int, DT_INT32, gpu); \ BM_DiagDev(N, float, DT_FLOAT, gpu); \ BM_DiagDev(N, std::complex<float>, DT_COMPLEX64, gpu); BM_Diag(16); BM_Diag(128); BM_Diag(512); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/diag_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/diag_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
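Note on the record above: CreateDiagonal builds diag(x) from an iota, an equality mask, a broadcast of the input, and a select. The loop-based sketch below only makes that mask construction concrete; plain arrays stand in for the XLA ops, and the input values are made up.

// Sketch of the iota/equality-mask/select trick used by the DiagOp record above.
#include <cstdio>
#include <vector>

int main() {
  const std::vector<float> input = {1.f, 2.f, 3.f};
  const int n = static_cast<int>(input.size());
  std::vector<float> diag(n * n, 0.f);
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      const bool mask = (i == j);                     // Eq(iota_broadcast, iota, {0})
      const float broadcast_input = input[j];         // BroadcastInDim of the input vector
      diag[i * n + j] = mask ? broadcast_input : 0.f; // Select(mask, input, zeros)
    }
  }
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) std::printf("%4.1f ", diag[i * n + j]);
    std::printf("\n");
  }
  return 0;
}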
f8898a87-0494-44a9-805e-226abdf60693
cpp
tensorflow/tensorflow
isotonic_regression_op
tensorflow/core/kernels/isotonic_regression_op.cc
tensorflow/core/kernels/isotonic_regression_op_test.cc
#include <cmath> #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/platform/threadpool.h" namespace { using ::int64_t; using tensorflow::int32; constexpr int kCostMultiplier = 100; class Segment { public: explicit Segment(int col_index) : col_start_(col_index), col_limit_(col_index + 1) {} int num_points() const { return col_limit_ - col_start_; } void merge_with(const Segment& other) { col_start_ = std::min(col_start_, other.col_start()); col_limit_ = std::max(col_limit_, other.col_limit()); } int col_start() const { return col_start_; } int col_limit() const { return col_limit_; } private: int col_start_; int col_limit_; }; template <typename T> class L2PavaSegment : public Segment { public: L2PavaSegment(T y, int col_index) : Segment(col_index), y_sum_(y), minimum_(y) {} void merge_with(const L2PavaSegment& other) { Segment::merge_with(other); y_sum_ += other.y_sum_; minimum_ = y_sum_ / static_cast<T>(num_points()); } T minimum() const { return minimum_; } private: T y_sum_; T minimum_; }; template <typename SegmentType, typename FloatTensor, typename IntTensor> void solve_pava(const std::function<SegmentType(int, int)>& make_segment, FloatTensor* solution, IntTensor* segments, int row_index) { const size_t n = solution->dimensions()[1]; std::vector<SegmentType> pools; pools.reserve(n); for (size_t col_index = 0; col_index < n; ++col_index) { pools.push_back(make_segment(row_index, col_index)); while (pools.size() > 1 && pools.rbegin()->minimum() > (pools.rbegin() + 1)->minimum()) { (pools.rbegin() + 1)->merge_with(*pools.rbegin()); pools.pop_back(); } } int segment_id = 0; for (const auto& pool : pools) { const auto pool_minimum = pool.minimum(); auto* solution_ptr = &(*solution)(row_index, pool.col_start()); auto* segments_ptr = &(*segments)(row_index, pool.col_start()); for (int i = pool.col_start(); i < pool.col_limit(); ++i) { *solution_ptr++ = pool_minimum; *segments_ptr++ = segment_id; } ++segment_id; } } template <typename SegmentType, typename FloatTensor, typename IntTensor> void solve_pava_batch(const std::function<SegmentType(int, int)>& make_segment, FloatTensor* solution, IntTensor* segments, tensorflow::OpKernelContext* context) { const int batch_size = solution->dimensions()[0]; const int problem_size = solution->dimensions()[1]; auto thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; thread_pool->ParallelFor( batch_size, kCostMultiplier * problem_size, [&make_segment, &solution, &segments](int64_t row_start, int64_t row_limit) { for (int row_index = static_cast<int>(row_start); row_index < static_cast<int>(row_limit); ++row_index) { solve_pava(make_segment, solution, segments, row_index); } }); } } template <typename Tin, typename Tout> class IsotonicRegressionOp : public tensorflow::OpKernel { public: explicit IsotonicRegressionOp(tensorflow::OpKernelConstruction* context) : tensorflow::OpKernel(context) {} void Compute(tensorflow::OpKernelContext* context) override { const tensorflow::Tensor& input_tensor = context->input(0); const auto input = input_tensor.flat_inner_dims<Tin, 2>(); int int_max = std::numeric_limits<int32_t>::max(); OP_REQUIRES(context, tensorflow::FastBoundsCheck(input.dimensions()[0], int_max) && tensorflow::FastBoundsCheck(input.dimensions()[1], int_max), tensorflow::errors::InvalidArgument("Tensor too large")); const auto shape = input_tensor.shape(); 
tensorflow::Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, shape, &output_tensor)); auto output = output_tensor->flat_inner_dims<Tout, 2>(); tensorflow::Tensor* segments_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, shape, &segments_tensor)); auto segments = segments_tensor->flat_inner_dims<int>(); auto make_l2_segment = [&input](int row_index, int col_index) { return L2PavaSegment<Tout>(input(row_index, col_index), col_index); }; solve_pava_batch<L2PavaSegment<Tout>>(make_l2_segment, &output, &segments, context); } }; #define REGISTER_CPU_KERNEL(Tin, Tout) \ REGISTER_KERNEL_BUILDER(Name("IsotonicRegression") \ .Device(tensorflow::DEVICE_CPU) \ .TypeConstraint<Tin>("T") \ .TypeConstraint<Tout>("output_dtype"), \ IsotonicRegressionOp<Tin, Tout>); #define REGISTER_CPU_SAME_KERNEL(T) REGISTER_CPU_KERNEL(T, T) TF_CALL_FLOAT_TYPES(REGISTER_CPU_SAME_KERNEL); #define REGISTER_CPU_KERNEL_FLOAT(Tin) REGISTER_CPU_KERNEL(Tin, float) TF_CALL_int16(REGISTER_CPU_KERNEL_FLOAT); TF_CALL_int8(REGISTER_CPU_KERNEL_FLOAT); #define REGISTER_CPU_KERNEL_DOUBLE(Tin) REGISTER_CPU_KERNEL(Tin, double) TF_CALL_int64(REGISTER_CPU_KERNEL_DOUBLE); TF_CALL_int32(REGISTER_CPU_KERNEL_DOUBLE);
#include <cstdio> #include <functional> #include <memory> #include <vector> #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { class IsotonicRegressionOpTest : public OpsTestBase { public: void MakeOp(DataType type) { TF_ASSERT_OK(NodeDefBuilder("myop", "IsotonicRegression") .Input(FakeInput(type)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; class BenchmarkHelper : public IsotonicRegressionOpTest { public: void TestBody() override {} void AddIncreasingInput(int batch_size, int input_size) { std::vector<float> input_data(input_size * batch_size, 0); for (int i = 0; i < input_data.size(); i++) { input_data[i] = i; } AddInputFromArray<float>(TensorShape({batch_size, input_size}), input_data); } }; TEST_F(IsotonicRegressionOpTest, Constant) { MakeOp(DT_FLOAT_REF); AddInputFromArray<float>(TensorShape({5, 3}), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); test::ExpectClose(expected, *GetOutput((0))); } TEST_F(IsotonicRegressionOpTest, IncreasingInput) { MakeOp(DT_FLOAT_REF); AddInputFromArray<float>(TensorShape({5, 3}), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11, 14, 14, 14}); test::ExpectClose(expected, *GetOutput((0))); Tensor expected_ord(allocator(), DT_INT32, TensorShape({5, 3})); test::FillValues<int>(&expected_ord, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); test::ExpectTensorEqual<int>(expected_ord, *GetOutput((1))); } TEST_F(IsotonicRegressionOpTest, Decreasing) { MakeOp(DT_FLOAT_REF); AddInputFromArray<float>(TensorShape({5, 3}), {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3})); test::FillValues<float>(&expected, {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); test::ExpectClose(expected, *GetOutput((0))); Tensor expected_ord(allocator(), DT_INT32, TensorShape({5, 3})); test::FillValues<int>(&expected_ord, {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2}); test::ExpectTensorEqual<int>(expected_ord, *GetOutput((1))); } #ifdef PLATFORM_GOOGLE static void BM_IncreasingSequence(benchmark::State& state) { int batch_size = state.range(0); int input_size = state.range(1); for (auto _ : state) { state.PauseTiming(); BenchmarkHelper helper; helper.MakeOp(DT_FLOAT_REF); helper.AddIncreasingInput(batch_size, input_size); state.ResumeTiming(); Status stat = helper.RunOpKernel(); } state.SetItemsProcessed( static_cast<int64_t>(batch_size * input_size * state.iterations())); } BENCHMARK(BM_IncreasingSequence) ->Args({1, 1 << 0}) ->Args({1, 1 << 5}) ->Args({1, 1 << 8}) ->Args({1, 1 << 10}) ->Args({1, 1 << 20}) ->Args({1, 2 << 20}) ->Args({1 << 0, 1 << 10}) ->Args({1 << 1, 1 << 
10}) ->Args({1 << 4, 1 << 10}) ->Args({1 << 6, 1 << 10}) ->Args({1 << 9, 1 << 10}) ->Args({1 << 10, 1 << 10}); #endif } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/isotonic_regression_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/isotonic_regression_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
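Note on the record above: solve_pava is the pool-adjacent-violators algorithm with an L2 objective and a non-increasing constraint; each new element starts as its own pool and is merged backwards while its pool mean exceeds the previous pool's mean. The single-row sketch below uses ordinary doubles instead of the templated segments and the first row of the IncreasingInput test as input.

// Single-row L2 PAVA sketch mirroring the merge rule in the record above.
#include <cstdio>
#include <vector>

struct Pool {
  double sum;
  int count;
  double mean() const { return sum / count; }
};

int main() {
  const std::vector<double> y = {1, 2, 3};  // first row of the IncreasingInput test
  std::vector<Pool> pools;
  for (double v : y) {
    pools.push_back({v, 1});
    // Merge while the newest pool violates the non-increasing constraint.
    while (pools.size() > 1 &&
           pools.back().mean() > pools[pools.size() - 2].mean()) {
      Pool last = pools.back();
      pools.pop_back();
      pools.back().sum += last.sum;
      pools.back().count += last.count;
    }
  }
  // Expand pools back into per-element fitted values and segment ids.
  int segment = 0;
  for (const Pool& p : pools) {
    for (int i = 0; i < p.count; ++i)
      std::printf("value=%g segment=%d\n", p.mean(), segment);
    ++segment;
  }
  return 0;
}

For {1, 2, 3} everything collapses into one pool with mean 2, matching the expected output {2, 2, 2} and segment ids {0, 0, 0} in the test.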
18bf2c41-fa73-4265-8160-56ff07857792
cpp
tensorflow/tensorflow
regex_replace_op
tensorflow/core/kernels/regex_replace_op.cc
tensorflow/core/kernels/regex_replace_op_test.cc
#include <string> #include "re2/re2.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" namespace tensorflow { namespace { Status InternalCompute(const RE2& regex, const string& rewrite, const bool replace_global, OpKernelContext* ctx) { const Tensor* input_tensor; TF_RETURN_IF_ERROR(ctx->input("input", &input_tensor)); Tensor* output_tensor; std::unique_ptr<Tensor> maybe_forwarded = ctx->forward_input(0 , 0 , tensorflow::DT_STRING, input_tensor->shape(), ctx->input_memory_type(0), ctx->input_alloc_attr(0)); if (maybe_forwarded) { output_tensor = maybe_forwarded.get(); TF_RETURN_IF_ERROR(ctx->set_output("output", *output_tensor)); } else { TF_RETURN_IF_ERROR( ctx->allocate_output("output", input_tensor->shape(), &output_tensor)); output_tensor->flat<tstring>() = input_tensor->flat<tstring>(); } auto output_flat = output_tensor->flat<tstring>(); for (size_t i = 0; i < output_flat.size(); ++i) { string buf = output_flat(i); if (replace_global) { RE2::GlobalReplace(&buf, regex, rewrite); } else { RE2::Replace(&buf, regex, rewrite); } output_flat(i) = std::move(buf); } return absl::OkStatus(); } } class RegexReplaceOp : public OpKernel { public: explicit RegexReplaceOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("replace_global", &replace_global_)); } ~RegexReplaceOp() override {} void Compute(OpKernelContext* ctx) override { const Tensor* pattern_tensor; OP_REQUIRES_OK(ctx, ctx->input("pattern", &pattern_tensor)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(pattern_tensor->shape()), errors::InvalidArgument("Pattern must be scalar, but received ", pattern_tensor->shape().DebugString())); const string& pattern = pattern_tensor->scalar<tstring>()(); std::shared_ptr<RE2> regex = CachedRE2(pattern); OP_REQUIRES(ctx, regex->ok(), errors::InvalidArgument("Invalid pattern: ", pattern, ", error: ", regex->error())); const Tensor* rewrite_tensor; OP_REQUIRES_OK(ctx, ctx->input("rewrite", &rewrite_tensor)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(rewrite_tensor->shape()), errors::InvalidArgument("Rewrite must be scalar, but received ", rewrite_tensor->shape().DebugString())); const string& rewrite = rewrite_tensor->scalar<tstring>()(); OP_REQUIRES_OK(ctx, InternalCompute(*regex, rewrite, replace_global_, ctx)); } private: std::shared_ptr<RE2> CachedRE2(const string& pattern) { { tf_shared_lock l(mu_); if (regex_ != nullptr && regex_->pattern() == pattern) { return regex_; } } auto regex = std::make_shared<RE2>(pattern); { mutex_lock l(mu_); regex_.swap(regex); return regex_; } } bool replace_global_; mutex mu_; std::shared_ptr<RE2> regex_ TF_GUARDED_BY(mu_); RegexReplaceOp(const RegexReplaceOp&) = delete; void operator=(const RegexReplaceOp&) = delete; }; REGISTER_KERNEL_BUILDER(Name("RegexReplace").Device(DEVICE_CPU), RegexReplaceOp); class StaticRegexReplaceOp : public OpKernel { public: explicit StaticRegexReplaceOp(OpKernelConstruction* ctx) : OpKernel(ctx) { string pattern; OP_REQUIRES_OK(ctx, ctx->GetAttr("pattern", &pattern)); re_ = std::make_unique<RE2>(pattern); OP_REQUIRES(ctx, re_->ok(), errors::InvalidArgument("Invalid pattern: ", pattern, ", error: ", re_->error())); OP_REQUIRES_OK(ctx, ctx->GetAttr("rewrite", &rewrite_str_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("replace_global", &replace_global_)); } void 
Compute(OpKernelContext* ctx) override { OP_REQUIRES_OK(ctx, InternalCompute(*re_, rewrite_str_, replace_global_, ctx)); } private: std::unique_ptr<RE2> re_; string rewrite_str_; bool replace_global_; }; REGISTER_KERNEL_BUILDER(Name("StaticRegexReplace").Device(DEVICE_CPU), StaticRegexReplaceOp); }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { const char* lines[] = { "**TensorFlow** is an open source software library for numerical " "computation using data flow graphs.", "The graph nodes represent mathematical operations, while the graph edges " "represent the multidimensional data arrays (tensors) that flow between " "them.", "This flexible architecture enables you to deploy computation to one or " "more CPUs or GPUs in a desktop, server, or mobile device without " "rewriting code.", "TensorFlow also includes " "[TensorBoard](https: "summaries_and_tensorboard), a data visualization toolkit.", "TensorFlow was originally developed by researchers and engineers working " "on the Google Brain team within Google's Machine Intelligence Research " "organization for the purposes of conducting machine learning and deep " "neural networks research.", "The system is general enough to be applicable in a wide variety of other " "domains, as well.", "TensorFlow provides stable Python API and C APIs as well as without API " "backwards compatibility guarantee like C++, Go, Java, JavaScript and " "Swift."}; const char kRegExPattern[] = "\\p{P}"; const char kRewrite[] = " "; Tensor GetTestTensor(int batch) { const int sz = TF_ARRAYSIZE(lines); Tensor t(DT_STRING, {batch}); auto s = t.flat<tstring>(); for (int i = 0; i < batch; ++i) { s(i) = lines[i % sz]; } return t; } Graph* SetupRegexReplaceGraph(const Tensor& input, const string& input_pattern, const string& input_rewrite) { Graph* g = new Graph(OpRegistry::Global()); Tensor pattern(DT_STRING, TensorShape({})); pattern.flat<tstring>().setConstant(input_pattern); Tensor rewrite(DT_STRING, TensorShape({})); rewrite.flat<tstring>().setConstant(input_rewrite); TF_CHECK_OK(NodeBuilder("regex_replace_op", "RegexReplace") .Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, pattern)) .Input(test::graph::Constant(g, rewrite)) .Attr("replace_global", true) .Finalize(g, nullptr )); return g; } static void BM_RegexReplace(::testing::benchmark::State& state) { const int batch_size = state.range(0); Tensor input = GetTestTensor(batch_size); Graph* g = SetupRegexReplaceGraph(input, kRegExPattern, kRewrite); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations())); } BENCHMARK(BM_RegexReplace) ->UseRealTime() ->Arg(1) ->Arg(8) ->Arg(16) ->Arg(32) ->Arg(64) ->Arg(128) ->Arg(256); Graph* SetupStaticGraph(const Tensor& input, const string& input_pattern, const string& rewrite) { Graph* g = new Graph(OpRegistry::Global()); TF_CHECK_OK(NodeBuilder("static_regex_replace_op", "StaticRegexReplace") .Attr("pattern", input_pattern) .Attr("rewrite", rewrite) .Input(test::graph::Constant(g, input)) .Attr("replace_global", true) .Finalize(g, nullptr )); return g; } static void 
BM_StaticRegexReplace(::testing::benchmark::State& state) { const int batch_size = state.range(0); Tensor input = GetTestTensor(batch_size); Graph* g = SetupStaticGraph(input, kRegExPattern, kRewrite); test::Benchmark("cpu", g, false).Run(state); state.SetItemsProcessed(static_cast<int64_t>(state.iterations())); } BENCHMARK(BM_StaticRegexReplace) ->UseRealTime() ->Arg(1) ->Arg(8) ->Arg(16) ->Arg(32) ->Arg(64) ->Arg(128) ->Arg(256); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/regex_replace_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/regex_replace_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
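Note on the record above: RegexReplaceOp caches the last compiled RE2 behind a shared/exclusive lock pair so repeated executions with the same scalar pattern skip recompilation. The sketch below shows the same idea with std::shared_mutex and std::regex standing in for tf_shared_lock/mutex_lock and RE2; PatternCache and its members are illustrative names, not TF API.

// Sketch of the compile-once pattern cache from the record above (C++17).
#include <cstdio>
#include <memory>
#include <mutex>
#include <regex>
#include <shared_mutex>
#include <string>

class PatternCache {
 public:
  std::shared_ptr<const std::regex> Get(const std::string& pattern) {
    {
      // Fast path: shared lock, reuse the cached object if the pattern matches.
      std::shared_lock<std::shared_mutex> l(mu_);
      if (cached_ && pattern == cached_pattern_) return cached_;
    }
    // Slow path: compile outside the lock, then swap in under an exclusive lock.
    auto compiled = std::make_shared<const std::regex>(pattern);
    std::unique_lock<std::shared_mutex> l(mu_);
    cached_pattern_ = pattern;
    cached_ = compiled;
    return cached_;
  }

 private:
  std::shared_mutex mu_;
  std::string cached_pattern_;
  std::shared_ptr<const std::regex> cached_;
};

int main() {
  PatternCache cache;
  auto re = cache.Get("[[:punct:]]");
  const std::string line = "Hello, world!";
  std::printf("%s\n", std::regex_replace(line, *re, " ").c_str());  // "Hello  world "
  return 0;
}

Returning a shared_ptr keeps a compiled pattern alive for callers even if another thread swaps in a different pattern concurrently, which is the same reason the kernel above hands out std::shared_ptr<RE2> rather than a raw pointer.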
1f1ddcd6-b16e-4419-83c4-75e3931b8b8d
cpp
tensorflow/tensorflow
save_op
tensorflow/core/kernels/save_op.cc
tensorflow/core/kernels/save_op_test.cc
#include "tensorflow/core/kernels/save_restore_tensor.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/tensor_slice_writer.h" namespace tensorflow { class SaveOp : public OpKernel { public: explicit SaveOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { SaveTensors(context, &checkpoint::CreateTableTensorSliceBuilder, false); } }; REGISTER_KERNEL_BUILDER(Name("Save").Device(DEVICE_CPU), SaveOp); class SaveSlicesOp : public OpKernel { public: explicit SaveSlicesOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { SaveTensors(context, &checkpoint::CreateTableTensorSliceBuilder, true); } }; REGISTER_KERNEL_BUILDER(Name("SaveSlices").Device(DEVICE_CPU), SaveSlicesOp); class ShardedFilenameOp : public OpKernel { public: explicit ShardedFilenameOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { static const char* input_names[3] = {"basename", "shard", "num_shards"}; for (int i = 0; i < ctx->num_inputs(); ++i) { OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(ctx->input(i).shape()), errors::InvalidArgument(input_names[i], " must be a scalar, got shape ", ctx->input(i).shape().DebugString())); } Tensor* out = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out)); out->scalar<tstring>()() = strings::Printf( "%s-%05d-of-%05d", ctx->input(0).scalar<tstring>()().c_str(), ctx->input(1).scalar<int32>()(), ctx->input(2).scalar<int32>()()); } }; REGISTER_KERNEL_BUILDER(Name("ShardedFilename").Device(DEVICE_CPU), ShardedFilenameOp); class ShardedFilespecOp : public OpKernel { public: explicit ShardedFilespecOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { static const char* input_names[2] = {"basename", "num_shards"}; for (int i = 0; i < ctx->num_inputs(); ++i) { OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(ctx->input(i).shape()), errors::InvalidArgument(input_names[i], " must be a scalar, got shape ", ctx->input(i).shape().DebugString())); } Tensor* out = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out)); out->scalar<tstring>()() = strings::Printf( "%s-\?\?\?\?\?-of-%05d", ctx->input(0).scalar<tstring>()().c_str(), ctx->input(1).scalar<int32>()()); } }; REGISTER_KERNEL_BUILDER(Name("ShardedFilespec").Device(DEVICE_CPU), ShardedFilespecOp); }
#include <functional> #include <memory> #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/io_ops.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/tensor_slice_reader.h" namespace tensorflow { namespace { class SaveOpTest : public OpsTestBase { protected: void MakeOp() { TF_ASSERT_OK( NodeDefBuilder("myop", "Save") .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8, DT_INT16, DT_INT64, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_HALF})) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SaveOpTest, Simple) { const string filename = io::JoinPath(testing::TmpDir(), "tensor_simple"); const string tensornames[] = { "tensor_bool", "tensor_int", "tensor_float", "tensor_double", "tensor_qint8", "tensor_qint32", "tensor_uint8", "tensor_int8", "tensor_int16", "tensor_int64", "tensor_string", "tensor_complex64", "tensor_complex128", "tensor_half"}; MakeOp(); AddInput<tstring>(TensorShape({}), [&filename](int x) -> tstring { return filename; }); AddInput<tstring>(TensorShape({14}), [&tensornames](int x) -> tstring { return tensornames[x]; }); AddInput<bool>(TensorShape({2}), [](int x) -> bool { return x != 0; }); AddInput<int32>(TensorShape({10}), [](int x) -> int32 { return x + 1; }); AddInput<float>(TensorShape({2, 4}), [](int x) -> float { return static_cast<float>(x) / 10; }); AddInput<double>(TensorShape({2, 4}), [](int x) -> double { return static_cast<double>(x) / 20; }); AddInput<qint8>(TensorShape({3, 2}), [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); AddInput<qint32>(TensorShape({2, 3}), [](int x) -> qint32 { return *reinterpret_cast<qint32*>(&x) * qint8(2); }); AddInput<uint8>(TensorShape({11}), [](int x) -> uint8 { return x + 1; }); AddInput<int8>(TensorShape({7}), [](int x) -> int8 { return x - 7; }); AddInput<int16>(TensorShape({7}), [](int x) -> int16 { return x - 8; }); AddInput<int64_t>(TensorShape({9}), [](int x) -> int64 { return x - 9; }); AddInput<tstring>(TensorShape({2}), [](int x) -> tstring { return x ? 
"yes" : "no"; }); AddInput<complex64>(TensorShape({2, 3}), [](int x) -> complex64 { return complex64(100 + x, 200 + x); }); AddInput<complex128>(TensorShape({2, 3}), [](int x) -> complex128 { return complex128(100 + x, 200 + x); }); AddInput<Eigen::half>(TensorShape({2, 4}), [](int x) -> Eigen::half { return static_cast<Eigen::half>(x) / Eigen::half(2); }); TF_ASSERT_OK(RunOpKernel()); checkpoint::TensorSliceReader reader(filename, checkpoint::OpenTableTensorSliceReader); TF_EXPECT_OK(reader.status()); { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_bool", &shape, &type)); TensorShape expected({2}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_BOOL, type); TensorSlice s = TensorSlice::ParseOrDie("-"); bool data[2]; std::fill_n(data, 2, false); EXPECT_TRUE(reader.CopySliceData("tensor_bool", s, data)); for (int i = 0; i < 2; ++i) { EXPECT_EQ((i != 0), data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_int", &shape, &type)); TensorShape expected({10}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_INT32, type); TensorSlice s = TensorSlice::ParseOrDie("-"); int data[10]; std::fill_n(data, 10, 0); EXPECT_TRUE(reader.CopySliceData("tensor_int", s, data)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(i + 1, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_float", &shape, &type)); TensorShape expected({2, 4}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_FLOAT, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); float data[8]; std::fill_n(data, 8, 0); EXPECT_TRUE(reader.CopySliceData("tensor_float", s, data)); for (int i = 0; i < 8; ++i) { EXPECT_EQ(static_cast<float>(i) / 10, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_double", &shape, &type)); TensorShape expected({2, 4}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_DOUBLE, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); double data[8]; std::fill_n(data, 8, 0); EXPECT_TRUE(reader.CopySliceData("tensor_double", s, data)); for (int i = 0; i < 8; ++i) { EXPECT_EQ(static_cast<double>(i) / 20, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_qint8", &shape, &type)); TensorShape expected({3, 2}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_QINT8, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); qint8 data[6]; EXPECT_TRUE(reader.CopySliceData("tensor_qint8", s, data)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(*reinterpret_cast<qint8*>(&i), data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_qint32", &shape, &type)); TensorShape expected({2, 3}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_QINT32, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); qint32 data[6]; EXPECT_TRUE(reader.CopySliceData("tensor_qint32", s, data)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_uint8", &shape, &type)); TensorShape expected({11}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_UINT8, type); TensorSlice s = TensorSlice::ParseOrDie("-"); uint8 data[11]; EXPECT_TRUE(reader.CopySliceData("tensor_uint8", s, data)); for (int i = 0; i < 11; ++i) { EXPECT_EQ(i + 1, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_int8", &shape, &type)); TensorShape expected({7}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_INT8, 
type); TensorSlice s = TensorSlice::ParseOrDie("-"); int8 data[7]; EXPECT_TRUE(reader.CopySliceData("tensor_int8", s, data)); for (int i = 0; i < 7; ++i) { EXPECT_EQ(i - 7, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_int16", &shape, &type)); TensorShape expected({7}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_INT16, type); TensorSlice s = TensorSlice::ParseOrDie("-"); int16 data[7]; EXPECT_TRUE(reader.CopySliceData("tensor_int16", s, data)); for (int i = 0; i < 7; ++i) { EXPECT_EQ(i - 8, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_int64", &shape, &type)); TensorShape expected({9}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_INT64, type); TensorSlice s = TensorSlice::ParseOrDie("-"); int64_t data[9]; EXPECT_TRUE(reader.CopySliceData("tensor_int64", s, data)); for (int i = 0; i < 9; ++i) { EXPECT_EQ(i - 9, data[i]); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_string", &shape, &type)); TensorShape expected({2}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_STRING, type); TensorSlice s = TensorSlice::ParseOrDie("-"); tstring data[2]; EXPECT_TRUE(reader.CopySliceData("tensor_string", s, data)); EXPECT_EQ("no", data[0]); EXPECT_EQ("yes", data[1]); } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_complex64", &shape, &type)); TensorShape expected({2, 3}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_COMPLEX64, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); complex64 data[6]; EXPECT_TRUE(reader.CopySliceData("tensor_complex64", s, data)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(100 + i, data[i].real()); EXPECT_EQ(200 + i, data[i].imag()); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_complex128", &shape, &type)); TensorShape expected({2, 3}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_COMPLEX128, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); complex128 data[6]; EXPECT_TRUE(reader.CopySliceData("tensor_complex128", s, data)); for (int i = 0; i < 6; ++i) { EXPECT_EQ(100 + i, data[i].real()); EXPECT_EQ(200 + i, data[i].imag()); } } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_half", &shape, &type)); TensorShape expected({2, 4}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_HALF, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); Eigen::half data[8]; std::fill_n(data, 8, Eigen::half(0)); EXPECT_TRUE(reader.CopySliceData("tensor_half", s, data)); for (int i = 0; i < 8; ++i) { EXPECT_EQ(static_cast<Eigen::half>(i) / Eigen::half(2), data[i]); } } } class SaveSlicesOpTest : public OpsTestBase { protected: void MakeOp() { TF_ASSERT_OK(NodeDefBuilder("myop", "SaveSlices") .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput( {DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8, DT_QINT32})) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SaveSlicesOpTest, Slices) { const string filename = io::JoinPath(testing::TmpDir(), "tensor_slices"); const string tensornames[] = {"tensor_int", "tensor_float", "tensor_double", "tensor_qint8", "tensor_qint32"}; const string tensorshapes[] = { "10 -", "2 4 -:0,2", "2 4 0,1:2,2", "3 2 -:-", "2 3 1,1:2,1" }; MakeOp(); AddInput<tstring>(TensorShape({}), [&filename](int x) -> tstring { return filename; }); AddInput<tstring>(TensorShape({5}), [&tensornames](int x) -> tstring { return tensornames[x]; }); AddInput<tstring>(TensorShape({5}), [&tensorshapes](int x) -> tstring { 
return tensorshapes[x]; }); AddInput<int32>(TensorShape({10}), [](int x) -> int32 { return x + 1; }); AddInput<float>(TensorShape({2, 2}), [](int x) -> float { return static_cast<float>(x) / 10; }); AddInput<double>(TensorShape({1, 2}), [](int x) -> double { return static_cast<double>(x) / 20; }); AddInput<qint8>(TensorShape({3, 2}), [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); }); AddInput<qint32>(TensorShape({1, 1}), [](int x) -> qint32 { return *reinterpret_cast<qint32*>(&x) * qint8(2); }); TF_ASSERT_OK(RunOpKernel()); checkpoint::TensorSliceReader reader(filename, checkpoint::OpenTableTensorSliceReader); TF_EXPECT_OK(reader.status()); { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_int", &shape, &type)); TensorShape expected({10}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_INT32, type); TensorSlice s = TensorSlice::ParseOrDie("-"); int data[10]; EXPECT_TRUE(reader.CopySliceData("tensor_int", s, data)); } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_float", &shape, &type)); TensorShape expected({2, 4}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_FLOAT, type); TensorSlice full_slice = TensorSlice::ParseOrDie("-:-"); TensorSlice saved_slice = TensorSlice::ParseOrDie("-:0,2"); float data[8]; EXPECT_FALSE(reader.CopySliceData("tensor_float", full_slice, data)); EXPECT_TRUE(reader.CopySliceData("tensor_float", saved_slice, data)); } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_double", &shape, &type)); TensorShape expected({2, 4}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_DOUBLE, type); TensorSlice full_slice = TensorSlice::ParseOrDie("-:-"); TensorSlice saved_slice = TensorSlice::ParseOrDie("0,1:2,2"); double data[8]; EXPECT_FALSE(reader.CopySliceData("tensor_double", full_slice, data)); EXPECT_TRUE(reader.CopySliceData("tensor_double", saved_slice, data)); } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_qint8", &shape, &type)); TensorShape expected({3, 2}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_QINT8, type); TensorSlice s = TensorSlice::ParseOrDie("-:-"); qint8 data[6]; EXPECT_TRUE(reader.CopySliceData("tensor_qint8", s, data)); } { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("tensor_qint32", &shape, &type)); TensorShape expected({2, 3}); EXPECT_TRUE(shape.IsSameSize(expected)); EXPECT_EQ(DT_QINT32, type); TensorSlice s = TensorSlice::ParseOrDie("1,1:2,1"); TensorSlice full_slice = TensorSlice::ParseOrDie("-:-"); TensorSlice saved_slice = TensorSlice::ParseOrDie("1,1:2,1"); qint32 data[6]; EXPECT_FALSE(reader.CopySliceData("tensor_qint32", full_slice, data)); EXPECT_TRUE(reader.CopySliceData("tensor_qint32", saved_slice, data)); } } class SaveOpSlices2Test : public OpsTestBase { protected: void MakeOp() { TF_ASSERT_OK(NodeDefBuilder("myop", "SaveSlices") .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput()) .Input(FakeInput({DT_INT32, DT_INT32, DT_FLOAT})) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } }; TEST_F(SaveOpSlices2Test, TwoSlices) { const string filename = io::JoinPath(testing::TmpDir(), "three_slices"); const string tensornames[] = {"four_by_sixteen", "four_by_sixteen", "small"}; const string tensorshapes[] = { "4 16 0,2:-", "4 16 2,2:-", "" }; MakeOp(); AddInput<tstring>(TensorShape({}), [&filename](int x) -> tstring { return filename; }); AddInput<tstring>(TensorShape({3}), [&tensornames](int x) -> tstring { return tensornames[x]; }); AddInput<tstring>(TensorShape({3}), 
[&tensorshapes](int x) -> tstring { return tensorshapes[x]; }); AddInput<int32>(TensorShape({2, 16}), [](int x) -> int32 { return x + 1; }); AddInput<int32>(TensorShape({2, 16}), [](int x) -> int32 { return 10 * (x + 1); }); AddInput<float>(TensorShape({2, 4}), [](int x) -> float { return static_cast<float>(x) / 10; }); TF_ASSERT_OK(RunOpKernel()); checkpoint::TensorSliceReader reader(filename, checkpoint::OpenTableTensorSliceReader); TF_EXPECT_OK(reader.status()); { Tensor reloaded(DT_INT32, {4, 16}); TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("four_by_sixteen", &shape, &type)); EXPECT_TRUE(shape.IsSameSize(reloaded.shape())); EXPECT_EQ(type, reloaded.dtype()); EXPECT_TRUE(reader.CopySliceData("four_by_sixteen", TensorSlice(reloaded.dims()), reloaded.flat<int>().data())); { auto slice = reloaded.Slice(0, 2).flat<int>(); for (int i = 0; i < slice.size(); ++i) { EXPECT_EQ(i + 1, slice(i)); } } { auto slice = reloaded.Slice(2, 4).flat<int>(); for (int i = 0; i < slice.size(); ++i) { EXPECT_EQ(10 * (i + 1), slice(i)); } } } { Tensor reloaded(DT_FLOAT, {2, 4}); TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("small", &shape, &type)); EXPECT_TRUE(shape.IsSameSize(reloaded.shape())); EXPECT_EQ(DT_FLOAT, reloaded.dtype()); EXPECT_TRUE(reader.CopySliceData("small", TensorSlice(reloaded.dims()), reloaded.flat<float>().data())); for (int64_t i = 0; i < reloaded.NumElements(); ++i) { EXPECT_EQ(static_cast<float>(i) / 10, reloaded.flat<float>().data()[i]); } } } void BM_LargeTensorWrite(::testing::benchmark::State& state) { const int num_elements = state.range(0); Tensor tensor(DT_FLOAT, TensorShape({num_elements})); tensor.flat<float>().setZero(); const tstring temp_filename = io::JoinPath(testing::TmpDir(), "benchmark_checkpoint"); auto root = Scope::NewRootScope().ExitOnError(); const tstring tensor_name = "my_tensor"; ops::Save give_me_a_name(root, temp_filename, {tensor_name}, {{tensor}}); SessionOptions session_options; session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_opt_level(tensorflow::OptimizerOptions::L0); TF_CHECK_OK(root.status()); Graph* g = new Graph(OpRegistry::Global()); TF_CHECK_OK(root.ToGraph(g)); VLOG(1) << "Save op's output path: " << temp_filename; VLOG(1) << "# nodes in Graph: " << g->num_nodes(); test::Benchmark("cpu", g, &session_options, nullptr, nullptr, "", false) .Run(state); } BENCHMARK(BM_LargeTensorWrite)->Arg((1 << 30) / 4 ); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/save_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/save_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
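The shapes_and_slices strings fed to SaveSlices in the tests above (for example "2 4 -:0,2") pack the full tensor shape together with a slice spec: dimensions in the spec are separated by ':', '-' selects an entire dimension, and 'start,length' selects a sub-range. A minimal standalone sketch of inspecting such a spec with TensorSlice, assuming a TensorFlow build environment (the include path and method names follow tensorflow/core/framework/tensor_slice.h as used in the tests):

#include <iostream>
#include "tensorflow/core/framework/tensor_slice.h"

int main() {
  // "-:0,2" means: all of dimension 0, and the range [0, 2) of dimension 1.
  tensorflow::TensorSlice slice = tensorflow::TensorSlice::ParseOrDie("-:0,2");
  for (int d = 0; d < slice.dims(); ++d) {
    if (slice.IsFullAt(d)) {
      std::cout << "dim " << d << ": full\n";
    } else {
      std::cout << "dim " << d << ": start=" << slice.start(d)
                << " length=" << slice.length(d) << "\n";
    }
  }
  return 0;
}

This is also why the tests above expect CopySliceData to fail for the full slice "-:-" when only "-:0,2" was saved: the reader can only reconstruct regions covered by the stored slices.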
0ec728f6-1b23-4791-baf2-dec2e3938b04
cpp
tensorflow/tensorflow
random_poisson_op
tensorflow/core/kernels/random_poisson_op.cc
tensorflow/core/kernels/random_poisson_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/random_poisson_op.h" #include <algorithm> #include <cmath> #include <limits> #include <memory> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/lib/random/random_distributions.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/util/guarded_philox_random.h" #include "tensorflow/core/util/work_sharder.h" #if EIGEN_COMP_GNUC && __cplusplus > 199711L #define DISABLE_FLOAT_EQUALITY_WARNING \ _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"") #define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop") #else #define DISABLE_FLOAT_EQUALITY_WARNING #define ENABLE_FLOAT_EQUALITY_WARNING #endif #define UNIFORM(X) \ if (uniform_remaining == 0) { \ uniform_remaining = Uniform::kResultElementCount; \ uniform_result = uniform(&gen); \ } \ uniform_remaining--; \ CT X = uniform_result[uniform_remaining] namespace tensorflow { namespace { static constexpr int kReservedSamplesPerOutput = 256; typedef Eigen::ThreadPoolDevice CPUDevice; template <typename T> struct PoissonComputeType { typedef double ComputeType; }; } namespace functor { template <typename T, typename U> struct PoissonFunctor<CPUDevice, T, U> { void operator()(OpKernelContext* ctx, const CPUDevice& d, const T* rate_flat, int64_t num_rate, int64_t num_samples, const random::PhiloxRandom& rng, U* samples_flat) { typedef random::UniformDistribution<random::PhiloxRandom, CT> Uniform; auto DoWork = [num_samples, num_rate, &rng, samples_flat, rate_flat]( int64_t start_output, int64_t limit_output) { Uniform uniform; typename Uniform::ResultType uniform_result; for (int64_t output_idx = start_output; output_idx < limit_output; ) { const int64_t rate_idx = output_idx / num_samples; const CT rate = CT(rate_flat[rate_idx]); auto samples_rate_output = samples_flat + rate_idx; if (rate < CT(10)) { const CT exp_neg_rate = Eigen::numext::exp(-rate); for (int64_t sample_idx = output_idx % num_samples; sample_idx < num_samples && output_idx < limit_output; sample_idx++, output_idx++) { random::PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16_t uniform_remaining = 0; CT prod = 1; CT x = 0; while (true) { UNIFORM(u); prod = prod * u; if (prod <= exp_neg_rate && x <= CT(Eigen::NumTraits<U>::highest())) { samples_rate_output[sample_idx * num_rate] = U(x); break; } x += 1; } } continue; } if (Eigen::numext::isinf(rate) && rate > CT(0)) { for (int64_t sample_idx = output_idx % num_samples; sample_idx < num_samples && output_idx < limit_output; sample_idx++, output_idx++) { U k = Eigen::NumTraits<U>::infinity(); samples_rate_output[sample_idx * num_rate] = k; } continue; } using Eigen::numext::log; const CT log_rate = log(rate); const CT b = CT(0.931) + CT(2.53) * Eigen::numext::sqrt(rate); const CT a = CT(-0.059) + CT(0.02483) * b; const CT inv_alpha = CT(1.1239) + CT(1.1328) / (b - CT(3.4)); for (int64_t sample_idx = output_idx % num_samples; sample_idx < num_samples && output_idx < limit_output; sample_idx++, output_idx++) { random::PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16_t uniform_remaining = 0; while (true) { UNIFORM(u); u -= CT(0.5); UNIFORM(v); CT u_shifted = CT(0.5) - Eigen::numext::abs(u); CT k = Eigen::numext::floor((CT(2) * a / 
u_shifted + b) * u + rate + CT(0.43)); if (k > CT(Eigen::NumTraits<U>::highest())) { continue; } if (u_shifted >= CT(0.07) && v <= CT(0.9277) - CT(3.6224) / (b - CT(2))) { samples_rate_output[sample_idx * num_rate] = U(k); break; } if (k < 0 || (u_shifted < CT(0.013) && v > u_shifted)) { continue; } CT s = log(v * inv_alpha / (a / (u_shifted * u_shifted) + b)); CT t = -rate + k * log_rate - Eigen::numext::lgamma(k + 1); if (s <= t) { samples_rate_output[sample_idx * num_rate] = U(k); break; } } } } }; static const int kElementCost = 165 + 6 * Uniform::kElementCost + 6 * random::PhiloxRandom::kElementCost; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_rate * num_samples, kElementCost, DoWork); } private: typedef typename PoissonComputeType<T>::ComputeType CT; }; } namespace { template <typename T, typename U> class RandomPoissonOp : public OpKernel { public: explicit RandomPoissonOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, generator_.Init(context)); } void Compute(OpKernelContext* ctx) override { const Tensor& shape_t = ctx->input(0); const Tensor& rate_t = ctx->input(1); TensorShape samples_shape; OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_t, &samples_shape)); const int64_t num_samples = samples_shape.num_elements(); OP_REQUIRES_OK(ctx, samples_shape.AppendShapeWithStatus(rate_t.shape())); Tensor* samples_t = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, samples_shape, &samples_t)); if (num_samples == 0) return; const auto rate_flat = rate_t.flat<T>().data(); const int64_t num_rate = rate_t.NumElements(); auto samples_flat = samples_t->flat<U>().data(); random::PhiloxRandom rng = generator_.ReserveRandomOutputs( num_samples * num_rate, kReservedSamplesPerOutput); functor::PoissonFunctor<CPUDevice, T, U>()( ctx, ctx->eigen_device<CPUDevice>(), rate_flat, num_rate, num_samples, rng, samples_flat); } private: GuardedPhiloxRandom generator_; RandomPoissonOp(const RandomPoissonOp&) = delete; void operator=(const RandomPoissonOp&) = delete; }; } #undef UNIFORM #define REGISTER(TYPE) \ REGISTER_KERNEL_BUILDER( \ Name("RandomPoisson").Device(DEVICE_CPU).TypeConstraint<TYPE>("dtype"), \ RandomPoissonOp<TYPE, TYPE>); TF_CALL_half(REGISTER); TF_CALL_float(REGISTER); TF_CALL_double(REGISTER); #define REGISTER_V2(RTYPE, OTYPE) \ template struct functor::PoissonFunctor<CPUDevice, RTYPE, OTYPE>; \ REGISTER_KERNEL_BUILDER(Name("RandomPoissonV2") \ .Device(DEVICE_CPU) \ .TypeConstraint<RTYPE>("R") \ .TypeConstraint<OTYPE>("dtype"), \ RandomPoissonOp<RTYPE, OTYPE>); #define REGISTER_ALL(RTYPE) \ REGISTER_V2(RTYPE, Eigen::half); \ REGISTER_V2(RTYPE, float); \ REGISTER_V2(RTYPE, double); \ REGISTER_V2(RTYPE, int32); \ REGISTER_V2(RTYPE, int64_t); REGISTER_ALL(Eigen::half); REGISTER_ALL(float); REGISTER_ALL(double); REGISTER_ALL(int32); REGISTER_ALL(int64_t); #undef REGISTER_ALL #undef REGISTER_V2 #undef REGISTER }
#include <random> #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { Tensor VecShape(int64_t v) { if (v >= std::numeric_limits<int32>::max()) { Tensor shape(DT_INT64, TensorShape({1})); shape.vec<int64_t>()(0) = v; return shape; } else { Tensor shape(DT_INT32, TensorShape({1})); shape.vec<int32>()(0) = v; return shape; } } Tensor VecLam32(int64_t n, int magnitude) { std::mt19937 gen(0x12345); std::uniform_real_distribution<float> dist(0.0, 1.0); Tensor lams(DT_FLOAT, TensorShape({n})); for (int i = 0; i < n; i++) { lams.vec<float>()(i) = magnitude * (1 + dist(gen)); } return lams; } Tensor VecLam64(int64_t n, int magnitude) { std::mt19937 gen(0x12345); std::uniform_real_distribution<double> dist(0.0, 1.0); Tensor lams(DT_DOUBLE, TensorShape({n})); for (int i = 0; i < n; i++) { lams.vec<double>()(i) = magnitude * (1 + dist(gen)); } return lams; } #define BM_Poisson(DEVICE, BITS, MAGNITUDE) \ static void BM_##DEVICE##_RandomPoisson_lam_##MAGNITUDE##_##BITS( \ ::testing::benchmark::State& state) { \ const int nsamp = state.range(0); \ const int nlam = state.range(1); \ \ Graph* g = new Graph(OpRegistry::Global()); \ test::graph::RandomPoisson( \ g, test::graph::Constant(g, VecShape(nsamp)), \ test::graph::Constant(g, VecLam##BITS(nlam, MAGNITUDE))); \ test::Benchmark(#DEVICE, g, false).Run(state); \ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * nsamp * \ nlam); \ } \ BENCHMARK(BM_##DEVICE##_RandomPoisson_lam_##MAGNITUDE##_##BITS) \ ->RangePair(1, 64, 2, 50); BM_Poisson(cpu, 32, 1); BM_Poisson(cpu, 32, 8); BM_Poisson(cpu, 32, 32); BM_Poisson(cpu, 64, 1); BM_Poisson(cpu, 64, 8); BM_Poisson(cpu, 64, 32); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_poisson_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_poisson_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
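For rates below 10 the kernel above draws samples with Knuth's multiplicative method: keep multiplying uniform variates into a running product and stop once the product falls below exp(-rate); the number of multiplications before stopping, minus one, is the Poisson sample. (Larger rates switch to the transformed-rejection path built around the b, a, and inv_alpha constants.) A standalone sketch of the small-rate branch, using std::mt19937 in place of the kernel's counter-based PhiloxRandom purely for illustration:

#include <cmath>
#include <iostream>
#include <random>

// Knuth's method: the expected number of uniform draws grows linearly with
// rate, which is why the kernel only takes this branch for rate < 10.
int SamplePoissonKnuth(double rate, std::mt19937& gen) {
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  const double exp_neg_rate = std::exp(-rate);
  double prod = 1.0;
  int x = 0;
  while (true) {
    prod *= uniform(gen);
    if (prod <= exp_neg_rate) return x;  // same stopping rule as the kernel
    ++x;
  }
}

int main() {
  std::mt19937 gen(12345);
  for (int i = 0; i < 5; ++i) {
    std::cout << SamplePoissonKnuth(3.0, gen) << " ";
  }
  std::cout << "\n";
  return 0;
}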
bf90ee0a-f59e-4e9c-9cf8-d22906551122
cpp
tensorflow/tensorflow
uniform_quantized_convolution_ops
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops_test.cc
#include <algorithm> #include <limits> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h" #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h" namespace tensorflow { namespace { using tensorflow::errors::InvalidArgument; std::vector<int32_t> LhsTransposePerm( const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers, const int dims) { std::vector<int32_t> lhs_perm(dims); lhs_perm[0] = dimension_numbers.input_batch_dimension(); lhs_perm[1] = dimension_numbers.input_feature_dimension(); std::copy(dimension_numbers.input_spatial_dimensions().begin(), dimension_numbers.input_spatial_dimensions().end(), lhs_perm.begin() + 2); return lhs_perm; } std::vector<int32_t> RhsTransposePerm( const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers, const int dims) { std::vector<int32_t> rhs_perm(dims); rhs_perm[0] = dimension_numbers.kernel_output_feature_dimension(); rhs_perm[1] = dimension_numbers.kernel_input_feature_dimension(); std::copy(dimension_numbers.kernel_spatial_dimensions().begin(), dimension_numbers.kernel_spatial_dimensions().end(), rhs_perm.begin() + 2); return rhs_perm; } std::vector<int32_t> OutTransposePerm( const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers, const int dims) { std::vector<int32_t> out_perm(dims); out_perm[0] = dimension_numbers.output_batch_dimension(); out_perm[1] = dimension_numbers.output_feature_dimension(); std::copy(dimension_numbers.output_spatial_dimensions().begin(), dimension_numbers.output_spatial_dimensions().end(), out_perm.begin() + 2); return out_perm; } std::vector<int32_t> OutBackTransposePerm(absl::Span<const int32_t> out_perm) { std::vector<int32_t> out_perm_back(out_perm.size()); for (int i = 0; i < out_perm.size(); ++i) { out_perm_back[out_perm[i]] = i; } return out_perm_back; } TensorShape PaddedAndDilatedTransposedLhsShape( const TensorShape& in_shape, const UniformQuantizedConvolutionParams& convolution_params) { TensorShape out_shape = in_shape; for (int i = 2; i < in_shape.dims(); ++i) { const int64_t lhs_size_dilated = UniformQuantizedConvolutionParams::DilatedSize( in_shape.dim_size(i), convolution_params.lhs_dilation()[i - 2]); const int64_t out_lhs_size = lhs_size_dilated + convolution_params.padding_list()[2 * (i - 2)] + convolution_params.padding_list()[2 * (i - 2) + 1]; out_shape.set_dim(i, out_lhs_size); } return out_shape; } int64_t PaddedAndDilatedTransposedLhsSpatialIdx( const UniformQuantizedConvolutionParams& convolution_params, const TensorShape& lhs_in_shape, const TensorShape& lhs_out_shape, int64_t in_spatial_idx) { int64_t out_spatial_idx = 0; int64_t out_spatial_inner_dim_size = 1; for (int dim = lhs_in_shape.dims() - 1; dim >= 2; --dim) { const int64_t in_spatial_idx_of_dim = in_spatial_idx % lhs_in_shape.dim_size(dim); const int64_t out_spatial_idx_of_dim = convolution_params.padding_list()[2 * (dim - 2)] + convolution_params.lhs_dilation()[dim - 2] * in_spatial_idx_of_dim; out_spatial_idx += out_spatial_idx_of_dim * out_spatial_inner_dim_size; in_spatial_idx /= lhs_in_shape.dim_size(dim); out_spatial_inner_dim_size *= lhs_out_shape.dim_size(dim); } return out_spatial_idx; } int64_t 
ConvolutionTransposedLhsSpatialIdx( const UniformQuantizedConvolutionParams& convolution_params, const TensorShape& lhs_shape, const TensorShape& rhs_shape, const TensorShape& out_shape, int64_t rhs_spatial_idx, int64_t out_spatial_idx) { int64_t lhs_spatial_idx = 0; int64_t lhs_spatial_inner_dim_size = 1; for (int dim = lhs_shape.dims() - 1; dim >= 2; --dim) { const int64_t rhs_spatial_idx_of_dim = rhs_spatial_idx % rhs_shape.dim_size(dim); const int64_t out_spatial_idx_of_dim = out_spatial_idx % out_shape.dim_size(dim); const int64_t lhs_spatial_idx_of_dim = out_spatial_idx_of_dim * convolution_params.window_strides()[dim - 2] + rhs_spatial_idx_of_dim * convolution_params.rhs_dilation()[dim - 2]; lhs_spatial_idx += lhs_spatial_idx_of_dim * lhs_spatial_inner_dim_size; rhs_spatial_idx /= rhs_shape.dim_size(dim); out_spatial_idx /= out_shape.dim_size(dim); lhs_spatial_inner_dim_size *= lhs_shape.dim_size(dim); } return lhs_spatial_idx; } template <typename Tlhs> void PadAndDilateTransposedLhs( const Tensor& lhs_in, const UniformQuantizedConvolutionParams& convolution_params, const Tensor& lhs_zero_points, Tensor& lhs_out) { auto lhs_in_tensor = lhs_in.flat_outer_dims<Tlhs, 3>(); auto lhs_out_tensor = lhs_out.flat_outer_dims<Tlhs, 3>(); const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); const bool is_lhs_zero_points_scalar = lhs_zero_points.dims() == 0; for (int64_t batch_idx = 0; batch_idx < lhs_in.dim_size(0); ++batch_idx) { lhs_out_tensor.template chip<0>(batch_idx).setConstant( lhs_zero_points_data[is_lhs_zero_points_scalar ? 0 : batch_idx]); for (int64_t feature_idx = 0; feature_idx < lhs_in.dim_size(1); ++feature_idx) { for (int64_t in_spatial_idx = 0; in_spatial_idx < lhs_in_tensor.dimension(2); ++in_spatial_idx) { const int64_t out_spatial_idx = PaddedAndDilatedTransposedLhsSpatialIdx( convolution_params, lhs_in.shape(), lhs_out.shape(), in_spatial_idx); lhs_out_tensor(batch_idx, feature_idx, out_spatial_idx) = lhs_in_tensor(batch_idx, feature_idx, in_spatial_idx); } } } } template <typename Tlhs, typename Trhs, typename Tout, typename AccF, typename OutF> void ConvWithAccFunctionAndOutFunction( const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, Tensor& out, const AccF& acc_f, const OutF& out_f) { const int64_t out_feature_group_size_by_feature_group_count = out.dim_size(1) / convolution_params.feature_group_count(); const int64_t out_feature_group_size_by_batch_group_count = out.dim_size(1) / convolution_params.batch_group_count(); auto lhs_tensor = lhs.flat_outer_dims<Tlhs, 3>(); auto rhs_tensor = rhs.flat_outer_dims<Trhs, 3>(); auto out_tensor = out.flat_outer_dims<Tout, 3>(); for (int64_t out_batch_idx = 0; out_batch_idx < out_tensor.dimension(0); ++out_batch_idx) { for (int64_t out_feature_idx = 0; out_feature_idx < out_tensor.dimension(1); ++out_feature_idx) { const int64_t lhs_batch_idx = (out_feature_idx / out_feature_group_size_by_batch_group_count) * out_tensor.dimension(0) + out_batch_idx; for (int out_spatial_idx = 0; out_spatial_idx < out_tensor.dimension(2); ++out_spatial_idx) { int32_t acc = 0; for (int64_t rhs_in_feature_idx = 0; rhs_in_feature_idx < rhs_tensor.dimension(1); ++rhs_in_feature_idx) { const int64_t lhs_feature_idx = (out_feature_idx / out_feature_group_size_by_feature_group_count) * rhs_tensor.dimension(1) + rhs_in_feature_idx; for (int64_t rhs_spatial_idx = 0; rhs_spatial_idx < rhs_tensor.dimension(2); ++rhs_spatial_idx) { const int64_t lhs_spatial_idx = 
ConvolutionTransposedLhsSpatialIdx( convolution_params, lhs.shape(), rhs.shape(), out.shape(), rhs_spatial_idx, out_spatial_idx); const Tlhs lhs_val = lhs_tensor(lhs_batch_idx, lhs_feature_idx, lhs_spatial_idx); const Trhs rhs_val = rhs_tensor(out_feature_idx, rhs_in_feature_idx, rhs_spatial_idx); acc += acc_f(lhs_val, rhs_val, lhs_batch_idx, out_feature_idx); } } out_tensor(out_batch_idx, out_feature_idx, out_spatial_idx) = out_f(acc, lhs_batch_idx, out_feature_idx); } } } } template <typename Tin, typename Tout> Status EvalLhsPerTensorAndRhsPerTensorQuantizedConv( const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, const float lhs_scale, const int32_t lhs_zero_point, const float rhs_scale, const int32_t rhs_zero_point, const float output_scale, const int32_t output_zero_point, const int output_quantization_min_val, const int output_quantization_max_val, Tensor& out) { const double effective_multiplier = static_cast<double>(lhs_scale) * rhs_scale / output_scale; int32_t effective_quantized_multiplier; int effective_shift; TF_RETURN_IF_ERROR(QuantizeMultiplier( effective_multiplier, effective_quantized_multiplier, effective_shift)); ConvWithAccFunctionAndOutFunction<Tin, Tin, Tout>( lhs, rhs, convolution_params, out, [lhs_zero_point, rhs_zero_point](Tin lhs_val, Tin rhs_val, int64_t lhs_batch_idx, int64_t out_feature_idx) { return (static_cast<int32_t>(lhs_val) - lhs_zero_point) * (static_cast<int32_t>(rhs_val) - rhs_zero_point); }, [effective_quantized_multiplier, effective_shift, output_zero_point, output_quantization_min_val, output_quantization_max_val]( int32_t acc, int64_t lhs_batch_idx, int64_t out_feature_idx) { return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>( acc, effective_quantized_multiplier, effective_shift, 0, output_zero_point, output_quantization_min_val, output_quantization_max_val); }); return absl::OkStatus(); } template <typename Tin, typename Tout> Status EvalLhsPerTensorAndRhsPerChannelQuantizedConv( OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, const float lhs_scale, const int32_t lhs_zero_point, const Tensor& rhs_scales, const Tensor& rhs_zero_points, const Tensor& output_scales, const Tensor& output_zero_points, const int output_quantization_min_val, const int output_quantization_max_val, Tensor& out) { const int64_t out_feature_size = out.dim_size(1); const float* rhs_scales_data = rhs_scales.flat<float>().data(); const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data(); Tensor effective_quantized_multipliers; TF_RETURN_IF_ERROR(context->allocate_temp(DT_INT32, rhs_scales.shape(), &effective_quantized_multipliers)); Tensor effective_shifts; TF_RETURN_IF_ERROR( context->allocate_temp(DT_INT32, rhs_scales.shape(), &effective_shifts)); int32_t* effective_quantized_multipliers_data = effective_quantized_multipliers.flat<int32_t>().data(); int32_t* effective_shifts_data = effective_shifts.flat<int32_t>().data(); const bool is_output_scales_scalar = output_scales.dims() == 0; if (!is_output_scales_scalar) { const float* output_scales_data = output_scales.flat<float>().data(); for (int64_t out_feature_idx = 0; out_feature_idx < out_feature_size; ++out_feature_idx) { const double effective_multiplier = static_cast<double>(lhs_scale) * rhs_scales_data[out_feature_idx] / output_scales_data[out_feature_idx]; TF_RETURN_IF_ERROR(QuantizeMultiplier( effective_multiplier, 
effective_quantized_multipliers_data[out_feature_idx], effective_shifts_data[out_feature_idx])); } } else { const float output_scale = output_scales.scalar<float>()(); for (int64_t out_feature_idx = 0; out_feature_idx < out_feature_size; ++out_feature_idx) { const double effective_multiplier = static_cast<double>(lhs_scale) * rhs_scales_data[out_feature_idx] / output_scale; TF_RETURN_IF_ERROR(QuantizeMultiplier( effective_multiplier, effective_quantized_multipliers_data[out_feature_idx], effective_shifts_data[out_feature_idx])); } } const int32_t* output_zero_points_data = output_zero_points.flat<int32_t>().data(); ConvWithAccFunctionAndOutFunction<Tin, Tin, Tout>( lhs, rhs, convolution_params, out, [lhs_zero_point, rhs_zero_points_data](Tin lhs_val, Tin rhs_val, int64_t lhs_batch_idx, int64_t out_feature_idx) { return (static_cast<int32_t>(lhs_val) - lhs_zero_point) * (static_cast<int32_t>(rhs_val) - rhs_zero_points_data[out_feature_idx]); }, [effective_quantized_multipliers_data, effective_shifts_data, output_zero_points_data, output_quantization_min_val, output_quantization_max_val, is_output_scales_scalar]( int32_t acc, int64_t lhs_batch_idx, int64_t out_feature_idx) { return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>( acc, effective_quantized_multipliers_data[out_feature_idx], effective_shifts_data[out_feature_idx], 0, output_zero_points_data[is_output_scales_scalar ? 0 : out_feature_idx], output_quantization_min_val, output_quantization_max_val); }); return absl::OkStatus(); } template <typename Tlhs, typename Trhs> void EvalLhsPerBatchAndRhsPerTensorQuantizedConv( OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, const Tensor& lhs_scales, const Tensor& lhs_zero_points, const float rhs_scale, const int32_t rhs_zero_point, Tensor& out) { const float* lhs_scales_data = lhs_scales.flat<float>().data(); const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); ConvWithAccFunctionAndOutFunction<Tlhs, Trhs, float>( lhs, rhs, convolution_params, out, [lhs_zero_points_data, rhs_zero_point](Tlhs lhs_val, Trhs rhs_val, int64_t lhs_batch_idx, int64_t out_feature_idx) { return (static_cast<int32_t>(lhs_val) - lhs_zero_points_data[lhs_batch_idx]) * (static_cast<int32_t>(rhs_val) - rhs_zero_point); }, [lhs_scales_data, rhs_scale](int32_t acc, int64_t lhs_batch_idx, int64_t out_feature_idx) { return acc * lhs_scales_data[lhs_batch_idx] * rhs_scale; }); } template <typename Tlhs, typename Trhs> void EvalLhsPerBatchAndRhsPerChannelQuantizedConv( const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, const Tensor& lhs_scales, const Tensor& lhs_zero_points, const Tensor& rhs_scales, const Tensor& rhs_zero_points, Tensor& out) { const float* lhs_scales_data = lhs_scales.flat<float>().data(); const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); const float* rhs_scales_data = rhs_scales.flat<float>().data(); const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data(); ConvWithAccFunctionAndOutFunction<Tlhs, Trhs, float>( lhs, rhs, convolution_params, out, [lhs_zero_points_data, rhs_zero_points_data](Tlhs lhs_val, Trhs rhs_val, int64_t lhs_batch_idx, int64_t out_feature_idx) { return (static_cast<int32_t>(lhs_val) - lhs_zero_points_data[lhs_batch_idx]) * (static_cast<int32_t>(rhs_val) - rhs_zero_points_data[out_feature_idx]); }, [lhs_scales_data, rhs_scales_data](int32_t acc, int64_t lhs_batch_idx, int64_t 
out_feature_idx) { return acc * lhs_scales_data[lhs_batch_idx] * rhs_scales_data[out_feature_idx]; }); } template <typename Tin, typename Tout> Status EvalQuantizedConv( OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, const Tensor& lhs_scales, const Tensor& lhs_zero_points, const Tensor& rhs_scales, const Tensor& rhs_zero_points, const Tensor& output_scales, const Tensor& output_zero_points, int output_quantization_min_val, int output_quantization_max_val, Tensor& out) { const auto& dimension_numbers = convolution_params.dimension_numbers(); const auto& lhs_perm = LhsTransposePerm(dimension_numbers, lhs.dims()); Tensor lhs_transposed; TF_RETURN_IF_ERROR(context->allocate_temp( lhs.dtype(), TransposedShape(lhs.shape(), lhs_perm), &lhs_transposed)); Transpose<Tin>(lhs, lhs_perm, lhs_transposed); const auto& rhs_perm = RhsTransposePerm(dimension_numbers, rhs.dims()); Tensor rhs_transposed; TF_RETURN_IF_ERROR(context->allocate_temp( rhs.dtype(), TransposedShape(rhs.shape(), rhs_perm), &rhs_transposed)); Transpose<Tin>(rhs, rhs_perm, rhs_transposed); const auto& out_perm = OutTransposePerm(dimension_numbers, out.dims()); Tensor out_transposed; TF_RETURN_IF_ERROR(context->allocate_temp( out.dtype(), TransposedShape(out.shape(), out_perm), &out_transposed)); Tensor lhs_padded_and_dilated; TF_RETURN_IF_ERROR( context->allocate_temp(lhs_transposed.dtype(), PaddedAndDilatedTransposedLhsShape( lhs_transposed.shape(), convolution_params), &lhs_padded_and_dilated)); PadAndDilateTransposedLhs<Tin>(lhs_transposed, convolution_params, lhs_zero_points, lhs_padded_and_dilated); const float lhs_scale = lhs_scales.scalar<float>()(); const int32_t lhs_zero_point = lhs_zero_points.scalar<int32_t>()(); if (rhs_scales.dims() != 0) { TF_RETURN_IF_ERROR(EvalLhsPerTensorAndRhsPerChannelQuantizedConv<Tin, Tout>( context, lhs_padded_and_dilated, rhs_transposed, convolution_params, lhs_scale, lhs_zero_point, rhs_scales, rhs_zero_points, output_scales, output_zero_points, output_quantization_min_val, output_quantization_max_val, out_transposed)); } else { DCHECK_EQ(output_scales.dims(), 0); const float rhs_scale = rhs_scales.scalar<float>()(); const int32_t rhs_zero_point = rhs_zero_points.scalar<int32_t>()(); const float output_scale = output_scales.scalar<float>()(); const int32_t output_zero_point = output_zero_points.scalar<int32_t>()(); TF_RETURN_IF_ERROR(EvalLhsPerTensorAndRhsPerTensorQuantizedConv<Tin, Tout>( lhs_padded_and_dilated, rhs_transposed, convolution_params, lhs_scale, lhs_zero_point, rhs_scale, rhs_zero_point, output_scale, output_zero_point, output_quantization_min_val, output_quantization_max_val, out_transposed)); } const auto& out_perm_back = OutBackTransposePerm(out_perm); Transpose<Tout>(out_transposed, out_perm_back, out); return absl::OkStatus(); } template <typename Trhs> Status EvalHybridConv( OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const UniformQuantizedConvolutionParams& convolution_params, const Tensor& rhs_scales, const Tensor& rhs_zero_points, Tensor& out) { using TlhsQuant = Trhs; DataType lhs_quant_dtype = DataTypeToEnum<TlhsQuant>::v(); const auto& dimension_numbers = convolution_params.dimension_numbers(); const auto& lhs_perm = LhsTransposePerm(dimension_numbers, lhs.dims()); Tensor lhs_transposed; TF_RETURN_IF_ERROR(context->allocate_temp( DT_FLOAT, TransposedShape(lhs.shape(), lhs_perm), &lhs_transposed)); Transpose<float>(lhs, lhs_perm, lhs_transposed); const auto& rhs_perm = 
RhsTransposePerm(dimension_numbers, rhs.dims()); Tensor rhs_transposed; TF_RETURN_IF_ERROR(context->allocate_temp( rhs.dtype(), TransposedShape(rhs.shape(), rhs_perm), &rhs_transposed)); Transpose<Trhs>(rhs, rhs_perm, rhs_transposed); const auto& out_perm = OutTransposePerm(dimension_numbers, out.dims()); Tensor out_transposed; TF_RETURN_IF_ERROR(context->allocate_temp( DT_FLOAT, TransposedShape(out.shape(), out_perm), &out_transposed)); const int64_t lhs_batch_size = lhs_transposed.dim_size(0); Tensor lhs_quantized; TF_RETURN_IF_ERROR(context->allocate_temp( lhs_quant_dtype, lhs_transposed.shape(), &lhs_quantized)); Tensor lhs_scales; TF_RETURN_IF_ERROR( context->allocate_temp(DT_FLOAT, {lhs_batch_size}, &lhs_scales)); Tensor lhs_zero_points; TF_RETURN_IF_ERROR( context->allocate_temp(DT_INT32, {lhs_batch_size}, &lhs_zero_points)); float* lhs_scales_data = lhs_scales.flat<float>().data(); int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); auto lhs_tensor = lhs_transposed.template flat_outer_dims<float, 2>(); auto lhs_quantized_tensor = lhs_quantized.template flat_outer_dims<TlhsQuant, 2>(); for (int64_t b = 0; b < lhs_batch_size; ++b) { TF_RETURN_IF_ERROR(AsymmetricQuantize( lhs_tensor.template chip<0>(b), std::numeric_limits<TlhsQuant>::lowest(), std::numeric_limits<TlhsQuant>::max(), lhs_scales_data[b], lhs_zero_points_data[b], lhs_quantized_tensor.template chip<0>(b))); } Tensor lhs_padded_and_dilated; TF_RETURN_IF_ERROR( context->allocate_temp(lhs_quant_dtype, PaddedAndDilatedTransposedLhsShape( lhs_quantized.shape(), convolution_params), &lhs_padded_and_dilated)); PadAndDilateTransposedLhs<TlhsQuant>(lhs_quantized, convolution_params, lhs_zero_points, lhs_padded_and_dilated); if (rhs_scales.dims() != 0) { EvalLhsPerBatchAndRhsPerChannelQuantizedConv<TlhsQuant, Trhs>( lhs_padded_and_dilated, rhs_transposed, convolution_params, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, out_transposed); } else { EvalLhsPerBatchAndRhsPerTensorQuantizedConv<TlhsQuant, Trhs>( context, lhs_padded_and_dilated, rhs_transposed, convolution_params, lhs_scales, lhs_zero_points, rhs_scales.scalar<float>()(), rhs_zero_points.scalar<int32_t>()(), out_transposed); } const auto& out_perm_back = OutBackTransposePerm(out_perm); Transpose<float>(out_transposed, out_perm_back, out); return absl::OkStatus(); } } template <typename Tin, typename Tout> class UniformQuantizedConvolutionOp : public OpKernel { public: explicit UniformQuantizedConvolutionOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, convolution_params_.LoadFromAttrs(*context)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val", &output_quantization_min_val_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val", &output_quantization_max_val_)); int lhs_quantization_axis; OP_REQUIRES_OK(context, context->GetAttr("lhs_quantization_axis", &lhs_quantization_axis)); OP_REQUIRES( context, (lhs_quantization_axis == -1), InvalidArgument("lhs_quantization_axis Attr must be -1 (per-tensor).")); OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis", &rhs_quantization_axis_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis", &output_quantization_axis_)); } void Compute(OpKernelContext* context) override { const Tensor& lhs = context->input(0); const Tensor& rhs = context->input(1); const Tensor& lhs_scales = context->input(2); const Tensor& lhs_zero_points = context->input(3); const Tensor& rhs_scales = context->input(4); const 
Tensor& rhs_zero_points = context->input(5); const Tensor& output_scales = context->input(6); const Tensor& output_zero_points = context->input(7); OP_REQUIRES(context, (AllElementsPositive<float>(lhs_scales)), InvalidArgument("lhs scales elements must be all positive.")); OP_REQUIRES(context, (AllElementsPositive<float>(rhs_scales)), InvalidArgument("rhs scales elements must be all positive.")); OP_REQUIRES( context, (AllElementsPositive<float>(output_scales)), InvalidArgument("output scales elements must be all positive.")); OP_REQUIRES_OK(context, convolution_params_.ValidateOrFillParamsAndValidateShape( lhs.shape(), rhs.shape())); OP_REQUIRES( context, (lhs_scales.IsSameSize(lhs_zero_points) && lhs_scales.dims() == 0), InvalidArgument( "lhs scales/zero_points must be all scalar tensors. Given: ", lhs_scales.shape().DebugString(), lhs_zero_points.shape().DebugString())); OP_REQUIRES( context, (rhs_quantization_axis_ == -1 || rhs_quantization_axis_ == convolution_params_.dimension_numbers() .kernel_output_feature_dimension()), InvalidArgument("rhs_quantization_axis Attr must be -1 (per-tensor) or " "dimension_numbers.kernel_output_feature_dimension " "(per-channel).")); OP_REQUIRES_OK( context, QuantizationAxisAndShapeValid(rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(), rhs_quantization_axis_)); OP_REQUIRES( context, (output_quantization_axis_ == -1 || output_quantization_axis_ == convolution_params_.dimension_numbers() .output_feature_dimension()), InvalidArgument( "output_quantization_axis Attr must be -1 (per-tensor) or " "dimension_numbers.output_feature_dimension (per-channel).")); auto output_shape = convolution_params_.CalculateOutputShape(lhs.shape(), rhs.shape()); OP_REQUIRES_OK(context, output_shape.status()); OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid( output_shape.value(), output_scales.shape(), output_zero_points.shape(), output_quantization_axis_)); OP_REQUIRES( context, (rhs_scales.dims() > 0 || output_scales.dims() == 0), InvalidArgument( "If rhs is per-tensor quantized, output must be also per-tensor " "quantized. 
Given output scales/zero_points of rank ", output_scales.dims())); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape.value(), &output)); OP_REQUIRES_OK( context, EvalQuantizedConv<Tin, Tout>( context, lhs, rhs, convolution_params_, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, output_zero_points, output_quantization_min_val_, output_quantization_max_val_, *output)); } private: UniformQuantizedConvolutionParams convolution_params_; int rhs_quantization_axis_; int output_quantization_axis_; int output_quantization_min_val_; int output_quantization_max_val_; }; template <typename Tlhs, typename Trhs, typename Tout> class UniformQuantizedConvolutionHybridOp : public OpKernel { public: explicit UniformQuantizedConvolutionHybridOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis", &rhs_quantization_axis_)); OP_REQUIRES_OK(context, convolution_params_.LoadFromAttrs(*context)); } void Compute(OpKernelContext* context) override { const Tensor& lhs = context->input(0); const Tensor& rhs = context->input(1); const Tensor& rhs_scales = context->input(2); const Tensor& rhs_zero_points = context->input(3); OP_REQUIRES(context, (AllElementsPositive<float>(rhs_scales)), InvalidArgument("rhs scales elements must be all positive.")); OP_REQUIRES_OK(context, convolution_params_.ValidateOrFillParamsAndValidateShape( lhs.shape(), rhs.shape())); OP_REQUIRES( context, (rhs_quantization_axis_ == -1 || rhs_quantization_axis_ == convolution_params_.dimension_numbers() .kernel_output_feature_dimension()), InvalidArgument("rhs_quantization_axis Attr must be -1 (per-tensor) or " "dimension_numbers.kernel_output_feature_dimension " "(per-channel).")); OP_REQUIRES_OK( context, QuantizationAxisAndShapeValid(rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(), rhs_quantization_axis_)); Tensor* output; auto output_shape = convolution_params_.CalculateOutputShape(lhs.shape(), rhs.shape()); OP_REQUIRES_OK(context, output_shape.status()); OP_REQUIRES_OK(context, context->allocate_output(0, output_shape.value(), &output)); OP_REQUIRES_OK(context, EvalHybridConv<Trhs>(context, lhs, rhs, convolution_params_, rhs_scales, rhs_zero_points, *output)); } private: UniformQuantizedConvolutionParams convolution_params_; int rhs_quantization_axis_; }; REGISTER_KERNEL_BUILDER(Name("UniformQuantizedConvolution") .Device(DEVICE_CPU) .TypeConstraint<qint8>("Tin") .TypeConstraint<qint32>("Tout"), UniformQuantizedConvolutionOp<qint8, qint32>); REGISTER_KERNEL_BUILDER( Name("UniformQuantizedConvolutionHybrid") .Device(DEVICE_CPU) .TypeConstraint<float>("Tlhs") .TypeConstraint<qint8>("Trhs") .TypeConstraint<float>("Tout"), UniformQuantizedConvolutionHybridOp<float, qint8, float>); }
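The quantized convolution kernel above accumulates (lhs - lhs_zero_point) * (rhs - rhs_zero_point) products into an int32 accumulator and then requantizes that accumulator with the effective multiplier lhs_scale * rhs_scale / output_scale, adds the output zero point, and clamps to the output range. The real kernel does this in fixed point via QuantizeMultiplier and AffineRequantizeWithQuantizedMultiplierAndShift; the sketch below substitutes plain floating-point rounding purely to make the arithmetic visible:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Requantize an int32 accumulator into the output quantized domain.
// Floating-point stand-in for the kernel's fixed-point multiplier path.
int32_t Requantize(int32_t acc, float lhs_scale, float rhs_scale,
                   float output_scale, int32_t output_zero_point,
                   int32_t qmin, int32_t qmax) {
  const double effective_multiplier =
      static_cast<double>(lhs_scale) * rhs_scale / output_scale;
  const int64_t requantized =
      static_cast<int64_t>(std::lround(acc * effective_multiplier)) +
      output_zero_point;
  return static_cast<int32_t>(
      std::min<int64_t>(qmax, std::max<int64_t>(qmin, requantized)));
}

int main() {
  // With lhs_scale = rhs_scale = 2.0, output_scale = 3.0 and
  // output_zero_point = 3 (the per-tensor test configuration in the unit
  // test that follows), an accumulator of 300 maps to
  // round(300 * 4 / 3) + 3 = 403.
  std::cout << Requantize(300, 2.0f, 2.0f, 3.0f, 3, INT32_MIN, INT32_MAX)
            << "\n";
  return 0;
}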
#include <cstdint> #include <limits> #include <vector> #include <gtest/gtest.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h" namespace tensorflow { namespace { using protobuf::TextFormat; constexpr int32_t kInt8Min = std::numeric_limits<int8_t>::min(); constexpr int32_t kInt8Max = std::numeric_limits<int8_t>::max(); constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min(); constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max(); template <typename T> std::vector<T> Arange(int start, int stop, int step = 1) { std::vector<T> array; int val = start; while (val < stop) { array.push_back(val); val += step; } return array; } } class UniformQuantizedConvolutionTest : public OpsTestBase { protected: }; TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedDefaultAttrs) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2})); test::FillValues<qint32>( &expected, {4062, 3830, 3134, 2902, 990, 950, 830, 790, -2082, -1930, -1474, -1322, -1506, -1738, -2434, -2666, 30, -10, -130, -170, 1566, 1718, 2174, 2326}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetStrides) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("window_strides", {2, 3}) 
.Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 1, 1})); test::FillValues<qint32>(&expected, {4062, 990, -2082, -1506, 30, 1566}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetExplicitPadding) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "EXPLICIT") .Attr("explicit_padding", {0, 1, 1, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 3, 5})); test::FillValues<qint32>( &expected, {2694, 4062, 3830, 2550, 1272, 2096, 3134, 2902, 1910, 942, 968, 1432, 1304, 848, 414, 582, 990, 950, 694, 376, 496, 830, 790, 566, 302, 296, 472, 440, 304, 158, -1530, -2082, -1930, -1162, -520, -1104, -1474, -1322, -778, -338, -376, -488, -424, -240, -98, -890, -1506, -1738, -1290, -712, -1488, -2434, -2666, -1930, -1042, -1016, -1640, -1768, -1264, -674, 70, 30, -10, -74, -72, -16, -130, -170, -202, -146, -152, -296, -328, -272, -162, 1030, 1566, 1718, 1142, 568, 1456, 2174, 2326, 1526, 750, 712, 1048, 1112, 720, 350}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetSamePadding) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "SAME") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({1, 1, 2, 2}), Arange<qint8>(-2, 2)); 
AddInputFromArray<qint8>(TensorShape({1, 1, 2, 1}), Arange<qint8>(1, 3)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {4.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({1, 1, 2, 2})); test::FillValues<qint32>(&expected, {6, 5, 4, 3}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetDimensionNumbers) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 1 input_feature_dimension: 3 input_spatial_dimensions: 2 input_spatial_dimensions: 0 kernel_output_feature_dimension: 2 kernel_input_feature_dimension: 1 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 3 output_batch_dimension: 2 output_feature_dimension: 1 output_spatial_dimensions: 3 output_spatial_dimensions: 0 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({4, 2, 3, 2}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2})); test::FillValues<qint32>( &expected, {1323, 1147, 795, 619, 771, 691, 531, 451, 219, 235, 267, 283, 267, 91, -261, -437, 291, 211, 51, -29, 315, 331, 363, 379}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetFeatureGroupCount) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("feature_group_count", 2) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 4, 3, 4}), Arange<qint8>(-48, 48)); 
AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4, 2, 2})); test::FillValues<qint32>( &expected, {13470, 13142, 12158, 11830, 5790, 5654, 5246, 5110, -546, -490, -322, -266, -3618, -3370, -2626, -2378, -2274, -2602, -3586, -3914, -738, -874, -1282, -1418, 2142, 2198, 2366, 2422, 8286, 8534, 9278, 9526}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetBatchGroupCount) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("batch_group_count", 2) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({4, 2, 3, 4}), Arange<qint8>(-48, 48)); AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4, 2, 2})); test::FillValues<qint32>( &expected, {13470, 13142, 12158, 11830, 5790, 5654, 5246, 5110, 798, 854, 1022, 1078, 2334, 2582, 3326, 3574, 5598, 5270, 4286, 3958, 2526, 2390, 1982, 1846, 2142, 2198, 2366, 2422, 8286, 8534, 9278, 9526}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetLhsDilation) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("lhs_dilation", {2, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); 
AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 4, 5})); test::FillValues<qint32>( &expected, {1680, 819, 1595, 776, 1510, 1107, 536, 1038, 502, 968, 1339, 648, 1254, 606, 1168, 830, 398, 760, 363, 691, 496, 243, 475, 232, 454, 179, 88, 174, 86, 168, 411, 200, 390, 190, 368, 158, 78, 152, 75, 147, -688, -333, -645, -312, -602, -749, -360, -690, -330, -632, -517, -248, -474, -226, -432, -514, -242, -456, -213, -397, -368, -205, -453, -248, -538, -557, -296, -626, -330, -696, -709, -376, -794, -418, -880, -834, -434, -904, -469, -973, -16, -13, -37, -24, -58, 51, 24, 46, 22, 40, -101, -56, -122, -66, -144, 30, 14, 24, 11, 19, 336, 179, 379, 200, 422, 659, 344, 718, 374, 776, 507, 264, 550, 286, 592, 894, 462, 952, 491, 1011}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetRhsDilation) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("rhs_dilation", {2, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 4, 5}), Arange<qint8>(-40, 40)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 1})); test::FillValues<qint32>(&expected, {6192, 5032, 1584, 1384, -3024, -2264, -3088, -4248, -16, -216, 3056, 3816}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedDefaultAttrs) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("rhs_quantization_axis", 0) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); 
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2})); test::FillValues<qint32>( &expected, {4062, 3830, 3134, 2902, 3000, 2856, 2424, 2280, -2082, -1930, -1474, -1322, -1506, -1738, -2434, -2666, -456, -600, -1032, -1176, 1566, 1718, 2174, 2326}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedRhsAndOutputDefaultAttrs) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("rhs_quantization_axis", 0) .Attr("output_quantization_axis", 1) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); AddInputFromArray<float>(TensorShape({3}), {3.0, 2.0, 1.0}); AddInputFromArray<int32>(TensorShape({3}), {3, 2, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2})); test::FillValues<qint32>( &expected, {4062, 3830, 3134, 2902, 4498, 4282, 3634, 3418, -6255, -5799, -4431, -3975, -1506, -1738, -2434, -2666, -686, -902, -1550, -1766, 4689, 5145, 6513, 6969}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedTFConv2DLikeConfig) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 0 input_feature_dimension: 3 input_spatial_dimensions: 1 input_spatial_dimensions: 2 kernel_output_feature_dimension: 3 kernel_input_feature_dimension: 2 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 1 output_batch_dimension: 0 output_feature_dimension: 3 output_spatial_dimensions: 1 output_spatial_dimensions: 2 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("rhs_quantization_axis", 3) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); 
AddInputFromArray<qint8>(TensorShape({2, 3, 4, 2}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({2, 3, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 2, 2, 3})); test::FillValues<qint32>( &expected, {1755, 4099, 1163, 1643, 3811, 1115, 1307, 2947, 971, 1195, 2659, 923, 411, 643, 587, 299, 355, 539, -37, -509, 395, -149, -797, 347}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedTFDepthwiseConv2DLikeConfig) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 0 input_feature_dimension: 3 input_spatial_dimensions: 1 input_spatial_dimensions: 2 kernel_output_feature_dimension: 3 kernel_input_feature_dimension: 2 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 1 output_batch_dimension: 0 output_feature_dimension: 3 output_spatial_dimensions: 1 output_spatial_dimensions: 2 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("rhs_quantization_axis", 3) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("feature_group_count", 2) .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 3, 4, 2}), Arange<qint8>(-24, 24)); AddInputFromArray<qint8>(TensorShape({2, 3, 1, 2}), Arange<qint8>(-6, 6)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0}); AddInputFromArray<int32>(TensorShape({2}), {2, 4}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 2, 2, 2})); test::FillValues<qint32>( &expected, {576, 1390, 528, 1262, 384, 878, 336, 750, 0, -146, -48, -274, -192, -658, -240, -786}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedTFConv3DLikeConfig) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 0 input_feature_dimension: 4 input_spatial_dimensions: 1 input_spatial_dimensions: 2 input_spatial_dimensions: 3 kernel_output_feature_dimension: 4 kernel_input_feature_dimension: 3 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 1 kernel_spatial_dimensions: 2 output_batch_dimension: 0 output_feature_dimension: 4 output_spatial_dimensions: 1 output_spatial_dimensions: 2 
output_spatial_dimensions: 3 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolution") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("rhs_quantization_axis", 4) .Attr("lhs_quantization_min_val", kInt8Min) .Attr("lhs_quantization_max_val", kInt8Max) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Attr("padding", "VALID") .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 3, 4, 2, 2}), Arange<qint8>(-50, 46)); AddInputFromArray<qint8>(TensorShape({2, 3, 2, 2, 2}), Arange<qint8>(-24, 24)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0}); AddInputFromArray<int32>(TensorShape({2}), {2, 4}); AddInputFromArray<float>(TensorShape({}), {3.0}); AddInputFromArray<int32>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 2, 2, 1, 2})); test::FillValues<qint32>( &expected, {7438, 17272, 7054, 16248, 5902, 13176, 5518, 12152, 2830, 4984, 2446, 3960, 1294, 888, 910, -136}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedDefaultAttrs) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 3, 4}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 2})); test::FillValues<float>( &expected, {12176., 11480., 9392., 8696., 2960., 2840., 2480., 2360., -6256., -5800., -4432., -3976., -4528., -5224., -7312., -8008., 80., -40., -400., -520., 4688., 5144., 6512., 6968.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetStrides) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("window_strides", {2, 3}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 3, 4}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor 
expected(allocator(), DT_FLOAT, TensorShape({2, 3, 1, 1})); test::FillValues<float>(&expected, {12176., 2960., -6256., -4528., 80., 4688.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetPadding) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "EXPLICIT") .Attr("explicit_padding", {0, 1, 1, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 3, 4}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 5})); test::FillValues<float>( &expected, {8072., 12176., 11480., 7640., 3808., 6280., 9392., 8696., 5720., 2816., 2896., 4288., 3904., 2536., 1232., 1736., 2960., 2840., 2072., 1120., 1480., 2480., 2360., 1688., 896., 880., 1408., 1312., 904., 464., -4600., -6256., -5800., -3496., -1568., -3320., -4432., -3976., -2344., -1024., -1136., -1472., -1280., -728., -304., -2680., -4528., -5224., -3880., -2144., -4472., -7312., -8008., -5800., -3136., -3056., -4928., -5312., -3800., -2032., 200., 80., -40., -232., -224., -56., -400., -520., -616., -448., -464., -896., -992., -824., -496., 3080., 4688., 5144., 3416., 1696., 4360., 6512., 6968., 4568., 2240., 2128., 3136., 3328., 2152., 1040.}); test::ExpectClose(expected, *GetOutput(0), 1.5, 0.04); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetExplicitPadding) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "EXPLICIT") .Attr("explicit_padding", {0, 1, 1, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 3, 4}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 5})); test::FillValues<float>( &expected, {8072., 12176., 11480., 7640., 3808., 6280., 9392., 8696., 5720., 2816., 2896., 4288., 3904., 2536., 1232., 1736., 2960., 2840., 2072., 1120., 1480., 2480., 2360., 1688., 896., 880., 1408., 1312., 904., 464., -4600., -6256., -5800., -3496., -1568., -3320., -4432., -3976., -2344., -1024., -1136., -1472., -1280., -728., -304., -2680., -4528., -5224., -3880., -2144., -4472., -7312., -8008., -5800., -3136., -3056., -4928., -5312., -3800., -2032., 200., 80., -40., -232., -224., -56., -400., -520., -616., -448., -464., -896., -992., -824., -496., 3080., 4688., 5144., 3416., 1696., 4360., 6512., 6968., 4568., 2240., 2128., 3136., 3328., 2152., 1040.}); test::ExpectClose(expected, *GetOutput(0), 1.5, 0.04); } TEST_F(UniformQuantizedConvolutionTest, 
HybridPerTensorQuantizedSetDimensionNumbers) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 1 input_feature_dimension: 3 input_spatial_dimensions: 2 input_spatial_dimensions: 0 kernel_output_feature_dimension: 2 kernel_input_feature_dimension: 1 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 3 output_batch_dimension: 2 output_feature_dimension: 1 output_spatial_dimensions: 3 output_spatial_dimensions: 0 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_axis", -1) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({4, 2, 3, 2}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({2, 2, 3, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 2})); test::FillValues<float>( &expected, {3960., 3432., 2376., 1848., 2304., 2064., 1584., 1344., 648., 696., 792., 840., 792., 264., -792., -1320., 864., 624., 144., -96., 936., 984., 1080., 1128.}); test::ExpectClose(expected, *GetOutput(0), 10, 0.02); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetFeatureGroupCount) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("feature_group_count", 2) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 4, 3, 4}), Arange<float>(-98, 94, 2)); AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4, 2, 2})); test::FillValues<float>( &expected, {40400., 39416., 36464., 35480., 17360., 16952., 15728., 15320., -1648., -1480., -976., -808., -10864., -10120., -7888., -7144., -6832., -7816., -10768., -11752., -2224., -2632., -3856., -4264., 6416., 6584., 7088., 7256., 24848., 25592., 27824., 28568.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetBatchGroupCount) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("batch_group_count", 2) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({4, 2, 3, 4}), Arange<float>(-98, 94, 2)); AddInputFromArray<qint8>(TensorShape({4, 
2, 2, 3}), Arange<qint8>(-24, 24)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4, 2, 2})); test::FillValues<float>( &expected, {40400., 39416., 36464., 35480., 17360., 16952., 15728., 15320., 2384., 2552., 3056., 3224., 6992., 7736., 9968., 10712., 16784., 15800., 12848., 11864., 7568., 7160., 5936., 5528., 6416., 6584., 7088., 7256., 24848., 25592., 27824., 28568.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetLhsDilation) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("lhs_dilation", {2, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 3, 4}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 4, 5})); test::FillValues<float>( &expected, {5032., 2448., 4776., 2320., 4520., 3312., 1600., 3104., 1496., 2896., 4008., 1936., 3752., 1808., 3496., 2480., 1184., 2272., 1080., 2064., 1480., 720., 1416., 688., 1352., 528., 256., 512., 248., 496., 1224., 592., 1160., 560., 1096., 464., 224., 448., 216., 432., -2072., -1008., -1944., -944., -1816., -2256., -1088., -2080., -1000., -1904., -1560., -752., -1432., -688., -1304., -1552., -736., -1376., -648., -1200., -1112., -624., -1368., -752., -1624., -1680., -896., -1888., -1000., -2096., -2136., -1136., -2392., -1264., -2648., -2512., -1312., -2720., -1416., -2928., -56., -48., -120., -80., -184., 144., 64., 128., 56., 112., -312., -176., -376., -208., -440., 80., 32., 64., 24., 48., 1000., 528., 1128., 592., 1256., 1968., 1024., 2144., 1112., 2320., 1512., 784., 1640., 848., 1768., 2672., 1376., 2848., 1464., 3024.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetRhsDilation) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("rhs_dilation", {2, 2}) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 4, 5}), Arange<float>(-82, 78, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 1})); test::FillValues<float>( &expected, {18568., 15088., 4744., 4144., -9080., -6800., -9272., -12752., -56., -656., 9160., 11440.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerChannelQuantizedDefaultAttrs) { 
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_axis", 0) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2, 3, 4}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 2})); test::FillValues<float>( &expected, {12176., 11480., 9392., 8696., 8992., 8560., 7264., 6832., -6256., -5800., -4432., -3976., -4528., -5224., -7312., -8008., -1376., -1808., -3104., -3536., 4688., 5144., 6512., 6968.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerChannelQuantizedTFConv2DLikeConfig) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 0 input_feature_dimension: 3 input_spatial_dimensions: 1 input_spatial_dimensions: 2 kernel_output_feature_dimension: 3 kernel_input_feature_dimension: 2 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 1 output_batch_dimension: 0 output_feature_dimension: 3 output_spatial_dimensions: 1 output_spatial_dimensions: 2 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_axis", 3) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 3, 4, 2}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({2, 3, 2, 3}), Arange<qint8>(-18, 18)); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 3})); test::FillValues<float>( &expected, {5256., 12288., 3480., 4920., 11424., 3336., 3912., 8832., 2904., 3576., 7968., 2760., 1224., 1920., 1752., 888., 1056., 1608., -120., -1536., 1176., -456., -2400., 1032.}); test::ExpectClose(expected, *GetOutput(0), 4, 0.04); } TEST_F(UniformQuantizedConvolutionTest, HybridPerChannelQuantizedTFDepthwiseConv2DLikeConfig) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 0 input_feature_dimension: 3 input_spatial_dimensions: 1 input_spatial_dimensions: 2 kernel_output_feature_dimension: 3 kernel_input_feature_dimension: 2 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 1 output_batch_dimension: 0 output_feature_dimension: 3 output_spatial_dimensions: 1 output_spatial_dimensions: 2 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) 
.Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_axis", 3) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("feature_group_count", 2) .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 3, 4, 2}), Arange<float>(-50, 46, 2)); AddInputFromArray<qint8>(TensorShape({2, 3, 1, 2}), Arange<qint8>(-6, 6)); AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0}); AddInputFromArray<int32>(TensorShape({2}), {2, 4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 2})); test::FillValues<float>( &expected, {1720., 4160., 1576., 3776., 1144., 2624., 1000., 2240., -8., -448., -152., -832., -584., -1984., -728., -2368.}); test::ExpectClose(expected, *GetOutput(0), 1, 0.01); } TEST_F(UniformQuantizedConvolutionTest, HybridPerChannelQuantizedTFConv3DLikeConfig) { UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers; ASSERT_TRUE(TextFormat::ParseFromString(R"pb( input_batch_dimension: 0 input_feature_dimension: 4 input_spatial_dimensions: 1 input_spatial_dimensions: 2 input_spatial_dimensions: 3 kernel_output_feature_dimension: 4 kernel_input_feature_dimension: 3 kernel_spatial_dimensions: 0 kernel_spatial_dimensions: 1 kernel_spatial_dimensions: 2 output_batch_dimension: 0 output_feature_dimension: 4 output_spatial_dimensions: 1 output_spatial_dimensions: 2 output_spatial_dimensions: 3 )pb", &dimension_numbers)); TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_axis", 4) .Attr("rhs_quantization_min_val", kInt8Min) .Attr("rhs_quantization_max_val", kInt8Max) .Attr("padding", "VALID") .Attr("dimension_numbers", dimension_numbers.SerializeAsString()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 3, 4, 2, 2}), Arange<float>(-50, 46)); AddInputFromArray<qint8>(TensorShape({2, 3, 2, 2, 2}), Arange<qint8>(-24, 24)); AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0}); AddInputFromArray<int32>(TensorShape({2}), {2, 4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1, 2})); test::FillValues<float>( &expected, {11008., 25520., 10432., 23984., 8704., 19376., 8128., 17840., 4096., 7088., 3520., 5552., 1792., 944., 1216., -592.}); test::ExpectClose(expected, *GetOutput(0), 11, 0.02); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b29a0a1c-13ca-4fde-bdbc-0fdc345e1ae1
cpp
tensorflow/tensorflow
tensor_utils
tensorflow/lite/core/api/tensor_utils.cc
tensorflow/lite/kernels/internal/tensor_utils_test.cc
#include "tensorflow/lite/core/api/tensor_utils.h" #include <string.h> #include "tensorflow/lite/core/c/common.h" namespace tflite { TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { if (!tensor->is_variable) { return kTfLiteOk; } int value = 0; if (tensor->type == kTfLiteInt8) { value = tensor->params.zero_point; } #if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ defined(__i386) || defined(__x86__) || defined(__X86__) || \ defined(_X86_) || defined(_M_IX86) || defined(_M_X64) memset(tensor->data.raw, value, tensor->bytes); #else char* raw_ptr = tensor->data.raw; for (size_t i = 0; i < tensor->bytes; ++i) { *raw_ptr = value; raw_ptr++; } #endif return kTfLiteOk; } }
#include "tensorflow/lite/kernels/internal/tensor_utils.h" #include <math.h> #include <algorithm> #include <gmock/gmock.h> #include "tensorflow/lite/core/c/builtin_op_data.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/test_util.h" #ifdef DOTPROD_BENCHMARKS #include "testing/base/public/benchmark.h" #endif namespace tflite { namespace tensor_utils { template <typename T> void CompareRoundingResults(int flat_size, const T* expected_result, const T* real_result, int max_element_tolerance = 1, int max_total_tolerance = 5) { int max_diff = 0; int64_t total_diff = 0; for (int i = 0; i < flat_size; i++) { int diff = static_cast<int>(std::abs(expected_result[i] - real_result[i])); total_diff += diff; max_diff = std::max(max_diff, diff); } EXPECT_LE(max_diff, max_element_tolerance); EXPECT_LE(total_diff, max_total_tolerance); } TEST(uKernels, FloorLog2Test) { for (int i = 1; i < 257; ++i) { EXPECT_EQ(::tflite::FloorLog2(i), static_cast<int>(std::floor(std::log2(i)))); } } TEST(uKernels, VectorScalarMultiply) { constexpr int kVectorSize = 29; static int8_t input[kVectorSize]; for (int i = 0; i < 29; ++i) { input[i] = static_cast<int8_t>(i - 14); } const float scale = 0.1f; std::vector<float> output(kVectorSize, 0.0f); VectorScalarMultiply(input, kVectorSize, scale, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear( {-1.4, -1.3, -1.2, -1.1, -1.0, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4}))); } #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) TEST(uKernels, IsZeroFloatTest) { { const float four_zeros[4] = {0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(four_zeros, ARRAY_SIZE(four_zeros))); } { const float four_nonzeros[4] = {1, 2, 3, 4}; EXPECT_FALSE(IsZeroVector(four_nonzeros, ARRAY_SIZE(four_nonzeros))); } { const float eight_zeros[8] = {0, 0, 0, 0, 0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(eight_zeros, ARRAY_SIZE(eight_zeros))); } { const float eight_nonzeros[8] = {1, 2, 3, 4, 5, 6, 7, 8}; EXPECT_FALSE(IsZeroVector(eight_nonzeros, ARRAY_SIZE(eight_nonzeros))); } { const float multiple_four_mixed1[8] = {0, 0, 0, 0, 5, 6, 7, 8}; EXPECT_FALSE( IsZeroVector(multiple_four_mixed1, ARRAY_SIZE(multiple_four_mixed1))); } { const float multiple_four_mixed2[8] = {1, 2, 3, 4, 0, 0, 0, 0}; EXPECT_FALSE( IsZeroVector(multiple_four_mixed2, ARRAY_SIZE(multiple_four_mixed2))); } { const float three_zeros[3] = {0, 0, 0}; EXPECT_TRUE(IsZeroVector(three_zeros, ARRAY_SIZE(three_zeros))); } { const float three_nonzeros[3] = {1, 2, 3}; EXPECT_FALSE(IsZeroVector(three_nonzeros, ARRAY_SIZE(three_nonzeros))); } { const float three_mixed[3] = {1, 0, 3}; EXPECT_FALSE(IsZeroVector(three_mixed, ARRAY_SIZE(three_mixed))); } { const float seven_zeros[7] = {0, 0, 0, 0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(seven_zeros, ARRAY_SIZE(seven_zeros))); } { const float seven_nonzeros[7] = {1, 2, 3, 4, 5, 6, 7}; EXPECT_FALSE(IsZeroVector(seven_nonzeros, ARRAY_SIZE(seven_nonzeros))); } { const float nonzeros_after_zeros[7] = {0, 0, 0, 0, 5, 6, 7}; EXPECT_FALSE( IsZeroVector(nonzeros_after_zeros, ARRAY_SIZE(nonzeros_after_zeros))); } } TEST(uKernels, IsZeroInt8Test) { { const int8_t sixteen_zeros[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(sixteen_zeros, ARRAY_SIZE(sixteen_zeros))); } { const int8_t sixteen_nonzeros[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16}; EXPECT_FALSE(IsZeroVector(sixteen_nonzeros, ARRAY_SIZE(sixteen_nonzeros))); } { const int8_t thritytwo_zeros[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(thritytwo_zeros, ARRAY_SIZE(thritytwo_zeros))); } { const int8_t thritytwo_nonzeros[32] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; EXPECT_FALSE( IsZeroVector(thritytwo_nonzeros, ARRAY_SIZE(thritytwo_nonzeros))); } { const int8_t thritytwo_mixed1[32] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; EXPECT_FALSE(IsZeroVector(thritytwo_mixed1, ARRAY_SIZE(thritytwo_mixed1))); } { const int8_t thritytwo_mixed2[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; EXPECT_FALSE(IsZeroVector(thritytwo_mixed2, ARRAY_SIZE(thritytwo_mixed2))); } { const int8_t fifteen_zeros[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(fifteen_zeros, ARRAY_SIZE(fifteen_zeros))); } { const int8_t fifteen_nonzeros[15] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; EXPECT_FALSE(IsZeroVector(fifteen_nonzeros, ARRAY_SIZE(fifteen_nonzeros))); } { const int8_t fifteen_mixed[15] = {1, 0, 3, 0, 5, 0, 7, 0, 9, 0, 11, 0, 13, 0, 15}; EXPECT_FALSE(IsZeroVector(fifteen_mixed, ARRAY_SIZE(fifteen_mixed))); } { const int8_t seventeen_zeros[17] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; EXPECT_TRUE(IsZeroVector(seventeen_zeros, ARRAY_SIZE(seventeen_zeros))); } { const int8_t seventeen_nonzeros[17] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}; EXPECT_FALSE( IsZeroVector(seventeen_nonzeros, ARRAY_SIZE(seventeen_nonzeros))); } { const int8_t nonzeros_after_zeros[17] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17}; EXPECT_FALSE( IsZeroVector(nonzeros_after_zeros, ARRAY_SIZE(nonzeros_after_zeros))); } } #undef ARRAY_SIZE TEST(uKernels, SymmetricQuantizeFloatsTest) { constexpr int kVectorSize = 9; static float input[kVectorSize] = {-640, -635.0, -630, 10.0, 2.0, -5.0, -10.0, 0.0, 1000.0}; int8_t output[kVectorSize]; float min, max, scaling_factor; SymmetricQuantizeFloats(input, kVectorSize, output, &min, &max, &scaling_factor); EXPECT_EQ(min, -640); EXPECT_EQ(max, 1000); EXPECT_NEAR(scaling_factor, 1000 / 127.0, 1e-6); EXPECT_THAT(output, testing::ElementsAreArray({-81, -81, -80, 1, 0, -1, -1, 0, 127})); } TEST(uKernels, SymmetricQuantizeFloatsAllZerosTest) { constexpr int kVectorSize = 9; static float input[kVectorSize] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; int8_t output[kVectorSize]; float min, max, scaling_factor; SymmetricQuantizeFloats(input, kVectorSize, output, &min, &max, &scaling_factor); EXPECT_EQ(min, 0); EXPECT_EQ(max, 0); EXPECT_EQ(scaling_factor, 1); EXPECT_THAT(output, testing::ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0})); } TEST(uKernels, SymmetricQuantizeFloatsAllAlmostZeroTest) { constexpr int kVectorSize = 9; static float input[kVectorSize] = {-1e-5, 3e-5, -7e-6, -9e-5, 1e-6, 4e-5, 9e-6, 2e-4, 0}; int8_t output[kVectorSize]; float min, max, scaling_factor; SymmetricQuantizeFloats(input, kVectorSize, output, &min, &max, &scaling_factor); EXPECT_NEAR(min, -9e-05, 1e-6); EXPECT_NEAR(max, 0.0002, 1e-6); EXPECT_NEAR(scaling_factor, 1.57e-6, 1e-6); EXPECT_THAT(output, testing::ElementsAreArray({-6, 19, -4, -57, 1, 25, 6, 127, 0})); } TEST(uKernels, AsymmetricQuantizeFloatsTest) { constexpr int kVectorSize = 9; static float 
input[kVectorSize] = {-640, -635.0, -630, 10.0, 2.0, -5.0, -10.0, 0.0, 1000.0}; int8_t output[kVectorSize]; double min = -640.0; double max = 1000.0; QuantizationParams quantization_params = ChooseQuantizationParams<int8_t>(min, max); float scale = quantization_params.scale; int32_t offset = quantization_params.zero_point; float test_scale; int32_t test_offset; AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale, &test_offset); EXPECT_NEAR(test_scale, scale, 1e-6); EXPECT_EQ(test_offset, offset); EXPECT_THAT(output, testing::ElementsAreArray( {-128, -127, -126, -26, -28, -29, -30, -28, 127})); } TEST(uKernels, AsymmetricQuantizeFloatsAllZerosTest) { constexpr int kVectorSize = 9; static float input[kVectorSize] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; int8_t output[kVectorSize]; float test_scale; int32_t test_offset; AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale, &test_offset); EXPECT_EQ(test_scale, 1); EXPECT_EQ(test_offset, 0); EXPECT_THAT(output, testing::ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0})); } TEST(uKernels, AsymmetricQuantizeFloatsZeroRangeTest) { constexpr int kVectorSize = 9; static float input[kVectorSize] = {2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000}; int8_t output[kVectorSize]; double min = 0; double max = 2000; QuantizationParams quantization_params = ChooseQuantizationParams<int8_t>(min, max); int32_t offset = quantization_params.zero_point; float scale = quantization_params.scale; float test_scale; int32_t test_offset; AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale, &test_offset); EXPECT_NEAR(test_scale, scale, 1e-6); EXPECT_EQ(test_offset, offset); EXPECT_THAT(output, testing::ElementsAreArray( {127, 127, 127, 127, 127, 127, 127, 127, 127})); } TEST(uKernels, AsymmetricQuantizeFloatsAllAlmostZeroTest) { constexpr int kVectorSize = 9; static float input[kVectorSize] = {-1e-5, 3e-5, -7e-6, -9e-5, 1e-6, 4e-5, 9e-6, 2e-4, 0}; int8_t output[kVectorSize]; double min = -9e-05; double max = 0.0002; QuantizationParams quantization_params = ChooseQuantizationParams<int8_t>(min, max); int32_t offset = quantization_params.zero_point; float scale = quantization_params.scale; float test_scale; int32_t test_offset; AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale, &test_offset); EXPECT_NEAR(test_scale, scale, 1e-6); EXPECT_EQ(test_offset, offset); EXPECT_THAT(output, testing::ElementsAreArray( {-58, -23, -55, -128, -48, -14, -41, 127, -49})); } TEST(uKernels, MatrixBatchVectorMultiplyAccumulateTest) { constexpr int kRow = 3; constexpr int kCol = 4; constexpr int kBatch = 2; static float matrix[kRow * kCol] = {1.0, 2.0, 3.0, 4.0, -1.0, -2.0, -3.0, -4.0, 1.0, -2.0, 3.0, -4.0}; static float vector[kCol * kBatch] = {1.0, -1.0, 1.0, -1.0, 2.0, -2.0, 2.0, -2.0}; std::vector<float> output(kRow * kBatch); std::fill(output.begin(), output.end(), 3.0); MatrixBatchVectorMultiplyAccumulate(matrix, kRow, kCol, vector, kBatch, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({1., 5., 13., -1., 7., 23.}))); } TEST(uKernels, QuantMatrixBatchVectorMultiplyAccumulate8x8_16Test) { CpuBackendContext context; const std::vector<int8_t> input = { 4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24, -44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28, }; const std::vector<int32_t> input_zeropoint_times_weights = { -620, -170, -395, 715, -1220, -1080, 1130, -260, -470, }; 
const std::vector<int8_t> input_to_gate_weights = { -10, -4, -8, 16, 4, -16, -1, 11, 1, 2, -25, 19, 7, 9, 2, -24, -2, 10, -7, 7, -5, -2, 3, 4, 3, -4, -7, -11, -13, -18, 11, 10, 12, -9, 17, -15, -5, 20, -6, -11, 2, -6, -18, 15, 4, 4, -9, -2, -3, -9, -13, 17, -21, 5, 3, -12, 0, -4, 9, -5, 10, -2, 8, 1, -10, -6, 1, -9, 10, 11, -1, -5, 4, -7, -4, -4, 4, 12, -7, -5, -9, -19, 6, -4, 12, -17, -22, 0, 9, -4, -5, 5, -8, 8, 3, 15, -18, -18, 5, 3, -12, 5, -10, 7, 7, -9, 17, 2, -11, -25, 3, 19, -6, 7, 1, 7, 5, -3, 11, 3, 0, -8, 8, -2, -2, -12, 14, -5, 7, 8, 16, 20, -16, -5, -5, 1, -10, -6, 14, 10, -12, 10, -6, 5, 0, 3, 8, -9, -13, -2, 4, 4, -16, -17, -9, 16, -5, 14, -9, -5, -12, 0, 17, 6, -1, 16, -20, 1, -11, -1, -10, -21, 13, 4, -12, -7, 0, -14, -6, 3, -4, 6, -18, -3, -1, 14, -8, -6, -15, 5, 12, -3, -10, 4, 6, -5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 4, -12, 2, 6, -20, 0, 5, 23, 15, 14, 9, 8, 20, -2, 9, -8, -8, -7, -4, -8, -9, 7, -12, -2, 2, 1, -14, 31, 4, -14, 3, 10, -18, -17, -1, 18, 1, 12, 0, 7, -3, -5, 8, -9, 18, 17, 7, -15, 3, 20, 4, -8, 16, 6, -3, -3, 9, -4, -6, 4, }; const int32_t multiplier = 2080364544; const int32_t shift = -2; std::vector<int32_t> scratch(2 * 9, 0); std::vector<int16_t> output = {10, 2, 33, 4, 5, 6, 65, 4, 3, 52, 1, 2, 8, -1, -2, 11, 17, -18}; MatrixBatchVectorMultiplyAccumulate( input.data(), input_zeropoint_times_weights.data(), input_to_gate_weights.data(), multiplier, shift, 2, 30, 9, 0, scratch.data(), output.data(), &context); const std::vector<int16_t> expected_output = { -210, 331, 153, 139, -570, -657, 258, 515, -495, 91, -243, -73, 603, -744, -269, 169, -748, -174, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, HybridMatrixBatchVectorMultiplyAccumulate8x8_16Test) { CpuBackendContext context; const std::vector<int8_t> input = { 4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16, 1, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12, 1, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24, 1, -44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28, 1, 4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16, 1, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12, 1, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24, 1, -44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28, 1, }; const std::vector<int32_t> input_offsets = {1, 1, 1, 1}; const std::vector<float> scaling_factors = { 1.0, 1.0, 1.0, 1.0, }; const std::vector<int8_t> input_to_gate_weights = { -10, -4, -8, 16, 4, -16, -1, 11, 1, 2, -25, 19, 7, 9, 2, 1, -24, -2, 10, -7, 7, -5, -2, 3, 4, 3, -4, -7, -11, -13, -18, 2, 11, 10, 12, -9, 17, -15, -5, 20, -6, -11, 2, -6, -18, 15, 4, 3, 4, -9, -2, -3, -9, -13, 17, -21, 5, 3, -12, 0, -4, 9, -5, 4, 10, -2, 8, 1, -10, -6, 1, -9, 10, 11, -1, -5, 4, -7, -4, 5, -4, 4, 12, -7, -5, -9, -19, 6, -4, 12, -17, -22, 0, 9, -4, 6, -5, 5, -8, 8, 3, 15, -18, -18, 5, 3, -12, 5, -10, 7, 7, 7, -9, 17, 2, -11, -25, 3, 19, -6, 7, 1, 7, 5, -3, 11, 3, 8, 0, -8, 8, -2, -2, -12, 14, -5, 7, 8, 16, 20, -16, -5, -5, 9, 1, -10, -6, 14, 10, -12, 10, -6, 5, 0, 3, 8, -9, -13, -2, 10, 4, 4, -16, -17, -9, 16, -5, 14, -9, -5, -12, 0, 17, 6, -1, 11, 16, -20, 1, -11, -1, -10, -21, 13, 4, -12, -7, 0, -14, -6, 3, 12, -4, 6, -18, -3, -1, 14, -8, -6, -15, 5, 12, -3, -10, 4, 6, 13, -5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 14, -5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 15, -5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 16, }; 
std::vector<int32_t> scratch(5 * 8, 0); std::vector<float> output(4 * 8, 0); int32_t* row_sums = scratch.data() + 8 * 4; bool compute_row_sums = true; MatrixBatchVectorMultiplyAccumulate( input_to_gate_weights.data(), 8, 32, input.data(), scaling_factors.data(), 4, output.data(), nullptr, input_offsets.data(), scratch.data(), row_sums, &compute_row_sums, &context); const std::vector<float> expected_output = { -228, 1548, 937, -166, -1164, -1578, -278, 303, 839, -820, 132, 1733, -1858, 58, -425, -587, -228, 1548, 937, -166, -1164, -1578, -278, 303, 839, -820, 132, 1733, -1858, 58, -425, -587, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); EXPECT_THAT(compute_row_sums, false); std::vector<float> output2(4 * 8, 0); MatrixBatchVectorMultiplyAccumulate( input_to_gate_weights.data(), 8, 32, input.data(), scaling_factors.data(), 4, output2.data(), nullptr, input_offsets.data(), scratch.data(), row_sums, &compute_row_sums, &context); EXPECT_THAT(output2, testing::ElementsAreArray(expected_output)); constexpr int kBatchMultiplier = 8; std::vector<int8_t> input_big_batch(input.size() * kBatchMultiplier); std::vector<float> scaling_factors_big_batch(scaling_factors.size() * kBatchMultiplier); std::vector<int32_t> scratch_big_batch(scratch.size() * kBatchMultiplier); std::vector<int32_t> input_offsets_big_batch(input_offsets.size() * kBatchMultiplier); for (int i = 0; i < kBatchMultiplier; i++) { std::copy(input.begin(), input.end(), input_big_batch.begin() + i * input.size()); std::copy(scaling_factors.begin(), scaling_factors.end(), scaling_factors_big_batch.begin() + i * scaling_factors.size()); std::copy(input_offsets.begin(), input_offsets.end(), input_offsets_big_batch.begin() + i * input_offsets.size()); } std::vector<float> output_big_batch(output.size() * kBatchMultiplier, 0); MatrixBatchVectorMultiplyAccumulate( input_to_gate_weights.data(), 8, 32, input_big_batch.data(), scaling_factors_big_batch.data(), 4 * kBatchMultiplier, output_big_batch.data(), nullptr, input_offsets_big_batch.data(), scratch_big_batch.data(), row_sums, &compute_row_sums, &context); for (int i = 0; i < kBatchMultiplier; i++) { std::vector<float> output_per_batch( output_big_batch.begin() + i * output.size(), output_big_batch.begin() + (i + 1) * output.size()); EXPECT_THAT(output_per_batch, testing::ElementsAreArray(expected_output)); } } TEST(uKernels, QuantMatrixBatchVectorMultiplyAccumulate8x8_8Test) { CpuBackendContext context; const std::vector<int8_t> input = { 4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24, -44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28, }; const std::vector<int32_t> input_zeropoint_times_weights = { 0, 0, 0, 0, 0, 0, 0, 0, 0, }; const std::vector<int8_t> input_to_gate_weights = { 13, -7, -20, -22, 8, -46, 9, -2, -18, -42, 40, 28, -7, 24, 34, -7, -24, -24, 19, 14, -19, -6, -2, -3, 5, -36, -13, 6, -27, 36, -23, 0, 20, -37, -23, 9, 17, -41, 33, -15, -18, -42, -41, -34, -16, -6, 12, -14, -15, -20, -14, 21, -3, -1, -26, 54, 51, 35, -14, 9, -2, 13, -6, 39, 34, -21, 39, -51, 19, -44, 52, 0, -2, -38, -35, -33, 4, -22, -37, 27, -23, 3, -10, 5, 32, 6, 1, -35, 24, -19, 46, 43, -55, 5, 38, -14, 32, -43, -44, -17, -13, -28, 56, 28, -42, 4, 10, -7, 25, -15, -9, -25, -14, -15, 6, -10, -22, 40, -72, 18, -6, -18, -2, 37, -13, -10, 11, -9, 32, -28, 19, -2, 4, -31, 50, -15, 23, -34, -9, 41, -6, -34, 17, 2, 24, -15, 21, -17, -8, -20, 
1, -63, 19, -40, 12, -5, 5, -6, 1, 19, -9, -23, 5, -34, 11, 26, 21, 54, 34, -43, -29, 1, 16, 31, -56, -28, 57, -15, -23, 37, -17, -3, -6, 29, 18, 77, 17, -20, -14, -19, 8, -24, -7, -45, -3, 0, -25, -8, 6, 9, 3, -15, 51, 4, -15, -19, -16, -14, -47, -52, 25, 9, 58, 26, -9, -27, 49, -6, -21, 21, 18, 12, -9, -9, 14, 31, -26, -19, -50, 17, 35, 11, -10, 22, -16, -43, -2, 26, 55, -20, -7, 21, 33, -20, 26, -15, -22, 30, 27, 3, -34, 26, 12, -1, 19, 26, -25, 10, 30, 30, -14, -23, -23, -35, -16, 26, -41, 11, 1, 21, }; const int32_t multiplier = 1347771520; const int32_t shift = -7; const int32_t output_zp = -11; std::vector<int8_t> output = {1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1, 2, 8, -1, -2, 11, 17, 18}; std::vector<int32_t> scratch(2 * 9, 0); MatrixBatchVectorMultiplyAccumulate( input.data(), input_zeropoint_times_weights.data(), input_to_gate_weights.data(), multiplier, shift, 2, 30, 9, output_zp, scratch.data(), output.data(), &context); const std::vector<int8_t> expected_output = { 5, -9, -2, -30, -5, -11, -22, -18, 18, -19, 2, 11, -5, 9, -2, 10, -38, -22, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantMatrixBatchVectorMultiply8x8_8WithZPTest) { const int32_t input_zp = 3; const std::vector<int8_t> input = { 4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24, -44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28, }; const std::vector<int8_t> input_to_gate_weights = { 13, -7, -20, -22, 8, -46, 9, -2, -18, -42, 40, 28, -7, 24, 34, -7, -24, -24, 19, 14, -19, -6, -2, -3, 5, -36, -13, 6, -27, 36, -23, 0, 20, -37, -23, 9, 17, -41, 33, -15, -18, -42, -41, -34, -16, -6, 12, -14, -15, -20, -14, 21, -3, -1, -26, 54, 51, 35, -14, 9, -2, 13, -6, 39, 34, -21, 39, -51, 19, -44, 52, 0, -2, -38, -35, -33, 4, -22, -37, 27, -23, 3, -10, 5, 32, 6, 1, -35, 24, -19, 46, 43, -55, 5, 38, -14, 32, -43, -44, -17, -13, -28, 56, 28, -42, 4, 10, -7, 25, -15, -9, -25, -14, -15, 6, -10, -22, 40, -72, 18, -6, -18, -2, 37, -13, -10, 11, -9, 32, -28, 19, -2, 4, -31, 50, -15, 23, -34, -9, 41, -6, -34, 17, 2, 24, -15, 21, -17, -8, -20, 1, -63, 19, -40, 12, -5, 5, -6, 1, 19, -9, -23, 5, -34, 11, 26, 21, 54, 34, -43, -29, 1, 16, 31, -56, -28, 57, -15, -23, 37, -17, -3, -6, 29, 18, 77, 17, -20, -14, -19, 8, -24, -7, -45, -3, 0, -25, -8, 6, 9, 3, -15, 51, 4, -15, -19, -16, -14, -47, -52, 25, 9, 58, 26, -9, -27, 49, -6, -21, 21, 18, 12, -9, -9, 14, 31, -26, -19, -50, 17, 35, 11, -10, 22, -16, -43, -2, 26, 55, -20, -7, 21, 33, -20, 26, -15, -22, 30, 27, 3, -34, 26, 12, -1, 19, 26, -25, 10, 30, 30, -14, -23, -23, -35, -16, 26, -41, 11, 1, 21, }; const int32_t multiplier = 1347771520; const int32_t shift = -7; const int32_t output_zp = -11; std::vector<int8_t> output = {1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1, 2, 8, -1, -2, 11, 17, 18}; MatrixBatchVectorMultiply( input.data(), input_zp, input_to_gate_weights.data(), multiplier, shift, 2, 30, 9, output.data(), output_zp); const std::vector<int8_t> expected_output = {6, -9, -4, -32, -10, -17, -25, -25, 14, -19, 3, 10, -12, 10, 0, 1, -57, -41}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantMatrixBatchVectorMultiply16x8_8WithZPTest) { const std::vector<int16_t> input = { 400, -41, 5, -41, 22, 17, -30, 24, 130, -47, 18, 9, -11, -30, 16, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24, -44, 34, 
-11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28, }; const std::vector<int8_t> input_to_gate_weights = { 13, -7, -20, -22, 8, -46, 9, -2, -18, -42, 40, 28, -7, 24, 34, -7, -24, -24, 19, 14, -19, -6, -2, -3, 5, -36, -13, 6, -27, 36, -23, 0, 20, -37, -23, 9, 17, -41, 33, -15, -18, -42, -41, -34, -16, -6, 12, -14, -15, -20, -14, 21, -3, -1, -26, 54, 51, 35, -14, 9, -2, 13, -6, 39, 34, -21, 39, -51, 19, -44, 52, 0, -2, -38, -35, -33, 4, -22, -37, 27, -23, 3, -10, 5, 32, 6, 1, -35, 24, -19, 46, 43, -55, 5, 38, -14, 32, -43, -44, -17, -13, -28, 56, 28, -42, 4, 10, -7, 25, -15, -9, -25, -14, -15, 6, -10, -22, 40, -72, 18, -6, -18, -2, 37, -13, -10, 11, -9, 32, -28, 19, -2, 4, -31, 50, -15, 23, -34, -9, 41, -6, -34, 17, 2, 24, -15, 21, -17, -8, -20, 1, -63, 19, -40, 12, -5, 5, -6, 1, 19, -9, -23, 5, -34, 11, 26, 21, 54, 34, -43, -29, 1, 16, 31, -56, -28, 57, -15, -23, 37, -17, -3, -6, 29, 18, 77, 17, -20, -14, -19, 8, -24, -7, -45, -3, 0, -25, -8, 6, 9, 3, -15, 51, 4, -15, -19, -16, -14, -47, -52, 25, 9, 58, 26, -9, -27, 49, -6, -21, 21, 18, 12, -9, -9, 14, 31, -26, -19, -50, 17, 35, 11, -10, 22, -16, -43, -2, 26, 55, -20, -7, 21, 33, -20, 26, -15, -22, 30, 27, 3, -34, 26, 12, -1, 19, 26, -25, 10, 30, 30, -14, -23, -23, -35, -16, 26, -41, 11, 1, 21, }; const std::vector<int32_t> input_zeropoint_times_weights = { 0, 2, 3, 4, 5, 4, 3, 2, 10, }; const int32_t multiplier = 1347771520; const int32_t shift = -8; const int32_t output_zp = -11; std::vector<int8_t> output = {1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1, 2, 8, -1, -2, 11, 17, 18}; MatrixBatchVectorMultiply( input.data(), input_to_gate_weights.data(), multiplier, shift, input_zeropoint_times_weights.data(), 2, 30, 9, output_zp, output.data()); const std::vector<int8_t> expected_output = {4, -24, -5, 10, -7, -13, -39, 2, 3, -16, -5, -1, -12, -1, -6, -6, -33, -25}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, MatrixScalarMultiplyAccumulateTest) { std::vector<int32_t> output = { -620, -170, -395, 715, -1220, -1080, 1130, -260, -470, }; const std::vector<int8_t> weight = { -10, -4, -8, 16, 4, -16, -1, 11, 1, 2, -25, 19, 7, 9, 2, -24, -2, 10, -7, 7, -5, -2, 3, 4, 3, -4, -7, -11, -13, -18, 11, 10, 12, -9, 17, -15, -5, 20, -6, -11, 2, -6, -18, 15, 4, 4, -9, -2, -3, -9, -13, 17, -21, 5, 3, -12, 0, -4, 9, -5, 10, -2, 8, 1, -10, -6, 1, -9, 10, 11, -1, -5, 4, -7, -4, -4, 4, 12, -7, -5, -9, -19, 6, -4, 12, -17, -22, 0, 9, -4, -5, 5, -8, 8, 3, 15, -18, -18, 5, 3, -12, 5, -10, 7, 7, -9, 17, 2, -11, -25, 3, 19, -6, 7, 1, 7, 5, -3, 11, 3, 0, -8, 8, -2, -2, -12, 14, -5, 7, 8, 16, 20, -16, -5, -5, 1, -10, -6, 14, 10, -12, 10, -6, 5, 0, 3, 8, -9, -13, -2, 4, 4, -16, -17, -9, 16, -5, 14, -9, -5, -12, 0, 17, 6, -1, 16, -20, 1, -11, -1, -10, -21, 13, 4, -12, -7, 0, -14, -6, 3, -4, 6, -18, -3, -1, 14, -8, -6, -15, 5, 12, -3, -10, 4, 6, -5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 4, -12, 2, 6, -20, 0, 5, 23, 15, 14, 9, 8, 20, -2, 9, -8, -8, -7, -4, -8, -9, 7, -12, -2, 2, 1, -14, 31, 4, -14, 3, 10, -18, -17, -1, 18, 1, 12, 0, 7, -3, -5, 8, -9, 18, 17, 7, -15, 3, 20, 4, -8, 16, 6, -3, -3, 9, -4, -6, 4, }; MatrixScalarMultiplyAccumulate(weight.data(), 3, 9, 30, output.data()); const std::vector<int32_t> expected_output = { -797, -227, -536, 739, -1187, -1314, 965, -140, -257, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantApplyLayerNormTest) { const std::vector<int16_t> input = { -310, 596, 34, -68, 475, 92, 672, -54, -913, -200, -1194, -836, -620, -237, 991, 533, 721, -736, -8, -941, 
-372, -1084, 591, 2557, -779, 175, 582, 956, -287, 944, }; const std::vector<int16_t> layer_norm_weights = { 21849, 22882, 20626, 23854, 24779, 26354, 12980, 26231, 23716, 27271, 24937, 22647, 24715, 22854, 19646, }; const std::vector<int32_t> bias_weight = { -14175520, -13805465, -16027609, -13786809, -13321033, -14399810, -15055368, -14536623, -14508746, -13784007, -15206609, -15125830, -14996304, -14847597, -12814379, }; const int32_t multiplier = 1895840000; const int32_t shift = -13; const int32_t limit = 1; std::vector<int16_t> output(2 * 15, 0); ApplyLayerNorm(input.data(), layer_norm_weights.data(), bias_weight.data(), multiplier, shift, limit, 2, 15, output.data()); const std::vector<int16_t> expected_output = { -9407, 5846, -4802, -5295, 4822, -2390, 930, -5283, -20352, -7846, -26539, -18704, -15829, -8627, 10313, -2522, -132, -16058, -8206, -19158, -13296, -14407, -1235, 20612, -18591, -6738, -2274, 2602, -11622, 1565, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantApplyLayerNormFloatTest) { const std::vector<int16_t> input = { -310, 596, 34, -68, 475, 92, 672, -54, -913, -200, -1194, -836, -620, -237, 991, 533, 721, -736, -8, -941, -372, -1084, 591, 2557, -779, 175, 582, 956, -287, 944, }; const std::vector<int16_t> layer_norm_weights = { 21849, 22882, 20626, 23854, 24779, 26354, 12980, 26231, 23716, 27271, 24937, 22647, 24715, 22854, 19646, }; const std::vector<int32_t> bias_weight = { -14175520, -13805465, -16027609, -13786809, -13321033, -14399810, -15055368, -14536623, -14508746, -13784007, -15206609, -15125830, -14996304, -14847597, -12814379, }; const int32_t multiplier = 1895840000; const int32_t shift = -13; std::vector<int16_t> output(2 * 15, 0); ApplyLayerNormFloat(input.data(), layer_norm_weights.data(), multiplier, shift, bias_weight.data(), 2, 15, output.data()); const std::vector<int16_t> expected_output = { -9408, 5844, -4803, -5297, 4826, -2392, 927, -5286, -20353, -7851, -26534, -18701, -15830, -8623, 10312, -2524, -136, -16053, -8206, -19160, -13299, -14407, -1233, 20617, -18594, -6736, -2272, 2597, -11620, 1566}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantTanh0Test) { const std::vector<int16_t> input = { -145, 899, -176, -35, 264, 289, 8, 27, -37, -1310, -120, 127, -16, 106, 370, -583, -299, 93, -548, 548, 653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297, -145, 899, -176, -35, 264, 289, 8, 27, -37, -1310, -120, 127, -16, 106, 370, -583, -299, 93, -548, 548, 653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297, }; std::vector<int16_t> output(4 * 15, 0); ApplyTanh(0, input.data(), 4, 15, output.data()); const std::vector<int16_t> expected_output = { -136, 904, -176, -40, 260, 292, 8, 28, -44, -1304, -120, 120, -24, 112, 376, -576, -308, 88, -544, 544, 652, -32, -60, 1056, -56, -156, -144, -636, 192, -1300, -136, 904, -176, -40, 260, 292, 8, 28, -44, -1304, -120, 120, -24, 112, 376, -576, -308, 88, -544, 544, 652, -32, -60, 1056, -56, -156, -144, -636, 192, -1300, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantTanh3Test) { const std::vector<int16_t> input = { -145, 899, -176, -35, 264, 289, 8, 27, -37, -1310, -120, 127, -16, 106, 370, -583, -299, 93, -548, 548, 653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297, -145, 899, -176, -35, 264, 289, 8, 27, -37, -1310, -120, 127, -16, 106, 370, -583, -299, 93, -548, 548, 653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297, }; std::vector<int16_t> output(4 * 15, 0); 
ApplyTanh(3, input.data(), 4, 15, output.data()); const std::vector<int16_t> expected_output = { -1156, 7076, -1412, -276, 2104, 2308, 64, 220, -288, -10132, -964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352, 5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044, -1156, 7076, -1412, -276, 2104, 2308, 64, 220, -288, -10132, -964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352, 5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantTanhFloatTest) { const std::vector<int16_t> input = { -1, 0, 1, -35, 264, 289, 8, 27, -37, -1310, -120, 127, -16, 106, 370, -583, -299, 93, -548, 548, 653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297, -145, 899, -176, -35, 264, 289, 8, 27, -37, -1310, -120, 127, -16, 106, 370, -583, -299, 93, -548, 548, 653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297, }; std::vector<int16_t> output(4 * 15, 0); ApplyTanhFloat(input.data(), 4, 15, -12, output.data()); const std::vector<int16_t> expected_output = { -8, 0, 8, -279, 2109, 2308, 63, 215, -295, -10136, -959, 1015, -127, 847, 2951, -4632, -2387, 743, -4358, 4358, 5180, -231, -423, 8280, -415, -1311, -1191, -5039, 1606, -10042, -1159, 7078, -1407, -279, 2109, 2308, 63, 215, -295, -10136, -959, 1015, -127, 847, 2951, -4632, -2387, 743, -4358, 4358, 5180, -231, -423, 8280, -415, -1311, -1191, -5039, 1606, -10042}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantTanh4Test) { const std::vector<int16_t> input = { -5, 163, -31, -5, 54, 90, 1, 2, -4, -42, -8, 29, 0, 47, 150, -26, -36, 9, -73, 25, 14, -2, -1, 29, -10, -12, -18, -29, 51, -92, -5, 163, -31, -5, 54, 90, 1, 2, -4, -42, -8, 29, 0, 47, 150, -26, -36, 9, -73, 25, 14, -2, -1, 29, -10, -12, -18, -29, 51, -92, }; std::vector<int16_t> output(4 * 15, 0); ApplyTanh(4, input.data(), 4, 15, output.data()); const std::vector<int16_t> expected_output = { -76, 2596, -496, -76, 856, 1436, 24, 36, -64, -672, -120, 456, 0, 752, 2400, -412, -576, 148, -1168, 400, 216, -36, -24, 456, -164, -192, -292, -456, 820, -1476, -76, 2596, -496, -76, 856, 1436, 24, 36, -64, -672, -120, 456, 0, 752, 2400, -412, -576, 148, -1168, 400, 216, -36, -24, 456, -164, -192, -292, -456, 820, -1476, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantSigmoidTest) { const std::vector<int16_t> input = { -10500, 1398, -6963, -7404, 485, -5401, -1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -16178, -6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823, 11482, -11139, 7508, -10500, 1398, -6963, -7404, 485, -5401, -1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -16178, -6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823, 11482, -11139, 7508, }; std::vector<int16_t> output(4 * 15, 0); ApplySigmoid(input.data(), 4, 15, output.data()); const std::vector<int16_t> expected_output = { 2339, 19152, 5063, 4617, 17350, 6917, 12921, 4371, 299, 2813, 89, 409, 673, 2605, 25646, 16207, 19904, 615, 5353, 273, 1187, 91, 14153, 32756, 475, 9983, 18026, 30898, 2023, 28246, 2339, 19152, 5063, 4617, 17350, 6917, 12921, 4371, 299, 2813, 89, 409, 673, 2605, 25646, 16207, 19904, 615, 5353, 273, 1187, 91, 14153, 32756, 475, 9983, 18026, 30898, 2023, 28246, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantSigmoidFloatTest) { const std::vector<int16_t> input = { -10500, 1398, -6963, -7404, 485, -5401, 
-1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -16178, -6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823, 11482, -11139, 7508, -10500, 1398, -6963, -7404, 485, -5401, -1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -16178, -6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823, 11482, -11139, 7508, }; std::vector<int16_t> output(4 * 15, 0); ApplySigmoidFloat(input.data(), 4, 15, output.data()); const std::vector<int16_t> expected_output = { 2343, 19153, 5061, 4617, 17352, 6915, 12922, 4368, 295, 2811, 87, 407, 671, 2608, 25647, 16206, 19902, 619, 5352, 276, 1187, 92, 14151, 32757, 476, 9986, 18024, 30895, 2026, 28249, 2343, 19153, 5061, 4617, 17352, 6915, 12922, 4368, 295, 2811, 87, 407, 671, 2608, 25647, 16206, 19902, 619, 5352, 276, 1187, 92, 14151, 32757, 476, 9986, 18024, 30895, 2026, 28249}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantMul16bitOut15ShiftTest) { const std::vector<int16_t> input1 = { 2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 2157, 4545, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001, 1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791, }; const std::vector<int16_t> input2 = { -1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, -10132, -964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352, 5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044, }; std::vector<int16_t> output(2 * 15, 0); CwiseMul(input1.data(), input2.data(), 2, 15, 15, output.data()); const std::vector<int16_t> expected_output = { -88, 32766, -32768, -32767, -32767, 2308, 64, -220, 288, -667, -134, 460, -5, 760, 2407, -412, -575, 142, -1165, 399, 221, -33, -19, 468, -153, -197, -291, -462, 817, -1469, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantMul16bitOut19ShiftTest) { const std::vector<int16_t> input1 = { 2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 2157, 4545, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001, 1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791, }; const std::vector<int16_t> input2 = { -1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, -10132, -964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352, 5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044, }; std::vector<int16_t> output(2 * 15, 0); CwiseMul(input1.data(), input2.data(), 2, 15, 19, output.data()); const std::vector<int16_t> expected_output = { -5, 2048, 2048, -2048, -2048, 144, 4, -14, 18, -42, -8, 29, 0, 47, 150, -26, -36, 9, -73, 25, 14, -2, -1, 29, -10, -12, -18, -29, 51, -92, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantMul8bitArbitrarySclaeTest) { int multiplier = 1970324837; int shift = -15; const std::vector<int16_t> input1 = { 2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 2157, 4545, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001, 1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791, }; const std::vector<int16_t> input2 = { -1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, -10132, -964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352, 5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044, }; std::vector<int8_t> output(2 * 15, 0); CwiseMul(input1.data(), input2.data(), multiplier, shift, 2, 15, 3, output.data()); const std::vector<int8_t> expected_output = { -78, 127, 127, -128, -128, 127, 62, -128, 127, -128, -120, 127, -1, 
127, 127, -128, -128, 127, -128, 127, 127, -27, -14, 127, -128, -128, -128, -128, 127, -128, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantAddTest) { const std::vector<int16_t> input1 = { 2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 20000, -20000, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001, 1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791, }; const std::vector<int16_t> input2 = { -1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, 20000, -20000, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352, 5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044, }; std::vector<int16_t> output(2 * 15, 0); CwiseAdd(input1.data(), input2.data(), 2, 15, output.data()); const std::vector<int16_t> expected_output = { 1335, 32767, -32768, -1, -1, 32767, 32767, -32548, -32768, 32767, -32768, 15851, 1165, 30342, 29732, -1733, 5485, 7067, 4423, 7353, 6579, 4451, 1009, 10129, 11751, 3619, 6781, -2043, 18224, -5253, }; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, ClipTest) { constexpr int kVectorSize = 10; constexpr float kAbsLimit = 2.0; std::vector<float> input = {0.0, -0.5, 1.0, -1.5, 2.0, -2.5, 3.0, -3.5, 4.0, -4.5}; CwiseClipping(input.data(), kVectorSize, kAbsLimit); const std::vector<float> expected_output = {0.0, -0.5, 1.0, -1.5, 2.0, -2.0, 2.0, -2.0, 2.0, -2.0}; EXPECT_THAT(input, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantClip16Test) { constexpr int kVectorSize = 30; constexpr int16_t kAbsLimit = 300; std::vector<int16_t> input = { -10500, 1, -2, -7404, 200, -5401, -1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -200, -6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823, 11482, -11139, 7508, }; CwiseClipping(input.data(), kVectorSize, kAbsLimit); const std::vector<int16_t> expected_output = { -300, 1, -2, -300, 200, -300, -300, -300, -300, -300, -300, -300, -300, -300, 300, -89, 300, -200, -300, -300, -300, -300, -300, 300, -300, -300, 300, 300, -300, 300, }; EXPECT_THAT(input, testing::ElementsAreArray(expected_output)); } TEST(uKernels, QuantClip8Test) { constexpr int kVectorSize = 30; constexpr int8_t kAbsLimit = 32; std::vector<int8_t> input = { 4, -11, -5, -34, -10, -17, -27, -22, 15, 127, -128, 1, 3, 56, 3, -21, 1, 9, -13, 10, 0, -1, -55, -40, 127, -128, 11, 4, 6, 32, }; CwiseClipping(input.data(), kVectorSize, kAbsLimit); const std::vector<int8_t> expected_output = { 4, -11, -5, -32, -10, -17, -27, -22, 15, 32, -32, 1, 3, 32, 3, -21, 1, 9, -13, 10, 0, -1, -32, -32, 32, -32, 11, 4, 6, 32, }; EXPECT_THAT(input, testing::ElementsAreArray(expected_output)); } struct MatrixVectorData { std::vector<int8_t> matrix; std::vector<int8_t> zeroed_matrix; std::vector<int8_t> sparse_matrix; std::vector<uint8_t> ledger; std::vector<int8_t> vectors; std::vector<float> scale_factors; std::vector<float> results; std::vector<float> per_channel_scales; std::vector<int32_t> input_offsets; int rows; int cols; int batch; }; MatrixVectorData SetupMatrixVectorData(int rows, int cols, int batch, bool negative = false, bool is_per_channel = false, bool init_to_one = false) { MatrixVectorData data; data.rows = rows; data.cols = cols; data.batch = batch; for (int i = 0; i < rows * cols; i++) { int sign = 1; if ((i % 3) == 0 && negative) sign = -1; data.matrix.push_back(sign * (i % 70)); } for (int i = 0; i < cols * batch; i++) { int sign = 1; if ((i % 5) == 0 && negative) sign = -1; data.vectors.push_back(sign * (i % 
50)); } for (int i = 0; i < batch; i++) { data.scale_factors.insert(data.scale_factors.end(), {1, 2, 3, 4, 5, 6, 7, 8}); } data.results.resize(rows * batch, init_to_one ? 1 : 0); data.zeroed_matrix = data.matrix; for (int i = 0; i < rows; i++) { int max_chunks = cols / 16; int selected_chunks = (max_chunks / 2); bool row_is_odd = (i % 2) > 0; bool max_chunks_is_odd = (max_chunks % 2) > 0; data.ledger.push_back(selected_chunks); if (max_chunks_is_odd && row_is_odd) { selected_chunks++; } for (int j = 0; j < max_chunks; j++) { const int chunk_start = i * cols + (j * 16); const int chunk_end = i * cols + (j * 16) + 16; if ((j % 2) == (i % 2)) { data.ledger.push_back(j); for (int k = chunk_start; k < chunk_end; k++) { data.sparse_matrix.push_back(data.matrix[k]); } } else { for (int k = chunk_start; k < chunk_end; k++) { data.zeroed_matrix[k] = 0; } } } } if (is_per_channel) { for (int i = 0; i < rows; i++) { if (i % 2 == 0) { data.per_channel_scales.push_back(0.5); } else { data.per_channel_scales.push_back(1.0); } } for (int i = 0; i < batch; i++) { for (int j = 0; j < cols; j++) { data.vectors[i * cols + j] += i; } data.input_offsets.push_back(i); } } return data; } std::vector<float> TestDotprodMatrixBatchVectorMultiply( int rows, int cols, int batch, bool negative = false, bool init_to_one = false) { MatrixVectorData data = SetupMatrixVectorData(rows, cols, batch, negative, false, init_to_one); MatrixBatchVectorMultiplyAccumulate( data.matrix.data(), rows, cols, data.vectors.data(), data.scale_factors.data(), batch, &data.results[0]); return data.results; } std::vector<float> TestSparseDotprodMatrixBatchVectorMultiply( int rows, int cols, int batch, bool negative = false) { MatrixVectorData data = SetupMatrixVectorData(rows, cols, batch, negative); SparseMatrixBatchVectorMultiplyAccumulate( data.sparse_matrix.data(), data.ledger.data(), rows, cols, data.vectors.data(), data.scale_factors.data(), batch, &data.results[0]); return data.results; } std::vector<float> TestPerChannelDotprodMatrixBatchVectorMultiply( int rows, int cols, int batch, bool negative = false, bool is_per_channel = true) { MatrixVectorData data = SetupMatrixVectorData(rows, cols, batch, negative, is_per_channel); std::vector<int32_t> scratch(rows * batch); std::vector<int32_t> row_sums(rows); bool compute_row_sums = true; CpuBackendContext context; MatrixBatchVectorMultiplyAccumulate( data.matrix.data(), rows, cols, data.vectors.data(), data.scale_factors.data(), batch, &data.results[0], data.per_channel_scales.data(), data.input_offsets.data(), scratch.data(), row_sums.data(), &compute_row_sums, &context); return data.results; } TEST(uKernels, DotprodMatrixBatchVectorMultiplyAccumulateTest) { ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(4, 16, 1), testing::ElementsAre(1240, 3160, 5080, 7000)); ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(4, 32, 2), testing::ElementsAre(10416, 26288, 8490, 23312, 18276, 70756, 37416, 60916)); std::vector<float> results = TestDotprodMatrixBatchVectorMultiply(32, 512, 5); EXPECT_NEAR(415566, results[0], 0.0001); EXPECT_NEAR(880736, results[50], 0.0001); EXPECT_NEAR(1312062, results[72], 0.0001); EXPECT_NEAR(1750384, results[100], 0.0001); EXPECT_NEAR(1776224, results[120], 0.0001); EXPECT_NEAR(2101860, results[150], 0.0001); const bool kNegative = true; ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(4, 64, 1, kNegative), testing::ElementsAre(13696, 6904, 7764, 11806)); ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(4, 32, 2, kNegative), testing::ElementsAre(3436, 3522, 1590, 
6972, 2516, 20520, 456, 10628)); const bool kInitToOne = true; ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(4, 32, 2, kNegative, kInitToOne), testing::ElementsAre(3437, 3523, 1591, 6973, 2517, 20521, 457, 10629)); } TEST(uKernels, PerChannelDotprodMatrixBatchVectorMultiplyAccumulateTest) { ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 16, 1), testing::ElementsAre(1240 / 2, 3160, 5080 / 2, 7000)); ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 32, 2), testing::ElementsAre(10416 / 2, 26288, 8490 / 2, 23312, 18276 / 2, 70756, 37416 / 2, 60916)); std::vector<float> results = TestPerChannelDotprodMatrixBatchVectorMultiply(32, 512, 5); EXPECT_NEAR(207783, results[0], 0.0001); EXPECT_NEAR(411552, results[13], 0.0001); EXPECT_NEAR(835936, results[39], 0.0001); EXPECT_NEAR(440368, results[50], 0.0001); EXPECT_NEAR(875192, results[100], 0.0001); EXPECT_NEAR(1775536, results[123], 0.0001); EXPECT_NEAR(1050930, results[150], 0.0001); } TEST(uKernels, DotprodMatrixBatchFourVectorMultiplyAccumulateDotprodTest) { ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(2, 16, 4), testing::ElementsAreArray( {1240, 3160, 6320, 18352, 15240, 45576, 4200, 16232})); ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(2, 64, 4), testing::ElementsAreArray({45794, 38948, 88536, 84252, 157626, 165312, 209864, 246128})); ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(2, 64, 8), testing::ElementsAreArray({45794, 38948, 88536, 84252, 157626, 165312, 209864, 246128, 219700, 195550, 279684, 278928, 413616, 445662, 374896, 365952})); ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(4, 64, 8), testing::ElementsAreArray( {45794, 38948, 34622, 32816, 88536, 84252, 85008, 90804, 157626, 165312, 180558, 203364, 209864, 246128, 236472, 208896, 219700, 195550, 184000, 185050, 279684, 278928, 293292, 322776, 413616, 445662, 495348, 513674, 374896, 365952, 321168, 296544})); ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(16, 1024, 4), testing::ElementsAreArray( {841094, 853168, 866642, 840286, 860760, 862754, 843678, 872552, 837586, 851270, 877414, 834188, 863062, 857846, 841780, 879054, 1724476, 1769072, 1747588, 1738844, 1758240, 1742916, 1761612, 1755808, 1737684, 1750780, 1747356, 1754152, 1748348, 1753324, 1743320, 1754316, 2506896, 2564262, 2629188, 2515824, 2598390, 2569236, 2537352, 2645118, 2508444, 2571480, 2610576, 2510442, 2618208, 2566584, 2544570, 2614536, 3458904, 3502688, 3474792, 3505976, 3499360, 3488264, 3485848, 3512832, 3500616, 3482520, 3489624, 3469008, 3495992, 3524376, 3465680, 3526264})); ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(4, 128, 4), testing::ElementsAreArray({87920, 80024, 92288, 103712, 228148, 224820, 233812, 213124, 271284, 271788, 332772, 328236, 419328, 431328, 411968, 417248})); ASSERT_THAT( TestDotprodMatrixBatchVectorMultiply(4, 128, 8), testing::ElementsAreArray( {87920, 80024, 92288, 103712, 228148, 224820, 233812, 213124, 271284, 271788, 332772, 328236, 419328, 431328, 411968, 417248, 482680, 523840, 560800, 593560, 563940, 609924, 566868, 644772, 743708, 857780, 818972, 823284, 708384, 695008, 730912, 872096})); const bool kNegative = true; EXPECT_THAT(TestDotprodMatrixBatchVectorMultiply(1, 16, 1, kNegative), testing::ElementsAre(450)); EXPECT_THAT(TestDotprodMatrixBatchVectorMultiply(2, 64, 8, kNegative), testing::ElementsAreArray({13696, 6904, 9952, 12368, 22848, 61632, 40424, 46776, 57630, 38670, 62976, 49824, 39032, 71988, 60128, 148992})); std::vector<float> results = TestDotprodMatrixBatchVectorMultiply(256, 1024, 8); int64_t sum = 0; 
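// For this large 256 x 1024, 8-batch case the test checksums all outputs below
// instead of comparing them element by element.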
for (int i = 0; i < results.size(); i++) { sum += static_cast<int64_t>(results[i]); } EXPECT_EQ(7980076336, sum); } TEST(uKernels, PerChannelDotprodMatrixBatchFourVectorMultiplyAccumulateDotprodTest) { ASSERT_THAT( TestPerChannelDotprodMatrixBatchVectorMultiply(16, 1024, 4), testing::ElementsAreArray( {841094 / 2, 853168, 866642 / 2, 840286, 860760 / 2, 862754, 843678 / 2, 872552, 837586 / 2, 851270, 877414 / 2, 834188, 863062 / 2, 857846, 841780 / 2, 879054, 1724476 / 2, 1769072, 1747588 / 2, 1738844, 1758240 / 2, 1742916, 1761612 / 2, 1755808, 1737684 / 2, 1750780, 1747356 / 2, 1754152, 1748348 / 2, 1753324, 1743320 / 2, 1754316, 2506896 / 2, 2564262, 2629188 / 2, 2515824, 2598390 / 2, 2569236, 2537352 / 2, 2645118, 2508444 / 2, 2571480, 2610576 / 2, 2510442, 2618208 / 2, 2566584, 2544570 / 2, 2614536, 3458904 / 2, 3502688, 3474792 / 2, 3505976, 3499360 / 2, 3488264, 3485848 / 2, 3512832, 3500616 / 2, 3482520, 3489624 / 2, 3469008, 3495992 / 2, 3524376, 3465680 / 2, 3526264})); ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 128, 4), testing::ElementsAreArray( {87920 / 2, 80024, 92288 / 2, 103712, 228148 / 2, 224820, 233812 / 2, 213124, 271284 / 2, 271788, 332772 / 2, 328236, 419328 / 2, 431328, 411968 / 2, 417248})); ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 128, 8), testing::ElementsAreArray( {87920 / 2, 80024, 92288 / 2, 103712, 228148 / 2, 224820, 233812 / 2, 213124, 271284 / 2, 271788, 332772 / 2, 328236, 419328 / 2, 431328, 411968 / 2, 417248, 482680 / 2, 523840, 560800 / 2, 593560, 563940 / 2, 609924, 566868 / 2, 644772, 743708 / 2, 857780, 818972 / 2, 823284, 708384 / 2, 695008, 730912 / 2, 872096})); } TEST(uKernels, DotprodSparseMatrixBatchVectorMultiplyAccumulate) { EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 16, 1), testing::ElementsAre(0)); EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 32, 1), testing::ElementsAre(1240)); EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 64, 1), testing::ElementsAre(26544)); EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 64, 2), testing::ElementsAre(26544, 24344)); EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(4, 64, 4), testing::ElementsAreArray( {26544, 15866, 22140, 11408, 24344, 53248, 42704, 39900, 48000, 94146, 101892, 81876, 87712, 105160, 148304, 75936})); const bool kNegative = true; EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 64, 1, kNegative), testing::ElementsAre(8764)); EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(2, 64, 2, kNegative), testing::ElementsAre(8764, 5196, 7204, 11148)); } #ifdef __ANDROID__ TEST(uKernels, MatrixBatchVectorMultiplyAccumulateSymmetricQuantizedTest) { const int a_rows = 4, a_cols = 29; const int kWeightsPerUint32 = 4; const float a_float_data[] = { 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13, 14.14, 15.15, 16.16, 17.17, 18.18, 19.19, 20.2, 21.21, 22.22, 23.23, 24.24, 25.25, 26.26, 27.27, 28.28, 0, -1.1, -2.2, -3.3, -4.4, -5.5, -6.6, -7.7, -8.8, -9.9, -10.1, -11.11, -12.12, -13.13, -14.14, -15.15, -16.16, -17.17, -18.18, -19.19, -20.2, -21.21, -22.22, -23.23, -24.24, -25.25, -26.26, -27.27, -28.28, 0, 1.1, -2.2, 3.3, -4.4, 5.5, -6.6, 7.7, -8.8, 9.9, -10.1, 11.11, -12.12, 13.13, -14.14, 15.15, -16.16, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22, 23.23, -24.24, 25.25, -26.26, 27.27, -28.28, 0, -1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12, -13.13, 14.14, -15.15, 16.16, -17.17, 18.18, -19.19, 20.2, -21.21, 22.22, -23.23, 24.24, -25.25, 26.26, 
-27.27, 28.28, 0}; int8_t* a_int8_data = reinterpret_cast<int8_t*>( aligned_malloc(a_rows * a_cols, kWeightsPerUint32)); float a_min, a_max; float scaling_factor_a; SymmetricQuantizeFloats(a_float_data, a_rows * a_cols, a_int8_data, &a_min, &a_max, &scaling_factor_a); const int8_t expected_a_int8_data[] = { 5, 10, 15, 20, 25, 30, 35, 40, 44, 45, 50, 54, 59, 64, 68, 73, 77, 82, 86, 91, 95, 100, 104, 109, 113, 118, 122, 127, 0, -5, -10, -15, -20, -25, -30, -35, -40, -44, -45, -50, -54, -59, -64, -68, -73, -77, -82, -86, -91, -95, -100, -104, -109, -113, -118, -122, -127, 0, 5, -10, 15, -20, 25, -30, 35, -40, 44, -45, 50, -54, 59, -64, 68, -73, 77, -82, 86, -91, 95, -100, 104, -109, 113, -118, 122, -127, 0, -5, 10, -15, 20, -25, 30, -35, 40, -44, 45, -50, 54, -59, 64, -68, 73, -77, 82, -86, 91, -95, 100, -104, 109, -113, 118, -122, 127, 0, }; for (int i = 0; i < a_rows * a_cols; ++i) { EXPECT_EQ(expected_a_int8_data[i], a_int8_data[i]); } const int b_rows = 29, b_cols = 1, batches = 2; const float b_float_data[] = { 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 2.5, -2.1, 3.0, -1.3, 1.3, -1.1, 2.0, -1.7, 1.9, -1.5, 0.5, -0.7, 0.8, -0.3, 2.8, -2.8, 1.1, -2.3, 1.9, -1.9, 2.1, -0.5, 2.4, -0.1, 1.0, -2.5, 0.7, -1.9, 0.2, }; int8_t b_int8_data[b_rows * b_cols * batches]; float b_min, b_max; float scaling_factor_b[batches]; SymmetricQuantizeFloats(b_float_data, b_rows * b_cols, b_int8_data, &b_min, &b_max, &scaling_factor_b[0]); SymmetricQuantizeFloats(&b_float_data[b_rows * b_cols], b_rows * b_cols, &b_int8_data[b_rows * b_cols], &b_min, &b_max, &scaling_factor_b[1]); const int8_t expected_b_int8_data[] = { 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, 106, -89, 127, -55, 55, -47, 85, -72, 80, -64, 21, -30, 34, -13, 119, -119, 47, -97, 80, -80, 89, -21, 102, -4, 42, -106, 30, -80, 8, }; for (int i = 0; i < b_rows * b_cols * batches; ++i) { EXPECT_EQ(expected_b_int8_data[i], b_int8_data[i]); } float c_float_data[a_rows * b_cols * batches]; for (int i = 0; i < a_rows * b_cols * batches; ++i) { c_float_data[i] = 0.0; } const float scaling_factor_c[2] = { scaling_factor_a * scaling_factor_b[0], scaling_factor_a * scaling_factor_b[1], }; MatrixBatchVectorMultiplyAccumulate(a_int8_data, a_rows, a_cols, b_int8_data, scaling_factor_c, batches, c_float_data); const float expected_c_float_data[] = { -14.474, 14.474, 414.402, -414.402, -6.92228, 6.92228, 632.042, -632.042, }; for (int i = 0; i < a_rows * b_cols * batches; ++i) { EXPECT_NEAR(expected_c_float_data[i], c_float_data[i], 0.001); } std::vector<int32_t> accum_scratch(a_rows * batches); std::vector<float> c_float_data_2(a_rows * batches, 0.0); CpuBackendContext context; MatrixBatchVectorMultiplyAccumulate( a_int8_data, a_rows, a_cols, b_int8_data, scaling_factor_c, batches, accum_scratch.data(), c_float_data_2.data(), &context); for (int i = 0; i < a_rows * b_cols * batches; ++i) { EXPECT_NEAR(expected_c_float_data[i], c_float_data_2[i], 0.001); } aligned_free(a_int8_data); } #endif TEST(uKernels, SparseMatrixBatchVectorMultiplyAccumulateTest) { const int kRow = 4; const int kCol = 48; const int kBatch = 2; float matrix[kRow * kCol] = { 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13, 14.14, 15.15, 16.16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 33.33, 34.34, 35.35, 
36.36, 37.37, 38.38, 39.39, 40.40, 41.41, 42.42, 43.43, 44.44, 0, 0, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -17.17, -18.18, -19.19, -20.2, -21.21, -22.22, -23.23, -24.24, -25.25, -26.26, -27.27, -28.28, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22, 23.23, -24.24, 25.25, -26.26, 27.27, -28.28, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, -1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12, -13.13, 14.14, -15.15, 16.16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -33.33, 34.34, -35.35, 36.36, -37.37, 38.38, -39.39, 40.40, -41.41, 42.42, -43.43, 44.44, 0, 0, 0, 0}; float matrix_values[] = { 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13, 14.14, 15.15, 16.16, 33.33, 34.34, 35.35, 36.36, 37.37, 38.38, 39.39, 40.40, 41.41, 42.42, 43.43, 44.44, 0, 0, 0, 0, -17.17, -18.18, -19.19, -20.2, -21.21, -22.22, -23.23, -24.24, -25.25, -26.26, -27.27, -28.28, 0, 0.0, 0.0, 0.0, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22, 23.23, -24.24, 25.25, -26.26, 27.27, -28.28, 0, 0.0, 0.0, 0.0, -1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12, -13.13, 14.14, -15.15, 16.16, -33.33, 34.34, -35.35, 36.36, -37.37, 38.38, -39.39, 40.40, -41.41, 42.42, -43.43, 44.44, 0, 0, 0, 0}; uint8_t ledger[] = { 2, 0, 2, 1, 1, 1, 1, 2, 0, 2 }; float vector[kBatch * kCol] = { 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 2.5, 0.0, -2.1, 0.0, 3.0, 0.0, -1.3, 0.0, 1.3, 0.0, -1.1, 0.0, 2.0, 0.0, -1.7, 0.0, 1.9, 0.0, -1.5, 0.0, 0.5, 0.0, -0.7, 0.0, 0.8, 0.0, -0.3, 0.0, 2.8, 0.0, -2.8, 0.0, 1.1, -2.3, 1.9, -1.9, 2.1, -0.5, 2.4, -0.1, 1.0, -2.5, 0.7, -1.9, 0.2, 0.0, 0.1, 0.2, }; std::vector<float> dense_output(kRow * kBatch, 0.0); MatrixBatchVectorMultiplyAccumulate(matrix, kRow, kCol, vector, kBatch, dense_output.data()); EXPECT_THAT(dense_output, ElementsAreArray(ArrayFloatNear( {-13.69, 6.06001, 272.7, -608.03, -9.66602, -10.201, 10.201, -713.897949}, 1e-4))); std::vector<float> sparse_output(kRow * kBatch, 0.0); SparseMatrixBatchVectorMultiplyAccumulate( matrix_values, ledger, kRow, kCol, vector, kBatch, sparse_output.data()); EXPECT_THAT(sparse_output, ElementsAreArray(ArrayFloatNear(dense_output, 1e-4))); } #ifdef __ANDROID__ TEST(uKernels, SparseMatrixBatchVectorMultiplyAccumulateSymmetricQuantizedTest) { const int kRow = 4; const int kCol = 48; const int kBatch = 2; const int8_t quantized_matrix[] = { 3, 6, 9, 13, 16, 19, 22, 25, 28, 29, 32, 35, 38, 40, 43, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 98, 101, 104, 107, 110, 113, 115, 118, 121, 124, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -49, -52, -55, -58, -61, -64, -66, -69, -72, -75, -78, -81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, -52, 55, -58, 61, -64, 66, -69, 72, -75, 78, -81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 6, -9, 13, -16, 19, -22, 25, -28, 29, -32, 35, -38, 40, -43, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -95, 98, -101, 104, -107, 110, -113, 115, -118, 
121, -124, 127, 0, 0, 0, 0, }; const int8_t quantized_matrix_values[] = { 3, 6, 9, 13, 16, 19, 22, 25, 28, 29, 32, 35, 38, 40, 43, 46, 95, 98, 101, 104, 107, 110, 113, 115, 118, 121, 124, 127, 0, 0, 0, 0, -49, -52, -55, -58, -61, -64, -66, -69, -72, -75, -78, -81, 0, 0, 0, 0, 49, -52, 55, -58, 61, -64, 66, -69, 72, -75, 78, -81, 0, 0, 0, 0, -3, 6, -9, 13, -16, 19, -22, 25, -28, 29, -32, 35, -38, 40, -43, 46, -95, 98, -101, 104, -107, 110, -113, 115, -118, 121, -124, 127, 0, 0, 0, 0, }; uint8_t ledger[] = { 2, 0, 2, 1, 1, 1, 1, 2, 0, 2 }; float matrix_scaling_factor = 0.349921; const int8_t quantized_vector[] = { 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 106, 0, -89, 0, 127, 0, -55, 0, 55, 0, -47, 0, 85, 0, -72, 0, 80, 0, -64, 0, 21, 0, -30, 0, 34, 0, -13, 0, 119, 0, -119, 0, 47, -97, 80, -80, 89, -21, 102, -4, 42, -106, 30, -80, 8, 1, 2, 3, }; float vector_scaling_factor[2] = {0.00787402, 0.023622}; float result_scaling_factor[2] = { matrix_scaling_factor * vector_scaling_factor[0], matrix_scaling_factor * vector_scaling_factor[1], }; std::vector<float> dense_output(kRow * kBatch, 0.0); MatrixBatchVectorMultiplyAccumulate(quantized_matrix, kRow, kCol, quantized_vector, result_scaling_factor, kBatch, dense_output.data()); EXPECT_THAT(dense_output, ElementsAreArray(ArrayFloatNear( {-13.646927, 6.298582, 272.938538, -607.813110, -6.637464, -9.381721, 9.381721, -713.845642}))); std::vector<float> sparse_output(kRow * kBatch, 0.0); SparseMatrixBatchVectorMultiplyAccumulate( quantized_matrix_values, ledger, kRow, kCol, quantized_vector, result_scaling_factor, kBatch, sparse_output.data()); EXPECT_THAT(sparse_output, ElementsAreArray(ArrayFloatNear( {-13.646927, 6.298582, 272.938538, -607.813110, -6.637464, -9.381721, 9.381721, -713.845642}))); } #endif TEST(uKernels, VectorVectorCwiseProductTest) { constexpr int kVectorSize = 10; static float input1[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0, -2.5, 3.0, -3.5, 4.0, -4.5}; static float input2[kVectorSize] = {0.1, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1, -0.1}; std::vector<float> output(kVectorSize); VectorVectorCwiseProduct(input1, input2, kVectorSize, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear( {0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45}))); } TEST(uKernels, VectorVectorCwiseProductAccumulateTest) { constexpr int kVectorSize = 10; static float input1[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0, -2.5, 3.0, -3.5, 4.0, -4.5}; static float input2[kVectorSize] = {0.1, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1, -0.1}; std::vector<float> output(kVectorSize); std::fill(output.begin(), output.end(), 1.0); VectorVectorCwiseProductAccumulate(input1, input2, kVectorSize, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear( {1.0, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45}))); } TEST(uKernels, VectorBatchVectorAddTest) { constexpr int kVectorSize = 3; constexpr int kBatchSize = 2; static float input[kVectorSize] = {0.0, -0.5, 1.0}; std::vector<float> output = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; VectorBatchVectorAdd(input, kVectorSize, kBatchSize, output.data()); EXPECT_THAT(output, testing::ElementsAreArray({1.0, 1.5, 4.0, 4.0, 4.5, 7.0})); } TEST(uKernels, VectorBatchVectorAssignTest) { constexpr int kVectorSize = 5; constexpr int kBatchSize = 3; static float input[kVectorSize] = {0.0, 
-0.5, 1.0, -1.5, 2.0}; std::vector<float> output(kVectorSize * kBatchSize); VectorBatchVectorAssign(input, kVectorSize, kBatchSize, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear( {0.0, -0.5, 1.0, -1.5, 2.0, 0.0, -0.5, 1.0, -1.5, 2.0, 0.0, -0.5, 1.0, -1.5, 2.0}))); } TEST(uKernels, ApplySigmoidToVectorTest) { constexpr int kVectorSize = 5; static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0}; std::vector<float> output(kVectorSize); ApplySigmoidToVector(input, kVectorSize, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear( {0.5, 0.377541, 0.731059, 0.182426, 0.880797}))); } TEST(uKernels, ApplyActivationToVectorTest) { constexpr int kVectorSize = 5; static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0}; std::vector<float> output(kVectorSize); ApplyActivationToVector(input, kVectorSize, kTfLiteActRelu, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({0.0, 0.0, 1.0, 0.0, 2.0}))); ApplyActivationToVector(input, kVectorSize, kTfLiteActTanh, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear( {0.0, -0.462117, 0.761594, -0.905148, 0.964028}))); } TEST(uKernels, Sub1VectorTest) { constexpr int kVectorSize = 5; static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0}; std::vector<float> output(kVectorSize); Sub1Vector(input, kVectorSize, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({1.0, 1.5, 0.0, 2.5, -1.0}))); } TEST(uKernels, Sub1VectorInt16Test) { constexpr int kVectorSize = 30; static int16_t input[kVectorSize] = { 32760, 300, 1, 2, 3, 4, 5, 6, 300, 1000, 32767, 32000, 300, 1, 2, 3, 4, 5, 56, 300, 1000, 32767, 32761, 1300, 1, 2, 3, 4, 5, 6, }; std::vector<int16_t> output(kVectorSize); Sub1Vector(input, kVectorSize, output.data()); EXPECT_THAT( output, testing::ElementsAreArray({ 7, 32467, 32766, 32765, 32764, 32763, 32762, 32761, 32467, 31767, 0, 767, 32467, 32766, 32765, 32764, 32763, 32762, 32711, 32467, 31767, 0, 6, 31467, 32766, 32765, 32764, 32763, 32762, 32761, })); } TEST(uKernels, VectorBatchVectorCwiseProductAccumulateInteger) { constexpr int kVectorSize = 29; constexpr int kBatchSize = 4; static int16_t vector[kVectorSize] = {-10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}; const std::vector<int16_t> batch_vector = { 10, 11, 12, 13, 14, 15, 16, 17, 18, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18}; std::vector<int16_t> batch_output = { -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 10, 11, 12, 13, 14, 15, 16, 17, 18, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 13, 14, 15, 16, 17, 18}; VectorBatchVectorCwiseProductAccumulate(vector, kVectorSize, batch_vector.data(), kBatchSize, 1073741824, -1, batch_output.data()); const std::vector<int16_t> expected_output = { -35, 34, 32, 30, 27, 24, 20, 16, 11, -1, 10, 13, 16, 18, 19, 20, 21, 21, 20, 0, 4, 8, 12, 17, 23, 29, 35, 42, 50, 27, 24, 20, 18, 15, 14, 12, 12, 1, 2, 2, 6, 10, 15, 20, 26, 32, 39, 26, 9, 11, 13, 15, 18, 22, 26, 30, 
35, 51, 11, 15, 4, 7, 8, 10, 10, 11, 10, 10, 8, 12, -6, 15, 14, 14, 12, 11, 8, 6, 27, 32, 46, 54, 61, 70, 78, 88, 97, 17, 21, 14, 17, 18, 20, 20, 21, 20, 20, 18, -7, 13, 14, 13, 13, 11, 10, 7, 5, 26, 31, 37, 56, 63, 72, 80, 90, 99}; CompareRoundingResults<int16_t>(4 * 29, expected_output.data(), batch_output.data(), 1, 1); } TEST(uKernels, VectorBatchVectorCwiseProductAccumulateFloat) { constexpr int kVectorSize = 29; constexpr int kBatchSize = 4; static float input[kVectorSize] = { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.10f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.20f, 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}; std::vector<float> output = { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.10f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.20f, 21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f, -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.10f, -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.20f, -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f, 1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.10f, 11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.20f, 21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f, -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.10f, -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.20f, -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}; VectorBatchVectorCwiseProductAccumulate(input, kVectorSize, output.data(), kBatchSize, output.data()); const std::vector<float> expected_output = { 2.31f, 7.04f, 14.19f, 23.76f, 35.75f, 50.16f, 66.99f, 86.24f, 107.91f, 112.11f, 134.5421f, 159.0144f, 185.5269f, 214.0796f, 244.6725f, 277.3056f, 311.9789f, 348.6924f, 387.4461f, 428.24f, 471.0741f, 515.9484f, 562.8629f, 611.8176f, 662.8125f, 715.8476f, 770.9229f, 828.0384f, 0.0f, -2.31f, -7.04f, -14.19f, -23.76f, -35.75f, -50.16f, -66.99f, -86.24f, -107.91f, -112.11f, -134.5421f, -159.0144f, -185.5269f, -214.0796f, -244.6725f, -277.3056f, -311.9789f, -348.6924f, -387.4461f, -428.24f, -471.0741f, -515.9484f, -562.8629f, -611.8176f, -662.8125f, -715.8476f, -770.9229f, -828.0384f, 0.0f, 2.31f, -7.04f, 14.19f, -23.76f, 35.75f, -50.16f, 66.99f, -86.24f, 107.91f, -112.11f, 134.5421f, -159.0144f, 185.5269f, -214.0796f, 244.6725f, -277.3056f, 311.9789f, -348.6924f, 387.4461f, -428.24f, 471.0741f, -515.9484f, 562.8629f, -611.8176f, 662.8125f, -715.8476f, 770.9229f, -828.0384f, 0.0f, -2.31f, 7.04f, -14.19f, 23.76f, -35.75f, 50.16f, -66.99f, 86.24f, -107.91f, 112.11f, -134.5421f, 159.0144f, -185.5269f, 214.0796f, -244.6725f, 277.3056f, -311.9789f, 348.6924f, -387.4461f, 428.24f, -471.0741f, 515.9484f, -562.8629f, 611.8176f, -662.8125f, 715.8476f, -770.9229f, 828.0384f, 0.0f}; EXPECT_THAT(output, testing::ElementsAreArray( ArrayFloatNear(expected_output, 6.5e-5f))); } TEST(uKernels, VectorBatchVectorCwiseProductNoAccumulate) { constexpr int kVectorSize = 29; constexpr int kBatchSize = 4; static float input[kVectorSize] = { 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13, 14.14, 15.15, 16.16, 17.17, 18.18, 19.19, 20.2, 21.21, 22.22, 23.23, 24.24, 25.25, 26.26, 27.27, 28.28, 0}; std::vector<float> output = { 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13, 14.14, 15.15, 16.16, 17.17, 18.18, 19.19, 20.2, 
21.21, 22.22, 23.23, 24.24, 25.25, 26.26, 27.27, 28.28, 0, -1.1, -2.2, -3.3, -4.4, -5.5, -6.6, -7.7, -8.8, -9.9, -10.1, -11.11, -12.12, -13.13, -14.14, -15.15, -16.16, -17.17, -18.18, -19.19, -20.2, -21.21, -22.22, -23.23, -24.24, -25.25, -26.26, -27.27, -28.28, 0, 1.1, -2.2, 3.3, -4.4, 5.5, -6.6, 7.7, -8.8, 9.9, -10.1, 11.11, -12.12, 13.13, -14.14, 15.15, -16.16, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22, 23.23, -24.24, 25.25, -26.26, 27.27, -28.28, 0, -1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12, -13.13, 14.14, -15.15, 16.16, -17.17, 18.18, -19.19, 20.2, -21.21, 22.22, -23.23, 24.24, -25.25, 26.26, -27.27, 28.28, 0}; VectorBatchVectorCwiseProduct(input, kVectorSize, output.data(), kBatchSize, output.data()); const std::vector<float> expected_output = { 1.210000, 4.840000, 10.889999, 19.360001, 30.250000, 43.559998, 59.289997, 77.440002, 98.009995, 102.010010, 123.432091, 146.894394, 172.396896, 199.939606, 229.522491, 261.145599, 294.808899, 330.512421, 368.256134, 408.040039, 449.864075, 493.728363, 539.632874, 587.577576, 637.562500, 689.587585, 743.652954, 799.758423, 0.000000, -1.210000, -4.840000, -10.889999, -19.360001, -30.250000, -43.559998, -59.289997, -77.440002, -98.009995, -102.010010, -123.432091, -146.894394, -172.396896, -199.939606, -229.522491, -261.145599, -294.808899, -330.512421, -368.256134, -408.040039, -449.864075, -493.728363, -539.632874, -587.577576, -637.562500, -689.587585, -743.652954, -799.758423, 0.000000, 1.210000, -4.840000, 10.889999, -19.360001, 30.250000, -43.559998, 59.289997, -77.440002, 98.009995, -102.010010, 123.432091, -146.894394, 172.396896, -199.939606, 229.522491, -261.145599, 294.808899, -330.512421, 368.256134, -408.040039, 449.864075, -493.728363, 539.632874, -587.577576, 637.562500, -689.587585, 743.652954, -799.758423, 0.000000, -1.210000, 4.840000, -10.889999, 19.360001, -30.250000, 43.559998, -59.289997, 77.440002, -98.009995, 102.010010, -123.432091, 146.894394, -172.396896, 199.939606, -229.522491, 261.145599, -294.808899, 330.512421, -368.256134, 408.040039, -449.864075, 493.728363, -539.632874, 587.577576, -637.562500, 689.587585, -743.652954, 799.758423, 0.000000}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } TEST(uKernels, BatchVectorBatchVectorDotProductTest) { constexpr int kVectorSize = 5; constexpr int kBatch = 2; static float input1[kVectorSize * kBatch] = {0.0, -0.5, 1.0, -1.5, 2.0, -2.5, 3.0, -3.5, 4.0, -4.5}; static float input2[kVectorSize * kBatch] = {0.1, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1, -0.1, 0.1, -0.1}; std::vector<float> output(kBatch); BatchVectorBatchVectorDotProduct(input1, input2, kVectorSize, kBatch, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({0.5, 1.75}))); } TEST(uKernels, BatchVectorBatchVectorDotProductIntegerTest) { constexpr int kVectorSize = 5; constexpr int kBatch = 2; static int16_t input1[kVectorSize * kBatch] = {0, 5, 10, -15, 20, -25, 30, -35, 40, -45}; static int16_t input2[kVectorSize * kBatch] = {1, -1, 1, -1, 1, -1, 1, -1, 1, 1}; std::vector<int32_t> output(kBatch); BatchVectorBatchVectorDotProduct(input1, input2, kVectorSize, kBatch, output.data()); EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({40, 85}))); } TEST(uKernels, ReductionSumVectorTest) { constexpr int kInputVectorSize = 10; constexpr int kOutputVectorSize1 = 5; constexpr int kReductionSize1 = 2; static float input[kInputVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0, 0.0, -0.5, 1.0, 1.0, 2.0}; std::vector<float> result1(kOutputVectorSize1); 
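// ReductionSumVector sums each consecutive group of kReductionSize inputs into one
// output, e.g. reducing the 10 inputs by 2 gives {0.0 + -0.5, 1.0 + -1.5, ...} =
// {-0.5, -0.5, 2.0, 0.5, 3.0}.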
ReductionSumVector(input, result1.data(), kOutputVectorSize1, kReductionSize1); EXPECT_THAT(result1, ElementsAreArray(ArrayFloatNear({-0.5, -0.5, 2.0, 0.5, 3.0}))); constexpr int kOutputVectorSize2 = 2; constexpr int kReductionSize2 = 5; std::vector<float> result2(kOutputVectorSize2); ReductionSumVector(input, result2.data(), kOutputVectorSize2, kReductionSize2); EXPECT_THAT(result2, ElementsAreArray(ArrayFloatNear({1.0, 3.5}))); } TEST(uKernels, ReductionSumVectorIntegerTest) { constexpr int kInputVectorSize = 10; constexpr int kOutputVectorSize1 = 5; constexpr int kReductionSize1 = 2; static int32_t input[kInputVectorSize] = {1, 2, 1, 5, -3, 2, 1, 2, 5, 10}; std::vector<int32_t> result1(kOutputVectorSize1); ReductionSumVector(input, result1.data(), kOutputVectorSize1, kReductionSize1); EXPECT_THAT(result1, testing::ElementsAreArray({3, 6, -1, 3, 15})); } void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, const int8_t* recurrent, int8_t recurrent_zp, int32_t input_effective_scale_a, int32_t input_effective_scale_b, int32_t recurrent_effective_scale_a, int32_t recurrent_effective_scale_b, int32_t n_batch, int32_t n_cell, int16_t* output); TEST(uKernels, TwoGateSaturateAddTest) { const std::vector<int8_t> input1 = {1, 2, 3, 4, 55, 66, 77}; const std::vector<int8_t> input2 = {100, 2, 3, 4, 55, 66, 77}; const int32_t input1_zp = 10; const int32_t input2_zp = -5; const int32_t multiplier1 = 1347771520; const int32_t shift1 = -7; const int32_t multiplier2 = 1047577121; const int32_t shift2 = -6; std::vector<int16_t> output(7); TwoGateSaturatingAdd(input1.data(), input1_zp, input2.data(), input2_zp, multiplier1, shift1, multiplier2, shift2, 1, 7, output.data()); const std::vector<int16_t> expected_output = {1, 0, 0, 0, 0, 1, 1}; EXPECT_THAT(output, testing::ElementsAreArray(expected_output)); } namespace { class MeanStddevNormalizationTest : public testing::TestWithParam<std::tuple<float, float, float>> {}; } TEST_P(MeanStddevNormalizationTest, SeparateBatches) { const float mean = std::get<0>(GetParam()); const float diff = std::get<1>(GetParam()); const float tolerance = std::get<2>(GetParam()); constexpr int kVectorSize = 4; const float input[kVectorSize] = {mean - 2 * diff, mean - diff, mean + diff, mean + 2 * diff}; float output[kVectorSize]; MeanStddevNormalization(input, output, kVectorSize, 1); std::vector<float> expected_output; if (diff == 0.0f) { expected_output.assign({0.0f, 0.0f, 0.0f, 0.0f}); } else { const float ksqrt16 = std::sqrt(1.6f); const float ksqrt04 = std::sqrt(0.4f); expected_output.assign({-ksqrt16, -ksqrt04, ksqrt04, ksqrt16}); } EXPECT_THAT(output, testing::ElementsAreArray( ArrayFloatNear(expected_output, tolerance))); } INSTANTIATE_TEST_SUITE_P( uKernels, MeanStddevNormalizationTest, testing::Values( std::make_tuple(0.0f, 0.0f, 0.0f), std::make_tuple(0.0f, 0.01f, 2.53e-5f), std::make_tuple(0.0f, 100.0f, 1.20e-7f), std::make_tuple(0.01f, 0.0f, 0.0f), std::make_tuple(0.01f, 0.01f, 2.53e-5f), std::make_tuple(0.01f, 100.0f, 1.20e-7f), std::make_tuple(100.0f, 0.0f, 0.0f), std::make_tuple(100.0f, 0.01f, 1.81e-4f), std::make_tuple(100.0f, 100.0f, 1.20e-7f) )); TEST(uKernels, MeanStddevNormalizationAllBatches) { constexpr int kVectorSize = 4; constexpr int kBatchSize = 9; static float input[kVectorSize * kBatchSize] = { 0.0f, 0.0f, 0.0f, 0.0f, -0.02f, -0.01f, 0.01f, 0.02f, -200.0f, -100.0f, 100.0f, 200.0f, 0.01f, 0.01f, 0.01f, 0.01f, -0.01f, 0.0f, 0.02f, 0.03f, -199.99f, -99.99f, 100.01f, 200.01f, 100.0f, 100.0f, 100.0f, 100.0f, 99.98f, 99.99f, 100.01f, 
100.02f, -100.0f, 0.0f, 200.0f, 300.0f, }; float output[kVectorSize * kBatchSize]; MeanStddevNormalization(input, output, kVectorSize, kBatchSize); const float ksqrt16 = std::sqrt(1.6f); const float ksqrt04 = std::sqrt(0.4f); const std::vector<float> expected_output = { 0.0f, 0.0f, 0.0f, 0.0f, -ksqrt16, -ksqrt04, ksqrt04, ksqrt16, -ksqrt16, -ksqrt04, ksqrt04, ksqrt16, 0.0f, 0.0f, 0.0f, 0.0f, -ksqrt16, -ksqrt04, ksqrt04, ksqrt16, -ksqrt16, -ksqrt04, ksqrt04, ksqrt16, 0.0f, 0.0f, 0.0f, 0.0f, -ksqrt16, -ksqrt04, ksqrt04, ksqrt16, -ksqrt16, -ksqrt04, ksqrt04, ksqrt16, }; EXPECT_THAT(output, testing::ElementsAreArray( ArrayFloatNear(expected_output, 1.81e-4f))); } TEST(uKernels, MeanStddevNormalizationLargeVector) { const float mean = 100.0f; const float diff = 1.0f; constexpr int kVectorSize = 16 * 16 + 16 + 1; float input[kVectorSize]; input[0] = mean; for (int i = 1; i < kVectorSize - 1; i += 2) { input[i + 0] = mean + diff; input[i + 1] = mean - diff; } float output[kVectorSize]; MeanStddevNormalization(input, output, kVectorSize, 1); float expected_output[kVectorSize]; expected_output[0] = 0.0; const float expected_elem = std::sqrt(static_cast<double>(kVectorSize) / static_cast<double>(kVectorSize - 1)); for (int i = 1; i < kVectorSize - 1; i += 2) { expected_output[i + 0] = +expected_elem; expected_output[i + 1] = -expected_elem; } EXPECT_THAT(output, testing::Pointwise(testing::FloatEq(), expected_output)); } TEST(uKernels, UnpackInt4Basic) { const int8_t input[2] = {0x38, static_cast<int8_t>(0xBE)}; const int8_t expected_output[4] = {-8, 3, -2, -5}; int8_t actual_output[4]; UnpackDenseInt4IntoInt8(input, 4, actual_output); EXPECT_THAT(actual_output, testing::Pointwise(testing::Eq(), expected_output)); } TEST(uKernels, UnpackInt4OddLength) { const int8_t input[2] = {0x21, 0x43}; const int8_t expected_output[3] = {1, 2, 3}; int8_t actual_output[3]; UnpackDenseInt4IntoInt8(input, 3, actual_output); EXPECT_THAT(actual_output, testing::Pointwise(testing::Eq(), expected_output)); } } } #ifdef DOTPROD_BENCHMARKS void BM_DotprodBatchOneMultiply(benchmark::State& state) { const int rows = state.range(0); const int cols = state.range(1); const int batch = state.range(2); const int copies = state.range(3); std::vector<tflite::tensor_utils::MatrixVectorData> datas; for (int i = 0; i < copies; i++) { datas.push_back( tflite::tensor_utils::SetupMatrixVectorData(rows, cols, batch)); } int copy = 0; for (auto _ : state) { copy = (copy + 1) % datas.size(); auto& data = datas[copy]; for (int i = 0; i < batch; i++) { tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( data.matrix.data(), data.rows, data.cols, data.vectors.data() + (data.cols * i), data.scale_factors.data(), 1, &data.results[0]); testing::DoNotOptimize(data.results[2]); } } } BENCHMARK(BM_DotprodBatchOneMultiply) ->Args({16, 16, 1, 1}) ->Args({16, 16, 4, 1}) ->Args({32, 32, 1, 1}) ->Args({32, 32, 4, 1}) ->Args({64, 64, 1, 1}) ->Args({64, 64, 4, 1}) ->Args({128, 128, 1, 1}) ->Args({128, 128, 4, 1}) ->Args({992, 992, 1, 1}) ->Args({992, 992, 8, 1}) ->Args({1024, 1024, 1, 1}) ->Args({1024, 1024, 1, 8}) ->Args({1024, 1024, 4, 1}) ->Args({1024, 1024, 4, 8}) ->Args({1024, 1024, 8, 1}) ->Args({640, 2048, 1, 1}) ->Args({640, 2048, 4, 1}) ->Args({640, 2048, 8, 1}) ->Args({640, 2048, 8, 8}) ->Args({2048, 2048, 1, 1}) ->Args({2048, 2048, 1, 8}) ->Args({2048, 2048, 8, 1}) ->Args({4096, 4096, 8, 1}) ->Args({4096, 4096, 1, 8}) ->Args({8192, 8192, 8, 1}) ->Args({8192, 8192, 1, 8}) ->Args({16384, 16384, 8, 1}) ->Args({16384, 16384, 1, 8}); void 
BM_DotprodBatchFourMultiply(benchmark::State& state) { const int rows = state.range(0); const int cols = state.range(1); const int batch = state.range(2); const int copies = state.range(3); std::vector<tflite::tensor_utils::MatrixVectorData> datas; for (int i = 0; i < copies; i++) { datas.push_back( tflite::tensor_utils::SetupMatrixVectorData(rows, cols, batch)); } int copy = 0; for (auto _ : state) { copy = (copy + 1) % datas.size(); auto& data = datas[copy]; tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( data.matrix.data(), data.rows, data.cols, data.vectors.data(), data.scale_factors.data(), data.batch, &data.results[0]); testing::DoNotOptimize(data.results[2]); } } BENCHMARK(BM_DotprodBatchFourMultiply) ->Args({16, 16, 4, 1}) ->Args({32, 32, 4, 1}) ->Args({64, 64, 4, 1}) ->Args({64, 256, 64, 1}) ->Args({64, 256, 256, 1}) ->Args({64, 256, 1024, 1}) ->Args({64, 256, 12544, 1}) ->Args({128, 128, 2, 1}) ->Args({128, 128, 3, 1}) ->Args({128, 128, 4, 1}) ->Args({128, 128, 5, 1}) ->Args({640, 640, 4, 1}) ->Args({992, 992, 8, 1}) ->Args({1024, 1024, 2, 1}) ->Args({1024, 1024, 3, 1}) ->Args({1024, 1024, 4, 1}) ->Args({1024, 1024, 5, 1}) ->Args({1024, 1024, 8, 1}) ->Args({1024, 1024, 8, 8}) ->Args({1024, 1024, 256, 1}) ->Args({640, 2048, 2, 1}) ->Args({640, 2048, 3, 1}) ->Args({640, 2048, 4, 1}) ->Args({640, 2048, 4, 8}) ->Args({640, 2048, 8, 1}) ->Args({2048, 2048, 3, 1}) ->Args({2048, 2048, 4, 1}) ->Args({2048, 2048, 4, 8}) ->Args({2048, 2048, 5, 1}) ->Args({2048, 2048, 8, 1}) ->Args({2048, 2048, 64, 1}) ->Args({2048, 2048, 1024, 1}) ->Args({4096, 4096, 1024, 1}) ->Args({8192, 8192, 1024, 1}) ->Args({8192, 8192, 1024, 8}) ->Args({16384, 16384, 1024, 1}) ->Args({16384, 8192, 1024, 1}); void BM_DotprodSparseMultiply(benchmark::State& state) { const int rows = state.range(0); const int cols = state.range(1); const int batch = state.range(2); const int copies = state.range(3); std::vector<tflite::tensor_utils::MatrixVectorData> datas; for (int i = 0; i < copies; i++) { datas.push_back( tflite::tensor_utils::SetupMatrixVectorData(rows, cols, batch)); } int copy = 0; for (auto _ : state) { copy = (copy + 1) % datas.size(); auto& data = datas[copy]; tflite::tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate( data.sparse_matrix.data(), data.ledger.data(), data.rows, data.cols, data.vectors.data(), data.scale_factors.data(), data.batch, &data.results[0]); testing::DoNotOptimize(data.results[2]); } } BENCHMARK(BM_DotprodSparseMultiply) ->Args({128, 128, 1, 1}) ->Args({128, 128, 4, 1}) ->Args({640, 640, 4, 1}) ->Args({992, 992, 8, 1}) ->Args({1024, 1024, 1, 1}) ->Args({1024, 1024, 4, 1}) ->Args({1024, 1024, 8, 1}) ->Args({640, 2048, 1, 1}) ->Args({640, 2048, 4, 1}) ->Args({640, 2048, 8, 1}) ->Args({2048, 2048, 1, 1}) ->Args({2048, 2048, 8, 1}); void BM_DotprodFloatMultiply(benchmark::State& state) { const int rows = state.range(0); const int cols = state.range(1); const int batch = state.range(2); std::vector<float> matrix(rows * cols); std::fill(matrix.begin(), matrix.end(), 1.0); std::vector<float> vector(cols * batch); std::fill(vector.begin(), vector.end(), 0.3); std::vector<float> output(rows * batch); for (auto _ : state) { std::fill(output.begin(), output.end(), 0.0); tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( matrix.data(), rows, cols, vector.data(), batch, output.data()); } } BENCHMARK(BM_DotprodFloatMultiply) ->Args({16, 16, 4}) ->Args({32, 32, 4}) ->Args({64, 64, 4}) ->Args({64, 256, 64}) ->Args({64, 256, 256}) ->Args({64, 256, 1024}) ->Args({64, 256, 12544}) 
->Args({128, 128, 2}) ->Args({128, 128, 3}) ->Args({128, 128, 4}) ->Args({128, 128, 5}) ->Args({640, 640, 4}) ->Args({992, 992, 8}) ->Args({1024, 1024, 2}) ->Args({1024, 1024, 3}) ->Args({1024, 1024, 4}) ->Args({1024, 1024, 5}) ->Args({1024, 1024, 8}) ->Args({1024, 1024, 8}) ->Args({1024, 1024, 256}) ->Args({640, 2048, 2}) ->Args({640, 2048, 3}) ->Args({640, 2048, 4}) ->Args({640, 2048, 4}) ->Args({640, 2048, 8}) ->Args({2048, 2048, 3}) ->Args({2048, 2048, 4}) ->Args({2048, 2048, 4}) ->Args({2048, 2048, 5}) ->Args({2048, 2048, 8}); #endif
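The quantized tests above describe every rescaling step as an int32 multiplier plus a power-of-two shift rather than a float scale (for example multiplier 1347771520 with shift -8 in the MatrixBatchVectorMultiply test, or 1970324837 with shift -15 in the arbitrary-scale CwiseMul test). The following is a minimal scalar sketch of that convention, assuming the usual Q31 fixed-point scheme used by gemmlowp-style kernels; the "Ref" names are illustrative and are not the library's API.

#include <cstdint>
#include <limits>

// result ~= acc * (multiplier / 2^31) * 2^shift, with rounding and saturation.
// A negative shift is a right shift, which is the common case in the tests above.
int32_t SaturatingRoundingDoublingHighMulRef(int32_t a, int32_t b) {
  if (a == std::numeric_limits<int32_t>::min() &&
      b == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::max();  // the only overflowing case
  }
  const int64_t ab = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  const int32_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
  return static_cast<int32_t>((ab + nudge) / (int64_t{1} << 31));
}

int32_t RoundingDivideByPOTRef(int32_t x, int exponent) {
  const int32_t mask = static_cast<int32_t>((int64_t{1} << exponent) - 1);
  const int32_t remainder = x & mask;
  const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

int32_t MultiplyByQuantizedMultiplierRef(int32_t acc, int32_t multiplier,
                                         int shift) {
  const int left_shift = shift > 0 ? shift : 0;
  const int right_shift = shift > 0 ? 0 : -shift;
  return RoundingDivideByPOTRef(
      SaturatingRoundingDoublingHighMulRef(acc * (1 << left_shift), multiplier),
      right_shift);
}

The kernels then clamp the rescaled value into the output type's range, which is why several of the expected vectors above saturate at +/-127 or +/-32767.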
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/tensor_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/tensor_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
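For reference, the contract that TestDotprodMatrixBatchVectorMultiply above checks can be written as a plain scalar loop: int8 weights times int8 inputs accumulated in int32, then scaled by one float factor per batch and accumulated into a float result. This sketch assumes row-major weights and is only an illustration of that contract, not the optimized kernel under test.

#include <cstdint>

// result[b * rows + r] += scaling_factors[b] *
//                         sum_c matrix[r * cols + c] * vectors[b * cols + c]
void MatrixBatchVectorMultiplyAccumulateRef(const int8_t* matrix, int rows,
                                            int cols, const int8_t* vectors,
                                            const float* scaling_factors,
                                            int n_batch, float* result) {
  for (int b = 0; b < n_batch; ++b) {
    const int8_t* vec = vectors + b * cols;
    for (int r = 0; r < rows; ++r) {
      int32_t acc = 0;  // widen so the int8 * int8 products cannot overflow
      for (int c = 0; c < cols; ++c) {
        acc += static_cast<int32_t>(matrix[r * cols + c]) *
               static_cast<int32_t>(vec[c]);
      }
      result[b * rows + r] += scaling_factors[b] * static_cast<float>(acc);
    }
  }
}

With the SetupMatrixVectorData pattern above (weights i % 70, inputs i % 50, scale factor 1 for the first batch), this loop reproduces the small expected values such as {1240, 3160, 5080, 7000} for the 4 x 16 case.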
81c66f85-a1d8-463d-9347-1c571f71ce56
cpp
tensorflow/tensorflow
uniform_quantized_dot_ops
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops_test.cc
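The uniform_quantized_dot_ops.cc source that follows evaluates a 2-D dot product directly on quantized tensors. As a loose scalar sketch of the arithmetic it has to reproduce in the per-tensor case, using standard affine quantization (q = round(x / scale) + zero_point): the name and the float rounding shortcut below are illustrative only, since the real kernel folds the scales into a quantized multiplier and shift via QuantizeMultiplier.

#include <algorithm>
#include <cmath>
#include <cstdint>

// One output element of a quantized [batches, depth] x [depth, out_depth] dot:
//   acc      = sum_k (lhs_q[k] - lhs_zp) * (rhs_q[k] - rhs_zp)
//   output_q = clamp(round(acc * lhs_scale * rhs_scale / out_scale) + out_zp)
int8_t QuantizedDotOutputRef(const int8_t* lhs_row, const int8_t* rhs_col,
                             int col_stride, int depth, int32_t lhs_zp,
                             int32_t rhs_zp, float lhs_scale, float rhs_scale,
                             float out_scale, int32_t out_zp,
                             int32_t out_min, int32_t out_max) {
  int32_t acc = 0;
  for (int k = 0; k < depth; ++k) {
    acc += (static_cast<int32_t>(lhs_row[k]) - lhs_zp) *
           (static_cast<int32_t>(rhs_col[k * col_stride]) - rhs_zp);
  }
  const float effective_scale = lhs_scale * rhs_scale / out_scale;
  const int32_t out =
      static_cast<int32_t>(std::lround(acc * effective_scale)) + out_zp;
  return static_cast<int8_t>(std::min(out_max, std::max(out_min, out)));
}

The per-channel path below follows the same shape, except that rhs_zp, the effective multiplier, and possibly the output zero point are indexed by the output column.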
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" namespace tensorflow { namespace { using tensorflow::errors::InvalidArgument; Status DotInputShapeValid(const TensorShape& lhs_shape, const TensorShape& rhs_shape) { if (lhs_shape.dims() != 2) { return InvalidArgument("lhs rank must be 2, but given lhs shape ", lhs_shape.DebugString()); } if (rhs_shape.dims() != 2) { return InvalidArgument("rhs rank must be 2, but given rhs shape ", rhs_shape.DebugString()); } if (lhs_shape.dim_size(1) != rhs_shape.dim_size(0)) { return InvalidArgument( "lhs.dim_size(1) and rhs.dim_size(0) must be equal, but given lhs " "shape ", lhs_shape.DebugString(), " and rhs shape ", rhs_shape.DebugString()); } return absl::OkStatus(); } template <typename Tlhs, typename Trhs, typename Tout, typename AccF, typename OutputF> void DotWithAccFunctionAndOutputFunction(const Tensor& lhs, const Tensor& rhs, Tensor& output, const AccF& acc_f, const OutputF& output_f) { const int64_t batches = output.dim_size(0); const int64_t output_depth = output.dim_size(1); const int64_t accum_depth = rhs.dim_size(0); const Tlhs* lhs_data = lhs.flat<Tlhs>().data(); const Trhs* rhs_data = rhs.flat<Trhs>().data(); Tout* output_data = output.flat<Tout>().data(); for (int64_t b = 0; b < batches; ++b) { for (int64_t out_c = 0; out_c < output_depth; ++out_c) { int32_t acc = 0; for (int64_t d = 0; d < accum_depth; ++d) { acc += acc_f(lhs_data[b * accum_depth + d], rhs_data[d * output_depth + out_c], b, out_c); } output_data[b * output_depth + out_c] = output_f(acc, b, out_c); } } } template <typename Tin, typename Tout> Status EvalLhsPerTensorAndRhsPerTensorQuantizedDot( const Tensor& lhs, const Tensor& rhs, float lhs_scale, int32_t lhs_zero_point, float rhs_scale, int32_t rhs_zero_point, float output_scale, int32_t output_zero_point, int output_quantization_min_val, int output_quantization_max_val, Tensor& output) { const double effective_multiplier = static_cast<double>(lhs_scale) * rhs_scale / output_scale; int32_t effective_quantized_multiplier; int effective_shift; TF_RETURN_IF_ERROR(QuantizeMultiplier( effective_multiplier, effective_quantized_multiplier, effective_shift)); DotWithAccFunctionAndOutputFunction<Tin, Tin, Tout>( lhs, rhs, output, [lhs_zero_point, rhs_zero_point](Tin lhs_val, Tin rhs_val, int64_t b, int64_t out_c) { return static_cast<Tout>( (static_cast<int32_t>(lhs_val) - lhs_zero_point) * (static_cast<int32_t>(rhs_val) - rhs_zero_point)); }, [effective_quantized_multiplier, effective_shift, output_zero_point, output_quantization_min_val, output_quantization_max_val](int32_t acc, int64_t b, int64_t out_c) { return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>( acc, effective_quantized_multiplier, effective_shift, 0, output_zero_point, output_quantization_min_val, output_quantization_max_val); }); return absl::OkStatus(); } template <typename Tin, typename Tout> Status EvalLhsPerTensorAndRhsPerChannelQuantizedDot( OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, float lhs_scale, int32_t lhs_zero_point, const Tensor& rhs_scales, const Tensor& rhs_zero_points, const Tensor& output_scales, const Tensor& output_zero_points, int output_quantization_min_val, int output_quantization_max_val, Tensor& output) { const int output_depth = output.dim_size(1); const float* rhs_scales_data = rhs_scales.flat<float>().data(); const int32_t* rhs_zero_points_data = 
rhs_zero_points.flat<int32_t>().data(); Tensor effective_quantized_multipliers; TF_RETURN_IF_ERROR(context->allocate_temp(DT_INT32, rhs_scales.shape(), &effective_quantized_multipliers)); Tensor effective_shifts; TF_RETURN_IF_ERROR( context->allocate_temp(DT_INT32, rhs_scales.shape(), &effective_shifts)); int32_t* effective_quantized_multipliers_data = effective_quantized_multipliers.flat<int32_t>().data(); int32_t* effective_shifts_data = effective_shifts.flat<int32_t>().data(); const bool is_output_scales_scalar = output_scales.dims() == 0; if (!is_output_scales_scalar) { const float* output_scales_data = output_scales.flat<float>().data(); for (int64_t out_c = 0; out_c < output_depth; ++out_c) { const double effective_multiplier = static_cast<double>(lhs_scale) * rhs_scales_data[out_c] / output_scales_data[out_c]; TF_RETURN_IF_ERROR(QuantizeMultiplier( effective_multiplier, effective_quantized_multipliers_data[out_c], effective_shifts_data[out_c])); } } else { const float output_scale = output_scales.scalar<float>()(); for (int64_t out_c = 0; out_c < output_depth; ++out_c) { const double effective_multiplier = static_cast<double>(lhs_scale) * rhs_scales_data[out_c] / output_scale; TF_RETURN_IF_ERROR(QuantizeMultiplier( effective_multiplier, effective_quantized_multipliers_data[out_c], effective_shifts_data[out_c])); } } const int32_t* output_zero_points_data = output_zero_points.flat<int32_t>().data(); DotWithAccFunctionAndOutputFunction<Tin, Tin, Tout>( lhs, rhs, output, [lhs_zero_point, rhs_zero_points_data](Tin lhs_val, Tin rhs_val, int64_t b, int64_t out_c) { return (static_cast<int32_t>(lhs_val) - lhs_zero_point) * (static_cast<int32_t>(rhs_val) - rhs_zero_points_data[out_c]); }, [effective_quantized_multipliers_data, effective_shifts_data, output_zero_points_data, output_quantization_min_val, output_quantization_max_val, is_output_scales_scalar](int32_t acc, int64_t b, int64_t out_c) { return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>( acc, effective_quantized_multipliers_data[out_c], effective_shifts_data[out_c], 0, output_zero_points_data[is_output_scales_scalar ? 
0 : out_c], output_quantization_min_val, output_quantization_max_val); }); return absl::OkStatus(); } template <typename Tlhs, typename Trhs> void EvalLhsPerBatchAndRhsPerTensorQuantizedDot( OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const Tensor& lhs_scales, const Tensor& lhs_zero_points, float rhs_scale, int32_t rhs_zero_point, Tensor& output) { const float* lhs_scales_data = lhs_scales.flat<float>().data(); const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); DotWithAccFunctionAndOutputFunction<Tlhs, Trhs, float>( lhs, rhs, output, [lhs_zero_points_data, rhs_zero_point](Tlhs lhs_val, Trhs rhs_val, int64_t b, int64_t out_c) { return (static_cast<int32_t>(lhs_val) - lhs_zero_points_data[b]) * (static_cast<int32_t>(rhs_val) - rhs_zero_point); }, [lhs_scales_data, rhs_scale](int32_t acc, int64_t b, int64_t out_c) { return acc * lhs_scales_data[b] * rhs_scale; }); } template <typename Tlhs, typename Trhs> void EvalLhsPerBatchAndRhsPerChannelQuantizedDot( const Tensor& lhs, const Tensor& rhs, const Tensor& lhs_scales, const Tensor& lhs_zero_points, const Tensor& rhs_scales, const Tensor& rhs_zero_points, Tensor& output) { const float* lhs_scales_data = lhs_scales.flat<float>().data(); const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); const float* rhs_scales_data = rhs_scales.flat<float>().data(); const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data(); DotWithAccFunctionAndOutputFunction<Tlhs, Trhs, float>( lhs, rhs, output, [lhs_zero_points_data, rhs_zero_points_data](Tlhs lhs_val, Trhs rhs_val, int64_t b, int64_t out_c) { return (static_cast<int32_t>(lhs_val) - lhs_zero_points_data[b]) * (static_cast<int32_t>(rhs_val) - rhs_zero_points_data[out_c]); }, [lhs_scales_data, rhs_scales_data](int32_t acc, int64_t b, int64_t out_c) { return acc * lhs_scales_data[b] * rhs_scales_data[out_c]; }); } template <typename Tin, typename Tout> Status EvalQuantizedDot(OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const Tensor& lhs_scales, const Tensor& lhs_zero_points, const Tensor& rhs_scales, const Tensor& rhs_zero_points, const Tensor& output_scales, const Tensor& output_zero_points, int output_quantization_min_val, int output_quantization_max_val, Tensor& output) { const float lhs_scale = lhs_scales.scalar<float>()(); const int32_t lhs_zero_point = lhs_zero_points.scalar<int32_t>()(); if (rhs_scales.dims() != 0) { return EvalLhsPerTensorAndRhsPerChannelQuantizedDot<Tin, Tout>( context, lhs, rhs, lhs_scale, lhs_zero_point, rhs_scales, rhs_zero_points, output_scales, output_zero_points, output_quantization_min_val, output_quantization_max_val, output); } else { const float rhs_scale = rhs_scales.scalar<float>()(); const int32_t rhs_zero_point = rhs_zero_points.scalar<int32_t>()(); const float output_scale = output_scales.scalar<float>()(); const int32_t output_zero_point = output_zero_points.scalar<int32_t>()(); return EvalLhsPerTensorAndRhsPerTensorQuantizedDot<Tin, Tout>( lhs, rhs, lhs_scale, lhs_zero_point, rhs_scale, rhs_zero_point, output_scale, output_zero_point, output_quantization_min_val, output_quantization_max_val, output); } } template <typename Trhs> Status EvalHybridDot(OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const Tensor& rhs_scales, const Tensor& rhs_zero_points, Tensor& output) { const int64_t batches = lhs.dim_size(0); Tensor lhs_quantized; TF_RETURN_IF_ERROR( context->allocate_temp(DT_QINT8, lhs.shape(), &lhs_quantized)); Tensor lhs_scales; 
TF_RETURN_IF_ERROR(context->allocate_temp(DT_FLOAT, {batches}, &lhs_scales)); Tensor lhs_zero_points; TF_RETURN_IF_ERROR( context->allocate_temp(DT_INT32, {batches}, &lhs_zero_points)); float* lhs_scales_data = lhs_scales.flat<float>().data(); int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data(); auto lhs_tensor = lhs.template tensor<float, 2>(); auto lhs_quantized_tensor = lhs_quantized.template tensor<qint8, 2>(); for (int64_t b = 0; b < batches; ++b) { TF_RETURN_IF_ERROR(AsymmetricQuantize( lhs_tensor.template chip<0>(b), -128, 127, lhs_scales_data[b], lhs_zero_points_data[b], lhs_quantized_tensor.template chip<0>(b))); } if (rhs_scales.dims() != 0) { EvalLhsPerBatchAndRhsPerChannelQuantizedDot<qint8, Trhs>( lhs_quantized, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output); } else { EvalLhsPerBatchAndRhsPerTensorQuantizedDot<qint8, Trhs>( context, lhs_quantized, rhs, lhs_scales, lhs_zero_points, rhs_scales.scalar<float>()(), rhs_zero_points.scalar<int32_t>()(), output); } return absl::OkStatus(); } } template <typename Tin, typename Tout> class UniformQuantizedDotOp : public OpKernel { public: explicit UniformQuantizedDotOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES(context, (std::is_same<Tin, qint8>()), InvalidArgument("Unsupported lhs/rhs type.")); OP_REQUIRES(context, (std::is_same<Tout, qint32>()), InvalidArgument("Unsupported output type.")); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val", &output_quantization_min_val_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val", &output_quantization_max_val_)); int lhs_quantization_axis; OP_REQUIRES_OK(context, context->GetAttr("lhs_quantization_axis", &lhs_quantization_axis)); OP_REQUIRES( context, (lhs_quantization_axis == -1), InvalidArgument("lhs_quantization_axis Attr must be -1 (per-tensor).")); int rhs_quantization_axis; OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis", &rhs_quantization_axis)); OP_REQUIRES(context, (rhs_quantization_axis == 1 || rhs_quantization_axis == -1), InvalidArgument("rhs_quantization_axis Attr must be 1 " "(per-channel) or -1 (per-tensor).")); int output_quantization_axis; OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis", &output_quantization_axis)); OP_REQUIRES( context, (output_quantization_axis == 1 || output_quantization_axis == -1), InvalidArgument("output_quantization_axis Attr must be 1 " "(per-channel) or -1 (per-tensor).")); } void Compute(OpKernelContext* context) override { const Tensor& lhs = context->input(0); const Tensor& rhs = context->input(1); const Tensor& lhs_scales = context->input(2); const Tensor& lhs_zero_points = context->input(3); const Tensor& rhs_scales = context->input(4); const Tensor& rhs_zero_points = context->input(5); const Tensor& output_scales = context->input(6); const Tensor& output_zero_points = context->input(7); OP_REQUIRES(context, (AllElementsPositive<float>(lhs_scales)), InvalidArgument("lhs scales elements must be all positive.")); OP_REQUIRES(context, (AllElementsPositive<float>(rhs_scales)), InvalidArgument("rhs scales elements must be all positive.")); OP_REQUIRES( context, (AllElementsPositive<float>(output_scales)), InvalidArgument("output scales elements must be all positive.")); OP_REQUIRES_OK(context, DotInputShapeValid(lhs.shape(), rhs.shape())); OP_REQUIRES( context, (lhs_scales.IsSameSize(lhs_zero_points) && lhs_scales.dims() == 0), InvalidArgument( "lhs scales/zero_points must be all scalar tensors. 
Given: ", lhs_scales.shape().DebugString(), lhs_zero_points.shape().DebugString())); OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid( rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(), rhs_scales.dims() == 0 ? -1 : 1)); TensorShape output_shape({lhs.dim_size(0), rhs.dim_size(1)}); OP_REQUIRES_OK( context, QuantizationAxisAndShapeValid( output_shape, output_scales.shape(), output_zero_points.shape(), output_scales.dims() == 0 ? -1 : 1)); OP_REQUIRES( context, (rhs_scales.dims() > 0 || output_scales.dims() == 0), InvalidArgument( "If rhs is per-tensor quantized, output must be also per-tensor " "quantized. Given rhs scales and zero_points of shape ", rhs_scales.shape().DebugString(), " but given output scales and zero_points of shape ", output_scales.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({lhs.dim_size(0), rhs.dim_size(1)}), &output)); OP_REQUIRES_OK( context, EvalQuantizedDot<Tin, Tout>( context, lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, output_zero_points, output_quantization_min_val_, output_quantization_max_val_, *output)); } private: int output_quantization_min_val_; int output_quantization_max_val_; }; template <typename Tlhs, typename Trhs, typename Tout> class UniformQuantizedDotHybridOp : public OpKernel { public: explicit UniformQuantizedDotHybridOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES(context, (std::is_same<Tlhs, float>()), InvalidArgument("Unsupported lhs type.")); OP_REQUIRES(context, (std::is_same<Trhs, qint8>()), InvalidArgument("Unsupported rhs type.")); OP_REQUIRES(context, (std::is_same<Tout, float>()), InvalidArgument("Unsupported output type.")); int rhs_quantization_axis; OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis", &rhs_quantization_axis)); OP_REQUIRES(context, (rhs_quantization_axis == 1 || rhs_quantization_axis == -1), InvalidArgument("rhs_quantization_axis Attr must be 1 " "(per-channel) or -1 (per-tensor).")); } void Compute(OpKernelContext* context) override { const Tensor& lhs = context->input(0); const Tensor& rhs = context->input(1); const Tensor& rhs_scales = context->input(2); const Tensor& rhs_zero_points = context->input(3); OP_REQUIRES_OK(context, DotInputShapeValid(lhs.shape(), rhs.shape())); OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid( rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(), rhs_scales.dims() == 0 ? -1 : 1)); OP_REQUIRES(context, AllElementsPositive<float>(rhs_scales), InvalidArgument("rhs scales elements must be all positive.")); Tensor* output = nullptr; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({lhs.dim_size(0), rhs.dim_size(1)}), &output)); OP_REQUIRES_OK(context, EvalHybridDot<Trhs>(context, lhs, rhs, rhs_scales, rhs_zero_points, *output)); } }; REGISTER_KERNEL_BUILDER(Name("UniformQuantizedDot") .Device(DEVICE_CPU) .TypeConstraint<qint8>("Tin") .TypeConstraint<qint32>("Tout"), UniformQuantizedDotOp<qint8, qint32>); REGISTER_KERNEL_BUILDER(Name("UniformQuantizedDotHybrid") .Device(DEVICE_CPU) .TypeConstraint<float>("Tlhs") .TypeConstraint<qint8>("Trhs") .TypeConstraint<float>("Tout"), UniformQuantizedDotHybridOp<float, qint8, float>); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { class UniformQuantizedDotTest : public OpsTestBase { protected: }; TEST_F(UniformQuantizedDotTest, PerTensorQuantized) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedDot") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", -128) .Attr("lhs_quantization_max_val", 127) .Attr("rhs_quantization_min_val", -128) .Attr("rhs_quantization_max_val", 127) .Attr("output_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("output_quantization_max_val", static_cast<int32_t>(2147483647)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {0.25}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-12, -8, -4, -4, 16, 36}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedDotTest, PerChannelQuantized) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedDot") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", -128) .Attr("lhs_quantization_max_val", 127) .Attr("rhs_quantization_min_val", -128) .Attr("rhs_quantization_max_val", 127) .Attr("rhs_quantization_axis", 1) .Attr("output_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("output_quantization_max_val", static_cast<int32_t>(2147483647)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 4, 3, 4, 7, 6}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); AddInputFromArray<float>(TensorShape({}), {0.25}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-12, 4, -4, -4, 52, 36}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedDotTest, PerTensorQuantizedEffectiveMultiplierOne) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedDot") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", 
DT_QINT32) .Attr("lhs_quantization_min_val", -128) .Attr("lhs_quantization_max_val", 127) .Attr("rhs_quantization_min_val", -128) .Attr("rhs_quantization_max_val", 127) .Attr("output_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("output_quantization_max_val", static_cast<int32_t>(2147483647)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {0.25}); AddInputFromArray<int32>(TensorShape({}), {-4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-2, -1, 0, 0, 5, 10}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedDotTest, PerChannelQuantizedEffectiveMultiplierOne) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedDot") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_QINT32) .Attr("lhs_quantization_min_val", -128) .Attr("lhs_quantization_max_val", 127) .Attr("rhs_quantization_min_val", -128) .Attr("rhs_quantization_max_val", 127) .Attr("rhs_quantization_axis", 1) .Attr("output_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("output_quantization_max_val", static_cast<int32_t>(2147483647)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4}); AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({3}), {0.5, 1.0, 0.5}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); AddInputFromArray<float>(TensorShape({3}), {0.25, 0.5, 0.25}); AddInputFromArray<int32>(TensorShape({3}), {4, 8, 4}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {6, 9, 8, 8, 7, 18}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedDotTest, HybridPerTensorQuantized) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedDotHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", -128) .Attr("rhs_quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2}), {-32.2, -12.1, 10.7, 11.6}); AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<float>(TensorShape({}), {2.0}); AddInputFromArray<int32>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {16.0, -72.6, -161.2, 25.0, 69.6, 114.2}); test::ExpectClose(expected, *GetOutput(0), 0.1, 0.01); } TEST_F(UniformQuantizedDotTest, HybridPerChannelQuantized) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedDotHybrid") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_QINT8)) 
.Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tlhs", DT_FLOAT) .Attr("Trhs", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("rhs_quantization_min_val", -128) .Attr("rhs_quantization_max_val", 127) .Attr("rhs_quantization_axis", 1) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 2}), {-32.2, -12.1, 10.7, 11.6}); AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6}); AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0}); AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {16.0, 209.2, -161.2, 25.0, -39.2, 114.2}); test::ExpectClose(expected, *GetOutput(0), 0.1, 0.01); } }
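As a sanity check on the PerTensorQuantized test above, the standalone sketch below re-derives the expected output {-12, -8, -4, -4, 16, 36} by dequantizing both operands, doing the dot product in float, and re-quantizing with the output scale and zero point. All constants are copied from the test; the program itself is illustrative and not part of the TensorFlow sources (the kernel uses fixed-point arithmetic rather than this float reference).

// Illustrative cross-check of the PerTensorQuantized test case above:
// dequantize -> float matmul -> requantize reproduces the expected output.
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const std::vector<int8_t> lhs = {1, 2, 3, 4};        // 2x2, scale 0.5, zp 1
  const std::vector<int8_t> rhs = {1, 2, 3, 4, 5, 6};  // 2x3, scale 2.0, zp 2
  const float lhs_scale = 0.5f, rhs_scale = 2.0f, out_scale = 0.25f;
  const int32_t lhs_zp = 1, rhs_zp = 2, out_zp = -20;

  for (int b = 0; b < 2; ++b) {
    for (int c = 0; c < 3; ++c) {
      float acc = 0.0f;
      for (int d = 0; d < 2; ++d) {
        const float l = lhs_scale * (lhs[b * 2 + d] - lhs_zp);  // dequantize
        const float r = rhs_scale * (rhs[d * 3 + c] - rhs_zp);
        acc += l * r;
      }
      const int32_t q =
          static_cast<int32_t>(std::lround(acc / out_scale)) + out_zp;
      std::cout << q << (c == 2 ? '\n' : ' ');
    }
  }
  // Prints: -12 -8 -4 / -4 16 36, matching test::FillValues in the test.
  return 0;
}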
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8933e6bb-c4c3-4af6-9261-521952ef7498
cpp
tensorflow/tensorflow
uniform_quantized_add_op
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op_test.cc
#include <algorithm> #include <vector> #include "absl/algorithm/container.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" namespace tensorflow { namespace { using errors::InvalidArgument; absl::StatusOr<TensorShape> CalculateOutputShape(const TensorShape& lhs_shape, const TensorShape& rhs_shape) { if (lhs_shape.dims() == 0) { return rhs_shape; } else if (rhs_shape.dims() == 0) { return lhs_shape; } std::vector<int64_t> reversed_output_shape; int l_dim = lhs_shape.dims() - 1; int r_dim = rhs_shape.dims() - 1; while (l_dim >= 0 || r_dim >= 0) { const int64_t l_dim_size = l_dim >= 0 ? lhs_shape.dim_size(l_dim) : 1; const int64_t r_dim_size = r_dim >= 0 ? rhs_shape.dim_size(r_dim) : 1; if (l_dim_size != 1 && r_dim_size != 1 && l_dim_size != r_dim_size) { return InvalidArgument("Cannot Add tensors of shapes: ", lhs_shape.DebugString(), rhs_shape.DebugString()); } reversed_output_shape.push_back(l_dim_size == 1 ? r_dim_size : l_dim_size); --l_dim; --r_dim; } absl::c_reverse(reversed_output_shape); TensorShape output_shape; TF_RETURN_IF_ERROR( TensorShape::BuildTensorShape(reversed_output_shape, &output_shape)); return output_shape; } template <typename T> void QuantizedAdd(const Tensor& lhs, const Tensor& rhs, const Tensor& output_zero_points, int output_quantization_min_val, int output_quantization_max_val, int lhs_quantization_axis, int rhs_quantization_axis, int output_quantizaiton_axis, Tensor& output) { const T* lhs_data = lhs.flat<T>().data(); const T* rhs_data = rhs.flat<T>().data(); T* output_data = output.flat<T>().data(); const int32* output_zero_points_data = output_zero_points.flat<int32>().data(); for (int64_t output_idx = 0; output_idx < output.NumElements(); ++output_idx) { int64_t output_idx_remain = output_idx; int64_t lhs_idx = 0; int64_t rhs_idx = 0; int64_t lhs_inner_dim_size = 1; int64_t rhs_inner_dim_size = 1; int64_t output_zero_points_idx_of_quantization_axis = 0; for (int output_dim = output.dims() - 1; output_dim >= 0; --output_dim) { const int64_t output_idx_of_dim = output_idx_remain % output.dim_size(output_dim); output_idx_remain /= output.dim_size(output_dim); if (output_quantizaiton_axis == output_dim) { output_zero_points_idx_of_quantization_axis = output_idx_of_dim; } const int lhs_dim = output_dim - (output.dims() - lhs.dims()); if (lhs_dim >= 0) { const int64_t lhs_idx_of_dim = lhs.dim_size(lhs_dim) == 1 ? 0 : output_idx_of_dim; lhs_idx += lhs_idx_of_dim * lhs_inner_dim_size; lhs_inner_dim_size *= lhs.dim_size(lhs_dim); } const int rhs_dim = output_dim - (output.dims() - rhs.dims()); if (rhs_dim >= 0) { const int64_t rhs_idx_of_dim = rhs.dim_size(rhs_dim) == 1 ? 
0 : output_idx_of_dim; rhs_idx += rhs_idx_of_dim * rhs_inner_dim_size; rhs_inner_dim_size *= rhs.dim_size(rhs_dim); } } const int32_t output_zero_point = output_zero_points_data[output_zero_points_idx_of_quantization_axis]; const int32_t unclamped = static_cast<int32_t>(lhs_data[lhs_idx]) + static_cast<int32_t>(rhs_data[rhs_idx]) + output_zero_point; output_data[output_idx] = static_cast<T>(std::clamp( unclamped, output_quantization_min_val, output_quantization_max_val)); } } template <typename T> Status EvalQuantizedAdd(OpKernelContext* context, const Tensor& lhs, const Tensor& rhs, const Tensor& lhs_scales, const Tensor& lhs_zero_points, const Tensor& rhs_scales, const Tensor& rhs_zero_points, const Tensor& output_scales, const Tensor& output_zero_points, int output_quantization_min_val, int output_quantization_max_val, int lhs_quantization_axis, int rhs_quantization_axis, int output_quantization_axis, Tensor& output) { const DataType dtype = DataTypeToEnum<T>::v(); Tensor zeros_of_output_scales_shape; TF_RETURN_IF_ERROR(context->allocate_temp(DT_INT32, output_scales.shape(), &zeros_of_output_scales_shape)); zeros_of_output_scales_shape.flat<int32_t>().setZero(); Tensor lhs_requantized; TF_RETURN_IF_ERROR( context->allocate_temp(dtype, lhs.shape(), &lhs_requantized)); const int lhs_requantize_output_quantization_axis = output_quantization_axis == -1 ? -1 : lhs_quantization_axis; TF_RETURN_IF_ERROR(EvalRequantize<T, T>( context, lhs, lhs_scales, lhs_zero_points, output_scales, zeros_of_output_scales_shape, lhs_quantization_axis, lhs_requantize_output_quantization_axis, std::numeric_limits<T>::min(), std::numeric_limits<T>::max(), lhs_requantized)); Tensor rhs_requantized; TF_RETURN_IF_ERROR( context->allocate_temp(dtype, rhs.shape(), &rhs_requantized)); TF_RETURN_IF_ERROR(EvalRequantize<T, T>( context, rhs, rhs_scales, rhs_zero_points, output_scales, zeros_of_output_scales_shape, rhs_quantization_axis, output_quantization_axis, std::numeric_limits<T>::min(), std::numeric_limits<T>::max(), rhs_requantized)); QuantizedAdd<T>(lhs_requantized, rhs_requantized, output_zero_points, output_quantization_min_val, output_quantization_max_val, lhs_quantization_axis, rhs_quantization_axis, output_quantization_axis, output); return absl::OkStatus(); } } template <typename T> class UniformQuantizedAddOp : public OpKernel { public: explicit UniformQuantizedAddOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES(context, (std::is_same<T, qint32>()), InvalidArgument("Unsupported operand type.")); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val", &output_quantization_min_val_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val", &output_quantization_max_val_)); OP_REQUIRES_OK(context, context->GetAttr("lhs_quantization_axis", &lhs_quantization_axis_)); OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis", &rhs_quantization_axis_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis", &output_quantization_axis_)); OP_REQUIRES( context, (lhs_quantization_axis_ >= -1 && rhs_quantization_axis_ >= -1 && output_quantization_axis_ >= -1), InvalidArgument("lhs, rhs and output quantization_axis must be -1 or " "within [0, dims)")); } void Compute(OpKernelContext* context) override { const Tensor& lhs = context->input(0); const Tensor& rhs = context->input(1); const Tensor& lhs_scales = context->input(2); const Tensor& lhs_zero_points = context->input(3); const Tensor& rhs_scales = context->input(4); const Tensor& rhs_zero_points = 
context->input(5); const Tensor& output_scales = context->input(6); const Tensor& output_zero_points = context->input(7); OP_REQUIRES_OK( context, QuantizationAxisAndShapeValid(lhs.shape(), lhs_scales.shape(), lhs_zero_points.shape(), lhs_quantization_axis_)); OP_REQUIRES_OK( context, QuantizationAxisAndShapeValid(rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(), rhs_quantization_axis_)); auto output_shape_status = CalculateOutputShape(lhs.shape(), rhs.shape()); OP_REQUIRES_OK(context, output_shape_status.status()); const auto& output_shape = output_shape_status.value(); OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid( output_shape, output_scales.shape(), output_zero_points.shape(), output_quantization_axis_)); OP_REQUIRES( context, (!(lhs_quantization_axis_ >= 0 && output_quantization_axis_ >= 0) || (lhs.dims() - lhs_quantization_axis_ == output_shape.dims() - output_quantization_axis_)), InvalidArgument("If lhs and output is both per-axis quantized, the " "quantization axis must match.")); OP_REQUIRES( context, (!(rhs_quantization_axis_ >= 0 && output_quantization_axis_ >= 0) || (rhs.dims() - rhs_quantization_axis_ == output_shape.dims() - output_quantization_axis_)), InvalidArgument("If rhs and output is both per-axis quantized, the " "quantization axis must match.")); OP_REQUIRES(context, (AllElementsPositive<float>(lhs_scales) && AllElementsPositive<float>(rhs_scales) && AllElementsPositive<float>(output_scales)), InvalidArgument( "lhs/rhs/output scales elements must be all positive.")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); OP_REQUIRES_OK( context, EvalQuantizedAdd<T>( context, lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales, rhs_zero_points, output_scales, output_zero_points, output_quantization_min_val_, output_quantization_max_val_, lhs_quantization_axis_, rhs_quantization_axis_, output_quantization_axis_, *output)); } private: int lhs_quantization_axis_; int rhs_quantization_axis_; int output_quantization_axis_; int output_quantization_min_val_; int output_quantization_max_val_; }; REGISTER_KERNEL_BUILDER( Name("UniformQuantizedAdd").Device(DEVICE_CPU).TypeConstraint<qint32>("T"), UniformQuantizedAddOp<qint32>); }
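CalculateOutputShape above applies numpy-style right-aligned broadcasting to the two operand shapes. The sketch below restates the same rule on plain vectors; BroadcastShapes is a hypothetical helper name used only for illustration, not the TensorFlow API.

// Sketch of right-aligned shape broadcasting as used by CalculateOutputShape:
// walk both shapes from the trailing dimension, treating a size-1 or missing
// dimension as broadcastable. Returns nullopt on an incompatible pair.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

std::optional<std::vector<int64_t>> BroadcastShapes(
    const std::vector<int64_t>& lhs, const std::vector<int64_t>& rhs) {
  std::vector<int64_t> out;
  int l = static_cast<int>(lhs.size()) - 1;
  int r = static_cast<int>(rhs.size()) - 1;
  while (l >= 0 || r >= 0) {
    const int64_t ls = l >= 0 ? lhs[l] : 1;
    const int64_t rs = r >= 0 ? rhs[r] : 1;
    if (ls != 1 && rs != 1 && ls != rs) return std::nullopt;  // incompatible
    out.push_back(ls == 1 ? rs : ls);
    --l;
    --r;
  }
  std::reverse(out.begin(), out.end());
  return out;
}

int main() {
  // {2, 1, 3} broadcast with {4, 3} -> {2, 4, 3}.
  const auto shape = BroadcastShapes({2, 1, 3}, {4, 3});
  if (shape) {
    for (int64_t d : *shape) std::cout << d << ' ';
    std::cout << '\n';
  }
  return 0;
}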
#include <limits> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { namespace { constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min(); constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max(); } class UniformQuantizedAddOpTest : public OpsTestBase { protected: }; TEST_F(UniformQuantizedAddOpTest, InvalidShape) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", 1) .Attr("rhs_quantization_axis", 0) .Attr("output_quantization_axis", 1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({2}), {-100, 0}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 4}); AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20}); AddInputFromArray<float>(TensorShape({2}), {2, 3}); AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 4}); AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40}); EXPECT_TRUE(absl::IsInvalidArgument(RunOpKernel())); } TEST_F(UniformQuantizedAddOpTest, PerChannelSameScale) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", 1) .Attr("rhs_quantization_axis", 0) .Attr("output_quantization_axis", 1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 4}); AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 4}); AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 4}); AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-126, -4, 118, -120, 2, 124}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleLhsMultiDims) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) 
.Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-126, -24, 78, -120, -18, 84}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleRhsMultiDims) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100}); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-126, -24, 78, -120, -18, 84}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerChannelDifferentScale) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", 1) .Attr("rhs_quantization_axis", 0) .Attr("output_quantization_axis", 1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); 
AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 1}); AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20}); AddInputFromArray<float>(TensorShape({3}), {1, 3, 2}); AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0}); AddInputFromArray<float>(TensorShape({3}), {4, 3, 2}); AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-58, -4, 129, -55, 2, 132}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerChannelDifferentScaleBroadcastLhs) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", 1) .Attr("rhs_quantization_axis", 1) .Attr("output_quantization_axis", 1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({1, 3}), {-100, 0, 100}); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<float>(TensorShape({3}), {1, 3, 2}); AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 1}); AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20}); AddInputFromArray<float>(TensorShape({3}), {4, 3, 2}); AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-58, -4, 129, -55, 2, 132}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerTensorDifferentScale) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {1}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {4}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-58, -32, -6, -55, -29, -3}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } 
TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleTensorAddScalar) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({}), {-100}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-126, -124, -122, -120, -118, -116}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleScalarAddTensor) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({}), {-100}); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-126, -124, -122, -120, -118, -116}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleScalarAddScalar) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) 
.Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({}), {-6}); AddInputFromArray<qint32>(TensorShape({}), {-100}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({})); test::FillValues<qint32>(&expected, {-126}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, TensorAddEmptyTensor) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 1, 1}), {-6, -12}); AddInputFromArray<qint32>(TensorShape({2, 0, 1}), {}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 0, 1})); test::FillValues<qint32>(&expected, {}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedAddOpTest, ScalarAddEmptyTensor) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("lhs_quantization_axis", -1) .Attr("rhs_quantization_axis", -1) .Attr("output_quantization_axis", -1) .Attr("lhs_quantization_min_val", kInt32Min) .Attr("lhs_quantization_max_val", kInt32Max) .Attr("rhs_quantization_min_val", kInt32Min) .Attr("rhs_quantization_max_val", kInt32Max) .Attr("output_quantization_min_val", kInt32Min) .Attr("output_quantization_max_val", kInt32Max) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({}), {-6}); AddInputFromArray<qint32>(TensorShape({2, 0, 1}), {}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-40}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 0, 1})); test::FillValues<qint32>(&expected, {}); test::ExpectTensorEqual<qint32>(expected, 
*GetOutput(0)); } }
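The PerTensorDifferentScale case above can be cross-checked against the reference dequantize-add-requantize math. The sketch below reproduces the expected tensor {-58, -32, -6, -55, -29, -3}; constants are copied from the test, the rank-1 rhs broadcast is written out explicitly, and the program is illustrative only (the kernel itself requantizes both operands into the output scale before adding).

// Illustrative cross-check of the PerTensorDifferentScale test case above.
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const std::vector<int32_t> lhs = {-6, -4, -2, 0, 2, 4};  // 2x3, scale 2, zp -20
  const std::vector<int32_t> rhs = {-100, 0, 100};         // 3,   scale 1, zp 0
  const float lhs_scale = 2.0f, rhs_scale = 1.0f, out_scale = 4.0f;
  const int32_t lhs_zp = -20, rhs_zp = 0, out_zp = -40;

  for (int b = 0; b < 2; ++b) {
    for (int c = 0; c < 3; ++c) {
      // Dequantize, add (rhs broadcasts over the batch dimension), requantize.
      const float real = lhs_scale * (lhs[b * 3 + c] - lhs_zp) +
                         rhs_scale * (rhs[c] - rhs_zp);
      const int32_t q =
          static_cast<int32_t>(std::lround(real / out_scale)) + out_zp;
      std::cout << q << (c == 2 ? '\n' : ' ');
    }
  }
  // Prints: -58 -32 -6 / -55 -29 -3, matching the expected qint32 tensor.
  return 0;
}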
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0d80ba27-9a46-4b16-9ac8-5a387fe5246a
cpp
tensorflow/tensorflow
uniform_requantize_op
tensorflow/core/kernels/uniform_quant_ops/uniform_requantize_op.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_requantize_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { using tensorflow::errors::InvalidArgument; template <typename Tin, typename Tout> class UniformRequantizeOp : public OpKernel { public: explicit UniformRequantizeOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES(context, (std::is_same<Tin, qint32>() || std::is_same<Tin, qint8>()), InvalidArgument("Unsupported input type.")); OP_REQUIRES(context, (std::is_same<Tout, qint8>()), InvalidArgument("Unsupported output type.")); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val", &output_quantization_min_val_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val", &output_quantization_max_val_)); OP_REQUIRES_OK(context, context->GetAttr("input_quantization_axis", &input_quantization_axis_)); OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis", &output_quantization_axis_)); OP_REQUIRES( context, (input_quantization_axis_ >= -1), InvalidArgument("input_quantization_axis must be >= -1, given: ", input_quantization_axis_)); OP_REQUIRES( context, (output_quantization_axis_ >= -1), InvalidArgument("output_quantization_axis must be >= -1, given: ", output_quantization_axis_)); OP_REQUIRES( context, (!(input_quantization_axis_ >= 0 && output_quantization_axis_ >= 0) || input_quantization_axis_ == output_quantization_axis_), InvalidArgument("If input and output is both per-axis quantized, the " "quantization axis must be same.")); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& input_scales = context->input(1); const Tensor& input_zero_points = context->input(2); const Tensor& output_scales = context->input(3); const Tensor& output_zero_points = context->input(4); OP_REQUIRES_OK(context, (QuantizationAxisAndShapeValid( input.shape(), input_scales.shape(), input_zero_points.shape(), input_quantization_axis_))); OP_REQUIRES_OK(context, (QuantizationAxisAndShapeValid( input.shape(), output_scales.shape(), output_zero_points.shape(), output_quantization_axis_))); OP_REQUIRES( context, (AllElementsPositive<float>(input_scales) && AllElementsPositive<float>(output_scales)), InvalidArgument("input/output scales elements must be all positive.")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); OP_REQUIRES_OK( context, EvalRequantize<Tin, Tout>( context, input, input_scales, input_zero_points, output_scales, output_zero_points, input_quantization_axis_, output_quantization_axis_, output_quantization_min_val_, output_quantization_max_val_, *output)); } private: int input_quantization_axis_; int32_t input_quantization_min_val_; int32_t input_quantization_max_val_; int output_quantization_axis_; int32_t output_quantization_min_val_; int32_t output_quantization_max_val_; }; REGISTER_KERNEL_BUILDER(Name("UniformRequantize") .Device(DEVICE_CPU) .TypeConstraint<qint8>("Tin") .TypeConstraint<qint8>("Tout"), UniformRequantizeOp<qint8, qint8>); REGISTER_KERNEL_BUILDER(Name("UniformRequantize") .Device(DEVICE_CPU) .TypeConstraint<qint32>("Tin") .TypeConstraint<qint8>("Tout"), UniformRequantizeOp<qint32, qint8>); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { class UniformRequantizeOpTest : public OpsTestBase { protected: }; TEST_F(UniformRequantizeOpTest, RequantizeInvalidQuantizationAxis) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_axis", -2) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); EXPECT_TRUE(absl::IsInvalidArgument(InitOp())); TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_axis", 0) .Attr("output_quantization_axis", 1) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); EXPECT_TRUE(absl::IsInvalidArgument(InitOp())); TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_axis", 2) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<int32>(TensorShape({}), {0}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<int32>(TensorShape({}), {0}); EXPECT_TRUE(absl::IsInvalidArgument(RunOpKernel())); } TEST_F(UniformRequantizeOpTest, PerTensorToPerTensorReQuantize) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-28, -21, -1, 0, 4, 9}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {-1}); AddInputFromArray<float>(TensorShape({}), {0.125}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3})); test::FillValues<qint8>(&expected, {-127, -100, -20, -16, 0, 20}); 
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } TEST_F(UniformRequantizeOpTest, PerChannelToPerTensorReQuantize) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("input_quantization_axis", 0) .Attr("output_quantization_axis", -1) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-28, -21, -1, -1, 3, 8}); AddInputFromArray<float>(TensorShape({2}), {0.5, 0.6}); AddInputFromArray<int32>(TensorShape({2}), {-1, -2}); AddInputFromArray<float>(TensorShape({}), {0.125}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3})); test::FillValues<qint8>(&expected, {-127, -100, -20, -15, 4, 28}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } TEST_F(UniformRequantizeOpTest, PerTensorToPerChannelReQuantize) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("input_quantization_axis", -1) .Attr("output_quantization_axis", 0) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-28, -21, -1, -1, 3, 8}); AddInputFromArray<float>(TensorShape({}), {0.5}); AddInputFromArray<int32>(TensorShape({}), {-1}); AddInputFromArray<float>(TensorShape({2}), {0.125, 0.3}); AddInputFromArray<int32>(TensorShape({2}), {-20, -10}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3})); test::FillValues<qint8>(&expected, {-127, -100, -20, -10, -3, 5}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } TEST_F(UniformRequantizeOpTest, PerChannelToPerChannelReQuantize) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformRequantize") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT32) .Attr("Tout", DT_QINT8) .Attr("input_quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("input_quantization_max_val", static_cast<int32_t>(2147483647)) .Attr("input_quantization_axis", 0) .Attr("output_quantization_axis", 0) .Attr("output_quantization_min_val", -127) .Attr("output_quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-28, -21, -1, -1, 3, 8}); AddInputFromArray<float>(TensorShape({2}), {0.5, 0.6}); AddInputFromArray<int32>(TensorShape({2}), {-1, -2}); AddInputFromArray<float>(TensorShape({2}), {0.125, 0.3}); AddInputFromArray<int32>(TensorShape({2}), {-20, -10}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3})); test::FillValues<qint8>(&expected, {-127, -100, -20, -8, 0, 10}); test::ExpectTensorEqual<qint8>(expected, 
*GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_requantize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_requantize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
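The UniformRequantize tests above exercise the mapping from one quantized grid to another: dequantize with the input scale/zero point, then requantize with the output parameters and clamp. Below is a minimal standalone sketch of that per-tensor arithmetic, checked against the values in PerTensorToPerTensorReQuantize; the function name and the float-based rounding are illustrative only (the kernel itself works with fixed-point multipliers, so tie-breaking can differ).

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Re-express a quantized value on a new (scale, zero_point) grid and clamp
// to the output range. Mirrors the per-tensor case in the test above.
int32_t requantize_value(int32_t q_in, float in_scale, int32_t in_zero_point,
                         float out_scale, int32_t out_zero_point,
                         int32_t out_min, int32_t out_max) {
  const float real = in_scale * static_cast<float>(q_in - in_zero_point);
  const int32_t q_out =
      static_cast<int32_t>(std::lround(real / out_scale)) + out_zero_point;
  return std::clamp(q_out, out_min, out_max);
}

int main() {
  // Parameters from PerTensorToPerTensorReQuantize:
  // in_scale=0.5, in_zp=-1, out_scale=0.125, out_zp=-20, range [-127, 127].
  const int32_t inputs[] = {-28, -21, -1, 0, 4, 9};
  for (int32_t q : inputs) {
    std::cout << requantize_value(q, 0.5f, -1, 0.125f, -20, -127, 127) << " ";
  }
  std::cout << "\n";  // prints: -127 -100 -20 -16 0 20
}
```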
ce3edf87-4a36-4188-91f1-aa63e44f2bfb
cpp
tensorflow/tensorflow
uniform_quantized_clip_by_value_op
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_clip_by_value_op.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_clip_by_value_op_test.cc
#include <algorithm> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { using errors::InvalidArgument; template <typename T> void EvalQuantizedClipByValue(const Tensor& operand, const Tensor& min, const Tensor& max, int quantization_axis, Tensor& output) { if (quantization_axis >= 0) { auto operand_tensor = operand.template flat_inner_outer_dims<T, 3>(quantization_axis - 1); auto output_tensor = output.template flat_inner_outer_dims<T, 3>(quantization_axis - 1); auto min_tensor = min.flat<T>(); auto max_tensor = max.flat<T>(); const int64_t quantization_dim_size = operand.dim_size(quantization_axis); for (int i = 0; i < quantization_dim_size; ++i) { output_tensor.template chip<1>(i) = operand_tensor.template chip<1>(i) .cwiseMax(min_tensor(i)) .cwiseMin(max_tensor(i)); } } else { output.flat<T>() = operand.flat<T>() .cwiseMax(min.scalar<T>()()) .cwiseMin(max.scalar<T>()()); } } } template <typename T> class UniformQuantizedClipByValueOp : public OpKernel { public: explicit UniformQuantizedClipByValueOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES(context, (std::is_same<T, qint32>()), InvalidArgument("Unsupported operand type.")); OP_REQUIRES_OK(context, context->GetAttr("quantization_axis", &quantization_axis_)); } void Compute(OpKernelContext* context) override { const Tensor& operand = context->input(0); const Tensor& min = context->input(1); const Tensor& max = context->input(2); const Tensor& scales = context->input(3); const Tensor& zero_points = context->input(4); OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid( operand.shape(), scales.shape(), zero_points.shape(), quantization_axis_)); OP_REQUIRES(context, (min.IsSameSize(scales)), InvalidArgument("Input min shape must be same as " "scales/zero_points. Given min of shape ", min.shape().DebugString(), " and scales/zero_points of shape ", scales.shape().DebugString())); OP_REQUIRES(context, (max.IsSameSize(scales)), InvalidArgument("Input max shape must be same as " "scales/zero_points. Given max of shape ", max.shape().DebugString(), " and scales/zero_points of shape ", scales.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, operand.shape(), &output)); EvalQuantizedClipByValue<T>(operand, min, max, quantization_axis_, *output); } private: int quantization_axis_; }; REGISTER_KERNEL_BUILDER(Name("UniformQuantizedClipByValue") .Device(DEVICE_CPU) .TypeConstraint<qint32>("T"), UniformQuantizedClipByValueOp<qint32>); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { class UniformQuantizedClipByValueOpTest : public OpsTestBase { protected: }; TEST_F(UniformQuantizedClipByValueOpTest, PerChannel) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedClipByValue") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("quantization_axis", 1) .Attr("quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("quantization_max_val", static_cast<int32_t>(2147483647)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({3}), {-1, -5, -1}); AddInputFromArray<qint32>(TensorShape({3}), {1, 1, 5}); AddInputFromArray<float>(TensorShape({3}), {2, 3, 4}); AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-1, -4, -1, 0, 1, 4}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } TEST_F(UniformQuantizedClipByValueOpTest, PerTensor) { TF_ASSERT_OK( NodeDefBuilder("test", "UniformQuantizedClipByValue") .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_QINT32)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_QINT32) .Attr("quantization_axis", -1) .Attr("quantization_min_val", static_cast<int32_t>(-2147483648)) .Attr("quantization_max_val", static_cast<int32_t>(2147483647)) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4}); AddInputFromArray<qint32>(TensorShape({}), {-1}); AddInputFromArray<qint32>(TensorShape({}), {1}); AddInputFromArray<float>(TensorShape({}), {2}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3})); test::FillValues<qint32>(&expected, {-1, -1, -1, 0, 1, 1}); test::ExpectTensorEqual<qint32>(expected, *GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_clip_by_value_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_clip_by_value_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
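UniformQuantizedClipByValue, shown above, clamps the qint32 operand between quantized min/max bounds, per channel along quantization_axis; the scale and zero-point inputs describe the quantization but do not enter the arithmetic. Below is a standalone sketch of the per-channel case from the PerChannel test, using plain vectors instead of Tensors; the helper name is illustrative.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Clamp a row-major [rows, cols] matrix column-wise, mirroring
// quantization_axis == 1 in the PerChannel test above.
void clip_per_channel(std::vector<int32_t>& values, int rows, int cols,
                      const std::vector<int32_t>& min_per_col,
                      const std::vector<int32_t>& max_per_col) {
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      int32_t& v = values[r * cols + c];
      v = std::clamp(v, min_per_col[c], max_per_col[c]);
    }
  }
}

int main() {
  // Operand, min and max from the PerChannel test.
  std::vector<int32_t> operand = {-6, -4, -2, 0, 2, 4};
  clip_per_channel(operand, 2, 3, {-1, -5, -1}, {1, 1, 5});
  for (int32_t v : operand) std::cout << v << " ";  // prints: -1 -4 -1 0 1 4
  std::cout << "\n";
}
```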
6024a006-606f-4cb0-881b-383587845c64
cpp
tensorflow/tensorflow
uniform_quantize_op
tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { using tensorflow::errors::InvalidArgument; template <typename Tin, typename Tout> void EvalPerTensorQuantize(const Tensor& input, float scale, int32_t zero_point, int32_t quantization_min_val, int32_t quantization_max_val, Tensor& output) { const float inv_scale = 1.0f / scale; AffineQuantize(input.flat<Tin>(), inv_scale, zero_point, quantization_min_val, quantization_max_val, output.flat<Tout>()); } template <typename Tin, typename Tout> void EvalPerChannelQuantize(const Tensor& input, const Tensor& scales, const Tensor& zero_points, int quantization_axis, int32_t quantization_min_val, int32_t quantization_max_val, Tensor& output) { DCHECK(input.IsSameSize(output)); const float* scales_data = scales.flat<float>().data(); const int32_t* zero_points_data = zero_points.flat<int32_t>().data(); auto input_tensor = input.template flat_inner_outer_dims<Tin, 3>(quantization_axis - 1); auto output_tensor = output.template flat_inner_outer_dims<Tout, 3>(quantization_axis - 1); for (int i = 0; i < output.dim_size(quantization_axis); ++i) { const float inv_scale = 1.0f / scales_data[i]; AffineQuantize(input_tensor.template chip<1>(i), inv_scale, zero_points_data[i], quantization_min_val, quantization_max_val, output_tensor.template chip<1>(i)); } } template <typename Tin, typename Tout> void EvalQuantize(const Tensor& input, const Tensor& scales, const Tensor& zero_points, int quantization_axis, int32_t quantization_min_val, int32_t quantization_max_val, Tensor& output) { if (quantization_axis >= 0) { EvalPerChannelQuantize<Tin, Tout>(input, scales, zero_points, quantization_axis, quantization_min_val, quantization_max_val, output); } else { EvalPerTensorQuantize<Tin, Tout>( input, scales.scalar<float>()(), zero_points.scalar<int32>()(), quantization_min_val, quantization_max_val, output); } } } class UniformQuantizeOp : public OpKernel { public: explicit UniformQuantizeOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("Tin", &tin_)); OP_REQUIRES(context, tin_ == DataType::DT_FLOAT, InvalidArgument("Unsupported input type.")); OP_REQUIRES_OK(context, context->GetAttr("Tout", &tout_)); OP_REQUIRES(context, tout_ == DataType::DT_QINT8 || tout_ == DataType::DT_QINT32, InvalidArgument("Unsupported output type.")); OP_REQUIRES_OK(context, context->GetAttr("quantization_min_val", &quantization_min_val_)); OP_REQUIRES_OK(context, context->GetAttr("quantization_max_val", &quantization_max_val_)); OP_REQUIRES_OK(context, context->GetAttr("quantization_axis", &quantization_axis_)); OP_REQUIRES(context, (quantization_axis_ >= -1), InvalidArgument("quantization_axis must be >= -1, given: ", quantization_axis_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& scales = context->input(1); const Tensor& zero_points = context->input(2); OP_REQUIRES_OK(context, (QuantizationAxisAndShapeValid( input.shape(), scales.shape(), zero_points.shape(), quantization_axis_))); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); if (tout_ == DataType::DT_QINT8) { EvalQuantize<float, qint8>(input, scales, zero_points, quantization_axis_, quantization_min_val_, 
quantization_max_val_, *output); } else { EvalQuantize<float, qint32>(input, scales, zero_points, quantization_axis_, quantization_min_val_, quantization_max_val_, *output); } } private: DataType tin_, tout_; int quantization_axis_; int32_t quantization_min_val_; int32_t quantization_max_val_; }; REGISTER_KERNEL_BUILDER(Name("UniformQuantize") .Device(DEVICE_CPU) .TypeConstraint<float>("Tin") .TypeConstraint("Tout", {DT_QINT8, DT_QINT32}), UniformQuantizeOp); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { class UniformQuantizeOpsTest : public OpsTestBase { protected: }; TEST_F(UniformQuantizeOpsTest, QuantizeInvalidQuantizationAxis) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_FLOAT) .Attr("Tout", DT_QINT8) .Attr("quantization_axis", -2) .Attr("quantization_min_val", -127) .Attr("quantization_max_val", 127) .Finalize(node_def())); EXPECT_TRUE(absl::IsInvalidArgument(InitOp())); TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_FLOAT) .Attr("Tout", DT_QINT8) .Attr("quantization_axis", 2) .Attr("quantization_min_val", -127) .Attr("quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0}); AddInputFromArray<float>(TensorShape({}), {1.0}); AddInputFromArray<int32>(TensorShape({}), {0}); EXPECT_TRUE(absl::IsInvalidArgument(RunOpKernel())); } TEST_F(UniformQuantizeOpsTest, PerTensorQuantize) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_FLOAT) .Attr("Tout", DT_QINT8) .Attr("quantization_axis", -1) .Attr("quantization_min_val", -127) .Attr("quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 3}), {-27.0, -20.0, 0.0, 1.0, 5.0, 10.0}); AddInputFromArray<float>(TensorShape({}), {0.25}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3})); test::FillValues<qint8>(&expected, {-127, -100, -20, -16, 0, 20}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } TEST_F(UniformQuantizeOpsTest, PerChannelQuantize) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantize") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_FLOAT) .Attr("Tout", DT_QINT8) .Attr("quantization_axis", 0) .Attr("quantization_min_val", -127) .Attr("quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<float>(TensorShape({2, 3}), {-27.0, -20.0, 0.0, 1.0, 5.0, 10.0}); AddInputFromArray<float>(TensorShape({2}), {0.25, 0.5}); AddInputFromArray<int32>(TensorShape({2}), {-20, -10}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_QINT8, TensorShape({2, 3})); test::FillValues<qint8>(&expected, {-127, -100, -20, -8, 0, 10}); test::ExpectTensorEqual<qint8>(expected, *GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
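UniformQuantize maps floats onto a quantized grid as q = clamp(round(x / scale) + zero_point, qmin, qmax), per tensor or per channel along quantization_axis. A minimal sketch of the per-tensor case asserted above follows; the helper name is illustrative, and the real kernel routes through AffineQuantize with a precomputed inverse scale.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Per-tensor affine quantization: q = clamp(round(x / scale) + zp, qmin, qmax).
int32_t quantize_value(float x, float scale, int32_t zero_point,
                       int32_t qmin, int32_t qmax) {
  const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
  return std::clamp(q, qmin, qmax);
}

int main() {
  // Parameters from PerTensorQuantize: scale=0.25, zp=-20, range [-127, 127].
  const float inputs[] = {-27.0f, -20.0f, 0.0f, 1.0f, 5.0f, 10.0f};
  for (float x : inputs) {
    std::cout << quantize_value(x, 0.25f, -20, -127, 127) << " ";
  }
  std::cout << "\n";  // prints: -127 -100 -20 -16 0 20
}
```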
8133875b-a6c3-4187-b888-19f5be8d3a03
cpp
tensorflow/tensorflow
math_utils
tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils.cc
tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils_test.cc
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils.h" #include <cmath> #include <cstdint> #include "mlir/Support/LogicalResult.h" namespace mlir::quant::stablehlo { LogicalResult QuantizeMultiplier(double double_multiplier, int32_t& quantized_fraction, int32_t& shift) { if (!std::isfinite(double_multiplier) || double_multiplier <= 0) { return failure(); } const double fraction = std::frexp(double_multiplier, &shift); quantized_fraction = static_cast<int32_t>(std::round(fraction * (1L << 15))); if (quantized_fraction == (1L << 15)) { quantized_fraction /= 2; ++shift; } if (shift < -15) { shift = 0; quantized_fraction = 0; } if (shift > 14) { shift = 14; quantized_fraction = (1LL << 15) - 1; } return success(); } }
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils.h" #include <gtest/gtest.h> #include "mlir/Support/LogicalResult.h" namespace mlir::quant::stablehlo { namespace { TEST(UtilsTest, QuantizeMultiplierNormalMultipliers) { int32_t quantized_fraction; int32_t shift; EXPECT_TRUE(succeeded(QuantizeMultiplier(1.2, quantized_fraction, shift))); EXPECT_EQ(quantized_fraction, 19661); EXPECT_EQ(shift, 1); EXPECT_TRUE(succeeded(QuantizeMultiplier(15.5, quantized_fraction, shift))); EXPECT_EQ(quantized_fraction, 31744); EXPECT_EQ(shift, 4); EXPECT_TRUE(succeeded(QuantizeMultiplier(1, quantized_fraction, shift))); EXPECT_EQ(quantized_fraction, 16384); EXPECT_EQ(shift, 1); } TEST(UtilsTest, QuantizeMultiplierExtremeMultipliers) { int32_t quantized_fraction; int32_t shift; EXPECT_TRUE( succeeded(QuantizeMultiplier(0.00001f, quantized_fraction, shift))); EXPECT_EQ(quantized_fraction, 0); EXPECT_EQ(shift, 0); EXPECT_TRUE(succeeded(QuantizeMultiplier(40000, quantized_fraction, shift))); EXPECT_EQ(quantized_fraction, 32767); EXPECT_EQ(shift, 14); } TEST(UtilsTest, QuantizeMultiplierInvalidArgument) { int32_t quantized_fraction; int32_t shift; EXPECT_FALSE(succeeded(QuantizeMultiplier(0, quantized_fraction, shift))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
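QuantizeMultiplier above decomposes a positive real multiplier into a 15-bit fraction and a power-of-two shift, so that multiplier ≈ quantized_fraction / 2^15 * 2^shift, with shift clamped to [-15, 14] (tiny multipliers collapse to zero, large ones saturate). The small standalone check below verifies that identity against the pairs asserted in the test; decode_multiplier is an illustrative helper, not part of the MLIR utility.

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

// Reconstruct the real multiplier encoded by (quantized_fraction, shift):
// multiplier ~= quantized_fraction / 2^15 * 2^shift.
double decode_multiplier(int32_t quantized_fraction, int32_t shift) {
  return static_cast<double>(quantized_fraction) / (1 << 15) *
         std::ldexp(1.0, shift);
}

int main() {
  // Pairs asserted in QuantizeMultiplierNormalMultipliers above.
  std::cout << decode_multiplier(19661, 1) << "\n";  // ~1.2
  std::cout << decode_multiplier(31744, 4) << "\n";  // 15.5
  std::cout << decode_multiplier(16384, 1) << "\n";  // 1
}
```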
2990a7a8-f107-4a38-9baa-a95ddb80a44f
cpp
tensorflow/tensorflow
uniform_dequantize_op
tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op.cc
tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op_test.cc
#include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h" #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h" namespace tensorflow { namespace { using tensorflow::errors::InvalidArgument; template <typename Tin, typename Tout> void EvalPerTensorDequantize(const Tensor& input, float scale, int32_t zero_point, Tensor& output) { DCHECK(input.IsSameSize(output)); AffineDequantize(input.flat<Tin>(), scale, zero_point, output.flat<Tout>()); } template <typename Tin, typename Tout> void EvalPerChannelDequantize(const Tensor& input, const Tensor& scales, const Tensor& zero_points, int quantization_axis, Tensor& output) { DCHECK(input.IsSameSize(output)); const float* scales_data = scales.flat<float>().data(); const int32_t* zero_points_data = zero_points.flat<int32_t>().data(); auto input_tensor = input.template flat_inner_outer_dims<Tin, 3>(quantization_axis - 1); auto output_tensor = output.template flat_inner_outer_dims<Tout, 3>(quantization_axis - 1); for (int i = 0; i < output.dim_size(quantization_axis); ++i) { AffineDequantize(input_tensor.template chip<1>(i), scales_data[i], zero_points_data[i], output_tensor.template chip<1>(i)); } } template <typename Tin, typename Tout> void EvalDequantize(const Tensor& input, const Tensor& scales, const Tensor& zero_points, int quantization_axis, Tensor& output) { if (quantization_axis >= 0) { EvalPerChannelDequantize<Tin, Tout>(input, scales, zero_points, quantization_axis, output); } else { EvalPerTensorDequantize<Tin, Tout>(input, scales.scalar<float>()(), zero_points.scalar<int32>()(), output); } } } template <typename Tin, typename Tout> class UniformDequantizeOp : public OpKernel { public: explicit UniformDequantizeOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("quantization_axis", &quantization_axis_)); OP_REQUIRES(context, (std::is_same<Tin, qint8>() || std::is_same<Tin, qint32>()), InvalidArgument("Unsupported input type.")); OP_REQUIRES(context, (std::is_same<Tout, float>()), InvalidArgument("Unsupported output type.")); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& scales = context->input(1); const Tensor& zero_points = context->input(2); OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid( input.shape(), scales.shape(), zero_points.shape(), quantization_axis_)); OP_REQUIRES(context, AllElementsPositive<float>(scales), InvalidArgument("rhs scales elements must be all positive.")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); EvalDequantize<Tin, Tout>(input, scales, zero_points, quantization_axis_, *output); } private: int quantization_axis_; }; REGISTER_KERNEL_BUILDER(Name("UniformDequantize") .Device(DEVICE_CPU) .TypeConstraint<qint8>("Tin") .TypeConstraint<float>("Tout"), UniformDequantizeOp<qint8, float>); REGISTER_KERNEL_BUILDER(Name("UniformDequantize") .Device(DEVICE_CPU) .TypeConstraint<qint32>("Tin") .TypeConstraint<float>("Tout"), UniformDequantizeOp<qint32, float>); }
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { class UniformDequantizeOpTest : public OpsTestBase { protected: }; TEST_F(UniformDequantizeOpTest, PerTensorDequantize) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformDequantize") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("quantization_axis", -1) .Attr("quantization_min_val", -128) .Attr("quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 3}), {-128, -100, -20, -16, 0, 20}); AddInputFromArray<float>(TensorShape({}), {0.25}); AddInputFromArray<int32>(TensorShape({}), {-20}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3})); test::FillValues<float>(&expected, {-27.0, -20.0, 0.0, 1.0, 5.0, 10.0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(UniformDequantizeOpTest, PerChannelDequantize) { TF_ASSERT_OK(NodeDefBuilder("test", "UniformDequantize") .Input(FakeInput(DT_QINT8)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("Tin", DT_QINT8) .Attr("Tout", DT_FLOAT) .Attr("quantization_axis", 1) .Attr("quantization_min_val", -128) .Attr("quantization_max_val", 127) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddInputFromArray<qint8>(TensorShape({2, 2, 3}), {-128, -100, -20, -8, 0, 5, 10, 15, 20, 40, 50, 55}); AddInputFromArray<float>(TensorShape({2}), {0.25, 0.5}); AddInputFromArray<int32>(TensorShape({2}), {-20, -10}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 3})); test::FillValues<float>(&expected, {-27.0, -20.0, 0.0, 1.0, 5.0, 7.5, 7.5, 8.75, 10.0, 25.0, 30.0, 32.5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
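UniformDequantize inverts the affine mapping: x = scale * (q - zero_point), selecting per-channel parameters along quantization_axis when it is non-negative. Here is a minimal sketch of the per-tensor case from PerTensorDequantize; dequantize_value is an illustrative name, not the kernel's API.

```cpp
#include <cstdint>
#include <iostream>

// Affine dequantization: x = scale * (q - zero_point).
float dequantize_value(int32_t q, float scale, int32_t zero_point) {
  return scale * static_cast<float>(q - zero_point);
}

int main() {
  // Parameters from PerTensorDequantize: scale=0.25, zero_point=-20.
  const int32_t inputs[] = {-128, -100, -20, -16, 0, 20};
  for (int32_t q : inputs) {
    std::cout << dequantize_value(q, 0.25f, -20) << " ";
  }
  std::cout << "\n";  // prints: -27 -20 0 1 5 10
}
```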
70b0b0ff-804f-41eb-a231-486bed6ccfc1
cpp
tensorflow/tensorflow
banded_triangular_solve_op
tensorflow/core/kernels/linalg/banded_triangular_solve_op.cc
tensorflow/core/kernels/linalg/banded_triangular_solve_op_test.cc
#include "Eigen/Core" #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/kernels/linalg/linalg_ops_common.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/matmul_bcast.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename Scalar> Scalar eigen_conj(const Scalar& scalar) { return Eigen::numext::conj<Scalar>(scalar); } template <typename Scalar> struct SequentialBandedTriangularSolveKernel { using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using ConstMatrixMap = Eigen::Map<const Matrix>; using MatrixMap = Eigen::Map<Matrix>; using RealScalar = typename Eigen::NumTraits<Scalar>::Real; static ConstMatrixMap ConstTensorSliceToEigenMatrix(const Tensor& t, int slice) { return ConstMatrixMap( t.flat<Scalar>().data() + slice * t.dim_size(1) * t.dim_size(2), t.dim_size(1), t.dim_size(2)); } static MatrixMap TensorSliceToEigenMatrix(Tensor* t, int slice) { return MatrixMap( t->flat<Scalar>().data() + slice * t->dim_size(1) * t->dim_size(2), t->dim_size(1), t->dim_size(2)); } static void Run(const Tensor& in_x, const Tensor& in_y, bool lower, bool adjoint, const MatMulBCast& bcast, Tensor* out, int start, int limit) { const bool should_bcast = bcast.IsBroadcastingRequired(); const auto& x_batch_indices = bcast.x_batch_indices(); const auto& y_batch_indices = bcast.y_batch_indices(); int num_bands = in_x.dim_size(1); int matrix_size = in_x.dim_size(2); for (int64_t i = start; i < limit; ++i) { const int64_t x_batch_index = should_bcast ? x_batch_indices[i] : i; const int64_t y_batch_index = should_bcast ? 
y_batch_indices[i] : i; auto matrix = ConstTensorSliceToEigenMatrix(in_x, x_batch_index); auto rhs = ConstTensorSliceToEigenMatrix(in_y, y_batch_index); auto output = TensorSliceToEigenMatrix(out, i); if (lower) { if (!adjoint) { output.row(0) = rhs.row(0) / matrix(0, 0); for (int i = 1; i < matrix_size; ++i) { if (i < num_bands) { output.row(i).noalias() = (rhs.row(i) - matrix.block(1, i, i, 1).reverse().transpose() * output.topRows(i)) / matrix(0, i); } else { output.row(i).noalias() = (rhs.row(i) - matrix.block(1, i, num_bands - 1, 1).reverse().transpose() * output.middleRows(i - (num_bands - 1), num_bands - 1)) / matrix(0, i); } } } else { output.row(matrix_size - 1) = rhs.row(matrix_size - 1) / eigen_conj(matrix(0, matrix_size - 1)); for (int i = matrix_size - 1; i >= 0; --i) { output.row(i).noalias() = rhs.row(i); for (int j = i + 1; j < std::min(matrix_size, i + num_bands); ++j) { output.row(i).noalias() -= eigen_conj(matrix(j - i, j)) * output.row(j); } output.row(i) /= eigen_conj(matrix(0, i)); } } } else { if (!adjoint) { output.row(matrix_size - 1) = rhs.row(matrix_size - 1) / matrix(num_bands - 1, matrix_size - 1); for (int i = 1; i < matrix_size; ++i) { int k = matrix_size - 1 - i; if (i < num_bands) { output.row(k).noalias() = (rhs.row(k) - matrix.block(num_bands - 1 - i, k, i, 1) .reverse() .transpose() * output.bottomRows(i)) / matrix(num_bands - 1, k); } else { output.row(k).noalias() = (rhs.row(k) - matrix.block(0, k, num_bands - 1, 1).reverse().transpose() * output.middleRows(k + 1, num_bands - 1)) / matrix(num_bands - 1, k); } } } else { output.row(0) = rhs.row(0) / eigen_conj(matrix(num_bands - 1, 0)); for (int i = 1; i < matrix_size; ++i) { output.row(i).noalias() = rhs.row(i); for (int j = std::max(0, i - (num_bands - 1)); j < i; ++j) { output.row(i).noalias() -= eigen_conj(matrix(num_bands - 1 - (i - j), j)) * output.row(j); } output.row(i) /= eigen_conj(matrix(num_bands - 1, i)); } } } } } }; template <typename Scalar> struct LaunchBatchBandedTriangularSolve; template <typename Scalar> struct LaunchBatchBandedTriangularSolve { static void Launch(OpKernelContext* context, const Tensor& in_x, const Tensor& in_y, bool adjoint, bool lower, const MatMulBCast& bcast, Tensor* out) { const int64_t batch_size = bcast.output_batch_size(); const int64_t cost_per_unit = in_x.dim_size(1) * in_x.dim_size(2) * in_y.dim_size(2); auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using ConstMatrixMap = Eigen::Map<const Matrix>; using RealScalar = typename Eigen::NumTraits<Scalar>::Real; auto matrix = ConstMatrixMap(in_x.flat<Scalar>().data(), in_x.dim_size(1), in_x.dim_size(2)); RealScalar min_abs_pivot; if (lower) { min_abs_pivot = matrix.row(0).cwiseAbs().minCoeff(); } else { min_abs_pivot = matrix.row(in_x.dim_size(1) - 1).cwiseAbs().minCoeff(); } OP_REQUIRES(context, min_abs_pivot > RealScalar(0), errors::InvalidArgument("Input matrix is not invertible.")); Shard(worker_threads.num_threads, worker_threads.workers, batch_size, cost_per_unit, [&in_x, &in_y, adjoint, lower, &bcast, out](int64_t start, int64_t limit) { SequentialBandedTriangularSolveKernel<Scalar>::Run( in_x, in_y, lower, adjoint, bcast, out, start, limit); }); } }; template <typename Scalar> class BandedTriangularSolveOpCpu : public OpKernel { public: explicit BandedTriangularSolveOpCpu(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("lower", &lower_)); 
OP_REQUIRES_OK(context, context->GetAttr("adjoint", &adjoint_)); } ~BandedTriangularSolveOpCpu() override {} void Compute(OpKernelContext* ctx) override { const Tensor& in0 = ctx->input(0); const Tensor& in1 = ctx->input(1); ValidateInputTensors(ctx, in0, in1); if (!ctx->status().ok()) return; MatMulBCast bcast(in0.shape().dim_sizes(), in1.shape().dim_sizes()); OP_REQUIRES( ctx, bcast.IsValid(), errors::InvalidArgument( "In[0] and In[1] must have compatible batch dimensions: ", in0.shape().DebugString(), " vs. ", in1.shape().DebugString())); TensorShape out_shape = bcast.output_batch_shape(); auto batch_size = bcast.output_batch_size(); auto d0 = in0.dim_size(in0.dims() - 2); auto d1 = in0.dim_size(in0.dims() - 1); Tensor in0_reshaped; OP_REQUIRES( ctx, in0_reshaped.CopyFrom(in0, TensorShape({bcast.x_batch_size(), d0, d1})), errors::Internal("Failed to reshape In[0] from ", in0.shape().DebugString())); auto d2 = in1.dim_size(in1.dims() - 2); auto d3 = in1.dim_size(in1.dims() - 1); Tensor in1_reshaped; OP_REQUIRES( ctx, in1_reshaped.CopyFrom(in1, TensorShape({bcast.y_batch_size(), d2, d3})), errors::Internal("Failed to reshape In[1] from ", in1.shape().DebugString())); OP_REQUIRES(ctx, d1 == d2, errors::InvalidArgument( "In[0] mismatch In[1] shape: ", d1, " vs. ", d2, ": ", in0.shape().DebugString(), " ", in1.shape().DebugString(), " ", lower_, " ", adjoint_)); OP_REQUIRES_OK(ctx, out_shape.AddDimWithStatus(d1)); OP_REQUIRES_OK(ctx, out_shape.AddDimWithStatus(d3)); Tensor* out = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out)); if (out->NumElements() == 0) { return; } Tensor out_reshaped; OP_REQUIRES(ctx, out_reshaped.CopyFrom(*out, TensorShape({batch_size, d1, d3})), errors::Internal("Failed to reshape output from ", out->shape().DebugString())); LaunchBatchBandedTriangularSolve<Scalar>::Launch( ctx, in0_reshaped, in1_reshaped, adjoint_, lower_, bcast, &out_reshaped); } private: void ValidateInputTensors(OpKernelContext* ctx, const Tensor& in0, const Tensor& in1) { OP_REQUIRES( ctx, in0.dims() >= 2, errors::InvalidArgument("In[0] ndims must be >= 2: ", in0.dims())); OP_REQUIRES( ctx, in1.dims() >= 2, errors::InvalidArgument("In[1] ndims must be >= 2: ", in1.dims())); OP_REQUIRES(ctx, in0.NumElements() > 0, errors::InvalidArgument("In[0] must not be an empty tensor: ", in0.DebugString())); OP_REQUIRES(ctx, in1.NumElements() > 0, errors::InvalidArgument("In[1] must not be an empty tensor: ", in1.DebugString())); } bool lower_; bool adjoint_; }; #define REGISTER_BANDED_TRIANGULAR_SOLVE_CPU(TYPE) \ REGISTER_KERNEL_BUILDER(Name("BandedTriangularSolve") \ .Device(DEVICE_CPU) \ .TypeConstraint<TYPE>("T"), \ BandedTriangularSolveOpCpu<TYPE>); REGISTER_BANDED_TRIANGULAR_SOLVE_CPU(float); REGISTER_BANDED_TRIANGULAR_SOLVE_CPU(double); REGISTER_BANDED_TRIANGULAR_SOLVE_CPU(complex64); REGISTER_BANDED_TRIANGULAR_SOLVE_CPU(complex128); }
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/linalg/matrix_set_diag_op.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { Node* SetDiag(int num_bands, Graph* g, Node* bands, Node* triangular) { Node* ret; Tensor bandwidth(DT_INT32, TensorShape({2})); bandwidth.flat<int32>()(0) = -(num_bands - 1); bandwidth.flat<int32>()(1) = 0; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "MatrixSetDiagV3") .Input(triangular) .Input(bands) .Input(test::graph::Constant(g, bandwidth)) .Attr("align", "RIGHT_LEFT") .Finalize(g, &ret)); return ret; } Node* BandedTriangularSolve(Graph* g, Node* in0, Node* in1) { Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BandedTriangularSolve") .Input(in0) .Input(in1) .Attr("lower", true) .Attr("adjoint", false) .Finalize(g, &ret)); return ret; } Node* MatrixTriangularSolve(Graph* g, Node* in0, Node* in1) { Node* ret; TF_CHECK_OK(NodeBuilder(g->NewName("n"), "MatrixTriangularSolve") .Input(in0) .Input(in1) .Attr("lower", true) .Attr("adjoint", false) .Finalize(g, &ret)); return ret; } template <typename T> static Graph* BandedTriangularSolve(int64_t num_bands, int64_t n, int64_t m, bool use_banded_solver, DataType type) { Graph* g = new Graph(OpRegistry::Global()); Tensor in0(type, TensorShape({num_bands, n})); in0.flat<T>().setRandom(); in0.flat<T>() = in0.flat<T>().abs() + in0.flat<T>().constant(static_cast<T>(0.5)); Tensor in1(type, TensorShape({n, m})); in1.flat<T>().setRandom(); if (use_banded_solver) { BandedTriangularSolve(g, test::graph::Constant(g, in0), test::graph::Constant(g, in1)); } else { Tensor in2(type, TensorShape({n, n})); in2.flat<T>().setZero(); Node* triangular_matrix = SetDiag(num_bands, g, test::graph::Constant(g, in0), test::graph::Constant(g, in2)); MatrixTriangularSolve(g, triangular_matrix, test::graph::Constant(g, in1)); } return g; } #define BM_BandedTriangularSolveDev(K, N, M, BS, T, TT, D) \ static void BM_BandedTriangularSolve##_##K##_##N##_##M##_##BS##_##TT( \ ::testing::benchmark::State& state) { \ test::Benchmark(#D, BandedTriangularSolve<T>(K, N, M, BS, TT), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * K * N + N * M); \ } \ BENCHMARK(BM_BandedTriangularSolve##_##K##_##N##_##M##_##BS##_##TT) \ ->UseRealTime(); #define BM_BandedTriangularSolve(K, N, M, BS, D) \ BM_BandedTriangularSolveDev(K, N, M, BS, float, DT_FLOAT, D); \ BM_BandedTriangularSolveDev(K, N, M, BS, double, DT_DOUBLE, D); BM_BandedTriangularSolve(2, 32, 1, true, cpu); BM_BandedTriangularSolve(2, 32, 1, false, cpu); BM_BandedTriangularSolve(4, 32, 1, true, cpu); BM_BandedTriangularSolve(4, 32, 1, false, cpu); BM_BandedTriangularSolve(8, 32, 1, true, cpu); BM_BandedTriangularSolve(8, 32, 1, false, cpu); BM_BandedTriangularSolve(16, 32, 1, true, cpu); BM_BandedTriangularSolve(16, 32, 1, false, cpu); BM_BandedTriangularSolve(2, 128, 1, true, cpu); BM_BandedTriangularSolve(2, 128, 1, false, cpu); BM_BandedTriangularSolve(4, 128, 1, true, cpu); BM_BandedTriangularSolve(4, 128, 1, false, cpu); BM_BandedTriangularSolve(8, 128, 1, true, cpu); 
BM_BandedTriangularSolve(8, 128, 1, false, cpu); BM_BandedTriangularSolve(16, 128, 1, true, cpu); BM_BandedTriangularSolve(16, 128, 1, false, cpu); BM_BandedTriangularSolve(2, 512, 1, true, cpu); BM_BandedTriangularSolve(2, 512, 1, false, cpu); BM_BandedTriangularSolve(4, 512, 1, true, cpu); BM_BandedTriangularSolve(4, 512, 1, false, cpu); BM_BandedTriangularSolve(8, 512, 1, true, cpu); BM_BandedTriangularSolve(8, 512, 1, false, cpu); BM_BandedTriangularSolve(16, 512, 1, true, cpu); BM_BandedTriangularSolve(16, 512, 1, false, cpu); BM_BandedTriangularSolve(2, 32, 32, true, cpu); BM_BandedTriangularSolve(2, 32, 32, false, cpu); BM_BandedTriangularSolve(4, 32, 32, true, cpu); BM_BandedTriangularSolve(4, 32, 32, false, cpu); BM_BandedTriangularSolve(8, 32, 32, true, cpu); BM_BandedTriangularSolve(8, 32, 32, false, cpu); BM_BandedTriangularSolve(16, 32, 32, true, cpu); BM_BandedTriangularSolve(16, 32, 32, false, cpu); BM_BandedTriangularSolve(2, 128, 128, true, cpu); BM_BandedTriangularSolve(2, 128, 128, false, cpu); BM_BandedTriangularSolve(4, 128, 128, true, cpu); BM_BandedTriangularSolve(4, 128, 128, false, cpu); BM_BandedTriangularSolve(8, 128, 128, true, cpu); BM_BandedTriangularSolve(8, 128, 128, false, cpu); BM_BandedTriangularSolve(16, 128, 128, true, cpu); BM_BandedTriangularSolve(16, 128, 128, false, cpu); BM_BandedTriangularSolve(2, 512, 512, true, cpu); BM_BandedTriangularSolve(2, 512, 512, false, cpu); BM_BandedTriangularSolve(4, 512, 512, true, cpu); BM_BandedTriangularSolve(4, 512, 512, false, cpu); BM_BandedTriangularSolve(8, 512, 512, true, cpu); BM_BandedTriangularSolve(8, 512, 512, false, cpu); BM_BandedTriangularSolve(16, 512, 512, true, cpu); BM_BandedTriangularSolve(16, 512, 512, false, cpu); BM_BandedTriangularSolve(2, 2048, 2048, true, cpu); BM_BandedTriangularSolve(2, 2048, 2048, false, cpu); BM_BandedTriangularSolve(4, 2048, 2048, true, cpu); BM_BandedTriangularSolve(4, 2048, 2048, false, cpu); BM_BandedTriangularSolve(8, 2048, 2048, true, cpu); BM_BandedTriangularSolve(8, 2048, 2048, false, cpu); BM_BandedTriangularSolve(16, 2048, 2048, true, cpu); BM_BandedTriangularSolve(16, 2048, 2048, false, cpu); BM_BandedTriangularSolve(32, 2048, 2048, true, cpu); BM_BandedTriangularSolve(32, 2048, 2048, false, cpu); BM_BandedTriangularSolve(64, 2048, 2048, true, cpu); BM_BandedTriangularSolve(64, 2048, 2048, false, cpu); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/linalg/banded_triangular_solve_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/linalg/banded_triangular_solve_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
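The banded solver above stores the lower-triangular matrix as a [num_bands, n] array in which, reading the kernel, row 0 holds the main diagonal and bands(k, i) supplies L(i, i - k); forward substitution then touches only entries inside the band. Below is a small standalone sketch of that recurrence for one right-hand side, assuming the same layout (the kernel itself works on Eigen maps and shards the batch across threads; the helper name is illustrative).

```cpp
#include <iostream>
#include <vector>

// Solve L x = b for a lower-triangular banded L given as a [num_bands, n]
// array where bands[0][i] = L(i, i) and bands[k][i] = L(i, i - k) for i >= k.
std::vector<double> banded_lower_solve(
    const std::vector<std::vector<double>>& bands,
    const std::vector<double>& b) {
  const int num_bands = static_cast<int>(bands.size());
  const int n = static_cast<int>(b.size());
  std::vector<double> x(n);
  for (int i = 0; i < n; ++i) {
    double acc = b[i];
    // Subtract contributions from already-solved unknowns inside the band.
    for (int k = 1; k < num_bands && k <= i; ++k) {
      acc -= bands[k][i] * x[i - k];
    }
    x[i] = acc / bands[0][i];  // divide by the diagonal entry L(i, i)
  }
  return x;
}

int main() {
  // Dense L = [[2,0,0],[1,2,0],[0,1,2]], b = [2,5,8]  =>  x = [1,2,3].
  const std::vector<std::vector<double>> bands = {{2.0, 2.0, 2.0},
                                                  {0.0, 1.0, 1.0}};
  for (double v : banded_lower_solve(bands, {2.0, 5.0, 8.0})) {
    std::cout << v << " ";  // prints: 1 2 3
  }
  std::cout << "\n";
}
```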
dc692c4b-5539-4049-846b-b6721c7cd45d
cpp
tensorflow/tensorflow
batch_resource_base
tensorflow/core/kernels/batching_util/batch_resource_base.cc
tensorflow/core/kernels/batching_util/batch_resource_base_test.cc
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <functional> #include <iterator> #include <memory> #include <optional> #include <sstream> #include <string> #include <utility> #include <vector> #include "absl/container/fixed_array.h" #include "absl/container/flat_hash_map.h" #include "absl/functional/bind_front.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" #include "absl/types/optional.h" #include "tensorflow/core/common_runtime/cost_constants.h" #include "tensorflow/core/common_runtime/cost_measurement.h" #include "tensorflow/core/common_runtime/cost_measurement_registry.h" #include "tensorflow/core/common_runtime/cost_util.h" #include "tensorflow/core/common_runtime/request_cost.h" #include "tensorflow/core/common_runtime/request_cost_accessor.h" #include "tensorflow/core/common_runtime/request_cost_accessor_registry.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/ops_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h" #include "tensorflow/core/kernels/batching_util/batch_stats.h" #include "tensorflow/core/kernels/batching_util/concat_split_util.h" #include "tensorflow/core/kernels/batching_util/input_split_metadata.h" #include "tensorflow/core/kernels/batching_util/threadsafe_status.h" #include "tensorflow/core/kernels/batching_util/warmup.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/lib/monitoring/gauge.h" #include "tensorflow/core/lib/monitoring/percentile_sampler.h" #include "tensorflow/core/lib/monitoring/sampler.h" #include "tensorflow/core/lib/monitoring/types.h" #include "tensorflow/core/platform/context.h" #include "tensorflow/core/platform/env_time.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" #include "tensorflow/core/util/incremental_barrier.h" #include "tsl/platform/criticality.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace serving { namespace { void RecordPaddingSize(int32_t padding_size, const string& model_name, int32_t execution_batch_size, const string& op_name) { static auto* cell = tensorflow::monitoring::PercentileSampler<3>::New( {"/tensorflow/serving/batching/padding_size", "Tracks the padding size distribution on batches by model_name (if " "available).", "model_name", "execution_batch_size", "op_name"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, tensorflow::monitoring::UnitOfMeasure::kNumber); cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name) ->Add(static_cast<double>(padding_size)); } void RecordPaddingSizeV2(int32_t padding_size, const string& model_name, int32_t execution_batch_size, const string& op_name) { 
std::vector<double> bucket_limits; bucket_limits.push_back(-2.0 / 3.0); double bound = 2.0 / 3.0; double growth_factor = 2; for (int i = 0; i < 16; i++) { bucket_limits.push_back(bound); bound *= growth_factor; } static auto* cell = tensorflow::monitoring::Sampler<3>::New( {"/tensorflow/serving/batching/padding_size_v2", "Tracks the padding size distribution on batches by model_name (if " "available).", "model_name", "execution_batch_size", "op_name"}, monitoring::Buckets::Explicit(bucket_limits)); cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name) ->Add(static_cast<double>(padding_size)); } void RecordInputBatchSize(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New( {"/tensorflow/serving/batching/input_batch_size", "Tracks the batch size distribution on the inputs by model_name (if " "available).", "model_name", "op_name"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, tensorflow::monitoring::UnitOfMeasure::kNumber); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordInputBatchSizeV2(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::Sampler<2>::New( {"/tensorflow/serving/batching/input_batch_size_v2", "Tracks the batch size distribution on the inputs by model_name (if " "available).", "model_name", "op_name"}, monitoring::Buckets::Exponential(2.0 / 3.0, 2, 15)); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordBatchSize(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::Sampler<2>::New( {"/tensorflow/serving/batching/batch_size", "Tracks the batch size distribution on the batch result by model_name " "(if available).", "model_name", "op_name"}, monitoring::Buckets::Exponential(1, 1.5, 20)); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordProcessedBatchSize(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New( {"/tensorflow/serving/batching/processed_batch_size", "Tracks the batch size distribution on processing by model_name (if " "available).", "model_name", "op_name"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, tensorflow::monitoring::UnitOfMeasure::kNumber); cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size)); } void RecordProcessedBatchSizeV2(int32_t batch_size, const string& model_name, const string& op_name) { static auto* cell = monitoring::Counter<3>::New( "/tensorflow/serving/batching/processed_batch_size_v2", "Tracks the batch size on processing by model_name and op name (if " "available).", "model_name", "op_name", "batch_size"); cell->GetCell(model_name, op_name, std::to_string(batch_size)) ->IncrementBy(1); } void RecordBatchDelayUs(int64_t batch_delay_us, const string& model_name, const string& op_name, int32_t batch_size) { static auto* cell = monitoring::PercentileSampler<3>::New( {"/tensorflow/serving/batching/batch_delay_us", "Tracks the batching delay (in microseconds) for inputs by model_name " "(if available).", "model_name", "op_name", "processed_batch_size"}, {25.0, 50.0, 75.0, 90.0, 95.0, 99.0}, 1024, monitoring::UnitOfMeasure::kTime); cell->GetCell(model_name, op_name, std::to_string(batch_size)) ->Add(static_cast<double>(batch_delay_us)); } void RecordBatchDelayUsV2(int64_t batch_delay_us, const string& 
model_name, const string& op_name, int32_t batch_size) { static auto* cell = tensorflow::monitoring::Sampler<3>::New( {"/tensorflow/serving/batching/batch_delay_us_v2", "Tracks the batching delay (in microseconds) for inputs by model_name " "(if available).", "model_name", "op_name", "processed_batch_size"}, monitoring::Buckets::Exponential(1, 2, 27)); cell->GetCell(model_name, op_name, std::to_string(batch_size)) ->Add(static_cast<double>(batch_delay_us)); } void RecordBatchParamBatchTimeoutMicros(int64_t batch_timeout_micros, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<int64_t, 2>::New( "/tensorflow/serving/batching/batch_timeout_micros", "Tracks how long a request can wait before being processed by a batch.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(batch_timeout_micros); } void RecordBatchParamMaxBatchSize(int64_t max_batch_size, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<int64_t, 2>::New( "/tensorflow/serving/batching/max_batch_size", "Tracks the maximum size of a batch.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(max_batch_size); } void RecordBatchParamPaddingPolicy(const string& batch_padding_policy, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<string, 2>::New( "/tensorflow/serving/batching/configured_batch_padding_policy", "The value of BatchFunction.batch_padding_policy attribute.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(batch_padding_policy); } void RecordBatchParamMaxEnqueuedBatches(int64_t max_enqueued_batches, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<int64_t, 2>::New( "/tensorflow/serving/batching/max_enqueued_batches", "Tracks the maximum number of enqueued batches.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(max_enqueued_batches); } void RecordBatchParamAllowedBatchSizes(const string& allowed_batch_sizes, const string& model_name, const string& op_name) { static auto* cell = monitoring::Gauge<string, 2>::New( "/tensorflow/serving/batching/allowed_batch_sizes", "Tracks the sizes that are allowed to form a batch.", "model_name", "op_name"); cell->GetCell(model_name, op_name)->Set(allowed_batch_sizes); } void RecordBatchCosts(const std::string& model_name, const int64_t processed_size, const absl::string_view cost_type, const absl::Duration total_cost) { static auto* cell = tensorflow::monitoring::Sampler<3>::New( {"/tensorflow/serving/batching/costs", "Tracks the batch costs (in microseconds) by model name and processed " "size.", "model_name", "processed_size", "cost_type"}, monitoring::Buckets::Exponential(1, 2, 27)); cell->GetCell(model_name, std::to_string(processed_size), std::string(cost_type)) ->Add(absl::ToDoubleMicroseconds(total_cost)); } const string& GetModelName(OpKernelContext* ctx) { static string* kModelNameUnset = new string("model_name_unset"); if (!ctx->session_metadata()) return *kModelNameUnset; if (ctx->session_metadata()->name().empty()) return *kModelNameUnset; return ctx->session_metadata()->name(); } int GetTotalTaskSize( const std::vector<std::unique_ptr<BatchResourceBase::BatchTask>>& tasks) { int tasks_size = 0; for (const auto& task : tasks) { tasks_size += task->size(); } return tasks_size; } } std::unique_ptr<BatchResourceBase::BatchTask> BatchResourceBase::BatchTask::CreateSplitTask( int split_index, AsyncOpKernel::DoneCallback done_callback) { 
std::unique_ptr<BatchTask> task = CreateDerivedTask(); task->guid = this->guid; task->propagated_context = Context(ContextKind::kThread); task->inputs.reserve(this->inputs.size()); task->captured_inputs = this->captured_inputs; task->context = this->context; task->done_callback = done_callback; task->split_index = split_index; task->output = this->output; task->status = this->status; task->is_partial = true; task->start_time = this->start_time; task->request_cost = this->request_cost; task->forced_warmup_batch_size = this->forced_warmup_batch_size; return task; } using ::tensorflow::concat_split_util::Concat; using ::tensorflow::concat_split_util::Split; using TensorMatrix = std::vector<std::vector<Tensor>>; string GetTensorNamesAndShapesString(const OpKernelContext* context, const OpInputList& tensors) { std::stringstream out; int i = 0; for (const Tensor& tensor : tensors) { out << " - " << context->op_kernel().requested_input(i++) << " has shape " << tensor.shape().DebugString() << "\n"; } return out.str(); } Status BatchResourceBase::RegisterWarmupInputs( int64_t guid, OpKernelContext* context, const string& batcher_queue_name, const CreateBatchTaskFn& create_batch_task_fn, AsyncOpKernel::DoneCallback done) { auto shared_status = std::make_shared<ThreadSafeStatus>(); auto create_batch_task_fn_share_status = [&create_batch_task_fn, &shared_status]() { auto batch_task = create_batch_task_fn(); if (!batch_task.ok()) { return batch_task; } (*batch_task)->status = shared_status; return batch_task; }; auto warmup_counter = std::make_shared<absl::BlockingCounter>(allowed_batch_sizes_.size()); for (int i = 0; i < allowed_batch_sizes_.size(); ++i) { Status status = RegisterInput( guid, context, batcher_queue_name, create_batch_task_fn_share_status, [warmup_counter = warmup_counter.get()]() { warmup_counter->DecrementCount(); }, allowed_batch_sizes_[i]); if (!status.ok()) return status; } return RegisterInput( guid, context, batcher_queue_name, create_batch_task_fn_share_status, [warmup_counter, context, shared_status, done = std::move(done)]() { warmup_counter->Wait(); context->SetStatus(shared_status->status()); done(); }); } Status BatchResourceBase::RegisterInput( int64_t guid, OpKernelContext* context, const string& batcher_queue_name, const CreateBatchTaskFn& create_batch_task_fn, AsyncOpKernel::DoneCallback done_callback, int forced_warmup_batch_size) { TF_ASSIGN_OR_RETURN(std::unique_ptr<BatchTask> batch_components, create_batch_task_fn()); batch_components->start_time = EnvTime::NowNanos(); batch_components->guid = guid; batch_components->propagated_context = Context(ContextKind::kThread); OpInputList tensors; TF_RETURN_IF_ERROR(context->input_list("in_tensors", &tensors)); batch_components->inputs.reserve(tensors.size()); for (const Tensor& tensor : tensors) { if (tensor.shape().dims() == 0) { return errors::InvalidArgument( "Batching input tensors must have at least one dimension.\nBelow are " "the input tensors: \n", GetTensorNamesAndShapesString(context, tensors)); } if (tensors.size() >= 2 && tensor.shape().dim_size(0) != tensors[0].shape().dim_size(0)) { return errors::InvalidArgument( "Batching input tensors supplied in a given op invocation must " "have equal 0th-dimension size.\nBelow are the input tensors: \n", GetTensorNamesAndShapesString(context, tensors)); } batch_components->inputs.push_back(tensor); } RecordInputBatchSize(tensors[0].shape().dim_size(0), GetModelName(context), context->op_kernel().name()); RecordInputBatchSizeV2(tensors[0].shape().dim_size(0), 
GetModelName(context), context->op_kernel().name()); if (batcher_) { RecordBatchParamBatchTimeoutMicros( batcher_queue_options_.batch_timeout_micros, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxBatchSize( batcher_queue_options_.max_execution_batch_size, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxEnqueuedBatches( batcher_queue_options_.max_enqueued_batches, GetModelName(context), context->op_kernel().name()); RecordBatchParamPaddingPolicy( this->batcher_queue_options_.batch_padding_policy, GetModelName(context), context->op_kernel().name()); } else if (adaptive_batcher_) { RecordBatchParamBatchTimeoutMicros( adaptive_batcher_queue_options_.batch_timeout_micros, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxBatchSize(adaptive_batcher_queue_options_.max_batch_size, GetModelName(context), context->op_kernel().name()); RecordBatchParamMaxEnqueuedBatches( adaptive_batcher_queue_options_.max_enqueued_batches, GetModelName(context), context->op_kernel().name()); } else { return errors::Internal("No batcher defined."); } RecordBatchParamAllowedBatchSizes(allowed_batch_sizes_str_, GetModelName(context), context->op_kernel().name()); if (tensors[0].shape().dim_size(0) == 0) { for (int i = 0; i < context->num_outputs(); i++) { Tensor* empty_output; AllocatorAttributes cpu_alloc; cpu_alloc.set_on_host(true); TF_RETURN_IF_ERROR(context->allocate_output(i, TensorShape({0}), &empty_output, cpu_alloc)); } done_callback(); return absl::OkStatus(); } OpInputList captured_tensors; const auto captured_status = context->input_list("captured_tensors", &captured_tensors); if (captured_status.ok()) { batch_components->captured_inputs.reserve(captured_tensors.size()); for (const Tensor& captured_tensor : captured_tensors) { batch_components->captured_inputs.push_back(captured_tensor); } } batch_components->context = context; batch_components->split_index = 0; batch_components->output = std::make_shared<TensorMatrix>(); if (!batch_components->status) { batch_components->status = std::make_shared<ThreadSafeStatus>(); batch_components->done_callback = [done_callback = std::move(done_callback), shared_status = batch_components->status, context = context]() { context->SetStatus(shared_status->status()); done_callback(); }; } else { batch_components->done_callback = std::move(done_callback); } batch_components->forced_warmup_batch_size = forced_warmup_batch_size; std::unique_ptr<RequestCostAccessor> request_cost_accessor = CreateRequestCostAccessor(); if (request_cost_accessor) { batch_components->request_cost = request_cost_accessor->GetRequestCost(); } BatcherQueueT* batcher_queue; TF_RETURN_IF_ERROR(LookupOrCreateBatcherQueue( batcher_queue_name, GetModelName(context), context->op_kernel().name(), &batcher_queue)); if (!session_metadata().name().empty()) { absl::MutexLock lock(&outstanding_batch_mu_); WarmupStateRegistry::Key key(session_metadata().name(), session_metadata().version()); if (GetGlobalWarmupStateRegistry().Lookup(key)) { outstanding_batch_mu_.Await({+[](int* num_outstanding_batched_items) { return *num_outstanding_batched_items == 0; }, &num_outstanding_batched_items_}); } num_outstanding_batched_items_ += batch_components->size(); } return batcher_queue->Schedule(&batch_components); } BatchResourceBase::BatcherT::QueueOptions BatchResourceBase::GetBatcherQueueOptions( int32_t num_batch_threads, int32_t max_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& 
allowed_batch_sizes, bool enable_large_batch_splitting, bool disable_padding) { return GetBatcherQueueOptions( num_batch_threads, max_batch_size, batch_timeout_micros, max_enqueued_batches, allowed_batch_sizes, enable_large_batch_splitting, disable_padding, kPadUpPolicy, 0, 0, 0, {}, MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize); } BatchResourceBase::BatcherT::QueueOptions BatchResourceBase::GetBatcherQueueOptions( int32_t num_batch_threads, int32_t max_batch_size, int32_t batch_timeout_micros, int32_t max_enqueued_batches, const std::vector<int32>& allowed_batch_sizes, bool enable_large_batch_splitting, bool disable_padding, absl::string_view batch_padding_policy, int32_t low_priority_max_batch_size, int32_t low_priority_batch_timeout_micros, int32_t low_priority_max_enqueued_batches, const std::vector<int32>& low_priority_allowed_batch_sizes, MixedPriorityBatchingPolicy mixed_priority_batching_policy) { BatcherT::QueueOptions batcher_queue_options; batcher_queue_options.input_batch_size_limit = max_batch_size; batcher_queue_options.max_enqueued_batches = max_enqueued_batches; batcher_queue_options.batch_timeout_micros = batch_timeout_micros; batcher_queue_options.batch_padding_policy = std::string(batch_padding_policy); if (low_priority_max_batch_size > 0) { batcher_queue_options.enable_priority_queue = true; } batcher_queue_options.high_priority_queue_options.input_batch_size_limit = max_batch_size; batcher_queue_options.high_priority_queue_options.max_enqueued_batches = max_enqueued_batches; batcher_queue_options.high_priority_queue_options.batch_timeout_micros = batch_timeout_micros; batcher_queue_options.low_priority_queue_options.input_batch_size_limit = low_priority_max_batch_size; batcher_queue_options.low_priority_queue_options.max_enqueued_batches = low_priority_max_enqueued_batches; batcher_queue_options.low_priority_queue_options.batch_timeout_micros = low_priority_batch_timeout_micros; if (low_priority_allowed_batch_sizes.empty()) { batcher_queue_options.low_priority_queue_options.max_execution_batch_size = low_priority_max_batch_size; } else { batcher_queue_options.low_priority_queue_options.max_execution_batch_size = *low_priority_allowed_batch_sizes.rbegin(); } batcher_queue_options.low_priority_queue_options.allowed_batch_sizes = low_priority_allowed_batch_sizes; batcher_queue_options.mixed_priority_batching_policy = mixed_priority_batching_policy; batcher_queue_options.enable_large_batch_splitting = enable_large_batch_splitting; if (enable_large_batch_splitting) { batcher_queue_options.split_input_task_func = [](std::unique_ptr<BatchTask>* input_task, int open_batch_remaining_slot, int max_batch_size, std::vector<std::unique_ptr<BatchTask>>* output_tasks) -> Status { return SplitInputTask(input_task, open_batch_remaining_slot, max_batch_size, output_tasks); }; if (allowed_batch_sizes.empty()) { batcher_queue_options.max_execution_batch_size = max_batch_size; batcher_queue_options.high_priority_queue_options .max_execution_batch_size = max_batch_size; } else { batcher_queue_options.max_execution_batch_size = *allowed_batch_sizes.rbegin(); batcher_queue_options.high_priority_queue_options .max_execution_batch_size = *allowed_batch_sizes.rbegin(); batcher_queue_options.allowed_batch_sizes = allowed_batch_sizes; } } batcher_queue_options.disable_padding = disable_padding; return batcher_queue_options; } BatchResourceBase::AdaptiveBatcherT::QueueOptions BatchResourceBase::GetAdaptiveBatcherQueueOptions( int32_t max_batch_size, int32_t batch_timeout_micros, 
int32_t max_enqueued_batches, bool enable_large_batch_splitting, const std::vector<int32>& allowed_batch_sizes, bool disable_padding) { AdaptiveBatcherT::QueueOptions batcher_queue_options; batcher_queue_options.max_input_task_size = std::make_optional(max_batch_size); batcher_queue_options.max_enqueued_batches = max_enqueued_batches; batcher_queue_options.batch_timeout_micros = batch_timeout_micros; if (allowed_batch_sizes.empty()) { batcher_queue_options.max_batch_size = max_batch_size; } else { batcher_queue_options.max_batch_size = *allowed_batch_sizes.rbegin(); } if (enable_large_batch_splitting) { batcher_queue_options.split_input_task_func = [](std::unique_ptr<BatchTask>* input_task, int open_batch_remaining_slot, int max_batch_size, std::vector<std::unique_ptr<BatchTask>>* output_tasks) -> Status { return SplitInputTask(input_task, open_batch_remaining_slot, max_batch_size, output_tasks); }; } batcher_queue_options.disable_padding = disable_padding; return batcher_queue_options; } Status BatchResourceBase::ValidateBatch(const BatchT& batch) { for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) { const BatchResourceBase::BatchTask& task = batch.task(task_idx); if (task.inputs.size() != batch.task(0).inputs.size()) { return errors::InvalidArgument( "Batching inputs must have equal number of edges"); } } return absl::OkStatus(); } bool BatchResourceBase::IsLowPriorityBatch(const BatchT& batch) const { if (!batcher_queue_options_.enable_priority_queue) return false; if (batch.empty()) return false; return batch.task(0).criticality() == tsl::criticality::Criticality::kSheddablePlus || batch.task(0).criticality() == tsl::criticality::Criticality::kSheddable; } int BatchResourceBase::RoundToLowestAllowedBatchSize( int batch_size, bool is_low_priority_batch) const { const std::vector<int32>& allowed_batch_sizes = is_low_priority_batch ? batcher_queue_options_.low_priority_queue_options .allowed_batch_sizes : allowed_batch_sizes_; return GetNextAllowedBatchSize(batch_size, allowed_batch_sizes, batcher_queue_options_.disable_padding); } Status BatchResourceBase::ConcatInputTensors( const BatchT& batch, const std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks, OpKernelContext* context, std::vector<Tensor>* concatenated_tensors) const { if (batch.num_tasks() == 0) { return errors::InvalidArgument("Empty batch."); } int unbatched_tasks_size = GetTotalTaskSize(unbatched_tasks); const bool just_for_warmup = batch.task(0).forced_warmup_batch_size > 0; const int padded_batch_size = just_for_warmup ? batch.task(0).forced_warmup_batch_size : RoundToLowestAllowedBatchSize(batch.size() + unbatched_tasks_size, IsLowPriorityBatch(batch)); const int padding_amount = just_for_warmup ? 
padded_batch_size : padded_batch_size - batch.size() - unbatched_tasks_size; tsl::profiler::TraceMe trace_me( [padded_batch_size, padding_amount, disable_padding = batcher_queue_options_.disable_padding]() { return tsl::profiler::TraceMeEncode( "ConcatInputTensors", {{"batch_size_after_padding", padded_batch_size}, {"padding_amount", padding_amount}, {"disable_padding", disable_padding}}); }); RecordPaddingSize(padding_amount, GetModelName(context), padded_batch_size, context->op_kernel().name()); RecordPaddingSizeV2(padding_amount, GetModelName(context), padded_batch_size, context->op_kernel().name()); RecordProcessedBatchSize(padded_batch_size, GetModelName(context), context->op_kernel().name()); RecordProcessedBatchSizeV2(padded_batch_size, GetModelName(context), context->op_kernel().name()); RecordBatchSize(batch.size(), GetModelName(context), context->op_kernel().name()); const int num_inputs = batch.task(0).inputs.size(); concatenated_tensors->reserve(num_inputs); for (int i = 0; i < num_inputs; ++i) { std::vector<Tensor> to_concatenate; if (just_for_warmup) { to_concatenate.reserve(padding_amount); } else { to_concatenate.reserve(batch.num_tasks() + unbatched_tasks.size() + padding_amount); for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) { to_concatenate.push_back(batch.task(task_idx).inputs.at(i)); } for (int task_idx = 0; task_idx < unbatched_tasks.size(); ++task_idx) { to_concatenate.push_back(unbatched_tasks[task_idx]->inputs.at(i)); } } if (padding_amount != 0) { const Tensor& padding_source = batch.task(0).inputs.at(i); Tensor padding; if (padding_source.shape().dim_size(0) == 0) { return errors::InvalidArgument( "Cannot use an empty tensor with zero rows as padding when " "batching. (Input ", i, " got shape ", padding_source.shape().DebugString(), ".)"); } if (padding_source.shape().dim_size(0) == 1) { padding = padding_source; } else { padding = padding_source.Slice(0, 1); } for (int i = 0; i < padding_amount; ++i) { to_concatenate.push_back(padding); } } Tensor concatenated_tensor; Status concat_status = Concat(context, to_concatenate, &concatenated_tensor); TF_RETURN_IF_ERROR(concat_status); concatenated_tensors->push_back(concatenated_tensor); } return absl::OkStatus(); } Status BatchResourceBase::SplitInputTask( std::unique_ptr<BatchTask>* input_task_ptr, int open_batch_remaining_slot, int max_batch_size, std::vector<std::unique_ptr<BatchTask>>* output_tasks) { BatchTask& input_task = *(*input_task_ptr); const int64_t input_task_size = input_task.size(); DCHECK_GT(input_task_size, 0); std::shared_ptr<ThreadSafeStatus> shared_status = input_task.status; std::function<void()> split_task_done_callback = [done_callback = input_task.done_callback, output = input_task.output, forced_warmup_batch_size = input_task.forced_warmup_batch_size, op_kernel_context = input_task.context, status = shared_status]() mutable { const int num_output = op_kernel_context->num_outputs(); for (int i = 0; i < num_output; ++i) { Tensor output_tensor; std::vector<Tensor> to_concatenate; to_concatenate.reserve(output->size()); for (int j = 0; j < output->size(); ++j) { to_concatenate.push_back(std::move((*output)[j][i])); } const auto concat_status = Concat(op_kernel_context, to_concatenate, &output_tensor); if (!concat_status.ok()) { status->Update(concat_status); } if (forced_warmup_batch_size == 0) { op_kernel_context->set_output(i, std::move(output_tensor)); } } done_callback(); }; IncrementalBarrier barrier(split_task_done_callback); const internal::InputSplitMetadata 
input_split_metadata( input_task_size, open_batch_remaining_slot, max_batch_size); const absl::FixedArray<int>& task_sizes = input_split_metadata.task_sizes(); const int num_batches = task_sizes.size(); std::vector<int64_t> output_task_sizes; output_task_sizes.resize(num_batches); for (int i = 0; i < num_batches; i++) { output_task_sizes[i] = task_sizes[i]; } input_task.output->resize(num_batches); for (int i = 0; i < num_batches; ++i) { (*input_task.output)[i].resize(input_task.context->num_outputs()); } output_tasks->reserve(num_batches); for (int i = 0; i < num_batches; i++) { output_tasks->push_back(input_task.CreateSplitTask(i, barrier.Inc())); } const int num_input_tensors = input_task.inputs.size(); for (int i = 0; i < num_input_tensors; ++i) { std::vector<Tensor> split_tensors; const Tensor& input_tensor = input_task.inputs[i]; const Status split_status = Split(input_task.context, input_tensor, output_task_sizes, &split_tensors); if (!split_status.ok()) { return errors::Internal( "When splitting input, Tensor split operation failed: ", split_status.message()); } if (split_tensors.size() != output_task_sizes.size()) { return errors::Internal( "When splitting input, tensor split operation did not work as " "expected; got ", split_tensors.size(), " splits; expected ", output_task_sizes.size()); } for (int j = 0; j < output_tasks->size(); ++j) { BatchTask& output_task = *((*output_tasks)[j]); auto moved_tensor_iter = std::next(split_tensors.begin(), j); std::move(moved_tensor_iter, moved_tensor_iter + 1, std::back_inserter(output_task.inputs)); } } return absl::OkStatus(); } Status BatchResourceBase::SplitOutputTensors( const std::vector<Tensor>& combined_outputs, BatchT* batch, std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks) const { DCHECK_GE(batch->num_tasks(), 1); if (batch->num_tasks() < 1) { return errors::Internal("Batch size expected to be positive; was ", batch->num_tasks()); } std::vector<int64_t> task_sizes_plus_optional_padding; task_sizes_plus_optional_padding.reserve(batch->num_tasks() + unbatched_tasks.size()); for (int i = 0; i < batch->num_tasks(); ++i) { task_sizes_plus_optional_padding.push_back(batch->task(i).size()); } for (int i = 0; i < unbatched_tasks.size(); ++i) { task_sizes_plus_optional_padding.push_back(unbatched_tasks[i]->size()); } int unbatched_tasks_size = GetTotalTaskSize(unbatched_tasks); const int padding_size = batcher_queue_options_.disable_padding ? 
0 : RoundToLowestAllowedBatchSize(batch->size() + unbatched_tasks_size, IsLowPriorityBatch(*batch)) - batch->size() - unbatched_tasks_size; if (padding_size > 0) { task_sizes_plus_optional_padding.push_back(padding_size); } DCHECK_EQ(batch->task(0).context->num_outputs(), combined_outputs.size()); int combined_outputs_size = combined_outputs.size(); if (combined_outputs_size != batch->task(0).context->num_outputs()) { return errors::Internal("Wrong number of batched output tensors"); } for (int i = 0, iter_limit = combined_outputs.size(); i < iter_limit; ++i) { const Tensor& output_tensor = combined_outputs[i]; if (output_tensor.shape().dims() == 0) { return errors::FailedPrecondition( "Batched output tensor has 0 dimensions"); } if (output_tensor.shape().dim_size(0) != static_cast<int64_t>(batch->size() + unbatched_tasks_size + padding_size)) { return errors::FailedPrecondition( "Batched output tensor's 0th dimension does not equal the sum of " "the 0th dimension sizes of the input tensors"); } std::vector<Tensor> split_tensor; const Status split_status = tensor::Split( output_tensor, task_sizes_plus_optional_padding, &split_tensor); DCHECK(split_status.ok()) << split_status; if (!split_status.ok()) { return errors::Internal("Tensor split operation failed: ", split_status.message()); } DCHECK_EQ(split_tensor.size(), task_sizes_plus_optional_padding.size()); if (split_tensor.size() != task_sizes_plus_optional_padding.size()) { return errors::Internal( "Tensor split operation did not work as expected; got ", split_tensor.size(), " splits; expected ", task_sizes_plus_optional_padding.size()); } for (int j = 0; j < batch->num_tasks(); ++j) { BatchTask& task = *(batch->mutable_task(j)); if (task.is_partial) { std::vector<Tensor>& tensor_vector = (*task.output)[task.split_index]; tensor_vector[i] = std::move(split_tensor[j]); } else { task.context->set_output(i, split_tensor[j]); } } for (int j = 0; j < unbatched_tasks.size(); ++j) { unbatched_tasks[j]->context->set_output( i, split_tensor[batch->num_tasks() + j]); } } return absl::OkStatus(); } void BatchResourceBase::CleanUpFunctionHelper(BatchTask& task, const Status& status) const { WithContext wc(task.propagated_context); if (!status.ok()) { if (!absl::StrContains(status.message(), "Function was cancelled before it was started")) { task.status->Update(status); } else { LOG(ERROR) << "ERROR!!!! 
" << status.message(); } } task.done_callback(); } void BatchResourceBase::ProcessFuncBatch( std::unique_ptr<BatchT> batch, std::vector<std::unique_ptr<BatchTask>> unbatched_tasks) const { if (batch->empty()) { return; } WithContext wc(batch->task(batch->num_tasks() - 1).propagated_context); const CostMeasurement::Context batching_context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements = CreateCostMeasurements(batching_context); auto& last_task = batch->task(batch->num_tasks() - 1); OpKernelContext* last_task_context = last_task.context; const std::string& model_name = GetModelName(last_task_context); const std::string& op_name = last_task_context->op_kernel().name(); Status status; bool cleanup_done = false; int64_t processed_size = batch->size(); auto cleanup_fn = [&](const Status& status) { if (cleanup_done) { return; } SplitBatchCostsAndRecordMetrics( model_name, op_name, batch_cost_measurements, processed_size, *batch); batch_cost_measurements.clear(); for (int i = 0; i < batch->num_tasks(); ++i) { CleanUpFunctionHelper(*batch->mutable_task(i), status); } for (int i = 0; i < unbatched_tasks.size(); ++i) { CleanUpFunctionHelper(*unbatched_tasks[i], status); } cleanup_done = true; }; auto finally = gtl::MakeCleanup([&cleanup_fn, &status] { cleanup_fn(status); }); status = ValidateBatch(*batch); if (!status.ok()) { return; } std::vector<Tensor> concatenated_tensors; status = ConcatInputTensors(*batch, unbatched_tasks, last_task_context, &concatenated_tensors); processed_size = RoundToLowestAllowedBatchSize(batch->size()); if (!status.ok()) { return; } std::vector<Tensor> combined_outputs; std::vector<Tensor> args(concatenated_tensors.begin(), concatenated_tensors.end()); const auto& captured_inputs = batch->task(batch->num_tasks() - 1).captured_inputs; args.insert(args.end(), captured_inputs.begin(), captured_inputs.end()); uint64 current_time = EnvTime::NowNanos(); for (int i = 0; i < batch->num_tasks(); ++i) { RecordBatchDelayUs((current_time - batch->task(i).start_time) * 1e-3, model_name, last_task_context->op_kernel().name(), processed_size); RecordBatchDelayUsV2((current_time - batch->task(i).start_time) * 1e-3, model_name, last_task_context->op_kernel().name(), processed_size); } finally.release(); ProcessFuncBatchImpl( last_task, args, &combined_outputs, [&](const Status& run_status) { Status final_status; auto run_finally = gtl::MakeCleanup([&]() { cleanup_fn(final_status); }); final_status = run_status; if (!final_status.ok()) { return; } if (last_task.forced_warmup_batch_size == 0) { final_status = SplitOutputTensors(combined_outputs, batch.get(), unbatched_tasks); } }); } void BatchResourceBase::ProcessBatch(std::unique_ptr<BatchT> batch) const { if (batch->empty()) { return; } WithContext wc(batch->task(batch->num_tasks() - 1).propagated_context); const CostMeasurement::Context batching_context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements = CreateCostMeasurements(batching_context); int64_t processed_size = batch->size(); OpKernelContext* last_task_context = batch->task(batch->num_tasks() - 1).context; AsyncOpKernel::DoneCallback last_task_callback = batch->task(batch->num_tasks() - 1).done_callback; const std::string& model_name = GetModelName(last_task_context); const std::string& op_name = last_task_context->op_kernel().name(); auto batch_cost_cleanup = gtl::MakeCleanup([&] { SplitBatchCostsAndRecordMetrics( model_name, op_name, batch_cost_measurements, processed_size, *batch); }); 
OP_REQUIRES_OK_ASYNC(last_task_context, ValidateBatch(*batch), last_task_callback); const int num_input_edges = batch->task(0).inputs.size(); std::vector<Tensor> concatenated_tensors; const Status concat_status = ConcatInputTensors(*batch, {}, last_task_context, &concatenated_tensors); processed_size = RoundToLowestAllowedBatchSize(batch->size()); OP_REQUIRES_OK_ASYNC(last_task_context, concat_status, last_task_callback); for (int i = 0; i < num_input_edges; ++i) { last_task_context->set_output(i, concatenated_tensors[i]); for (int task_idx = 0; task_idx < batch->num_tasks() - 1; ++task_idx) { const BatchTask& task = batch->task(task_idx); TensorShape output_shape(task.inputs[i].shape()); output_shape.set_dim(0, 0); Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC( task.context, task.context->allocate_output(i, output_shape, &output), task.done_callback); } } for (int task_idx = 0; task_idx < batch->num_tasks() - 1; ++task_idx) { const BatchTask& task = batch->task(task_idx); TensorShape index_shape({0, 3}); Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC( task.context, task.context->allocate_output(num_input_edges, index_shape, &output), task.done_callback); } for (int task_idx = 0; task_idx < batch->num_tasks(); ++task_idx) { const BatchTask& task = batch->task(task_idx); Tensor* id; OP_REQUIRES_OK_ASYNC(task.context, task.context->allocate_output(num_input_edges + 1, TensorShape({}), &id), task.done_callback); id->scalar<int64_t>()() = task.guid; } OP_REQUIRES_OK_ASYNC( last_task_context, EmitIndexTensor(last_task_context, *batch, num_input_edges), last_task_callback); for (int task_idx = 0; task_idx < batch->num_tasks(); ++task_idx) { batch->mutable_task(task_idx)->done_callback(); } } Status BatchResourceBase::EmitIndexTensor(OpKernelContext* context, const BatchT& batch, int output_index) { const TensorShape index_shape({batch.num_tasks(), 3}); Tensor* index = nullptr; TF_RETURN_IF_ERROR( context->allocate_output(output_index, index_shape, &index)); auto index_flat = index->shaped<int64_t, 2>({batch.num_tasks(), 3}); size_t offset = 0; for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) { const BatchTask& task = batch.task(task_idx); index_flat(task_idx, 0) = task.guid; index_flat(task_idx, 1) = offset; index_flat(task_idx, 2) = offset + task.size(); offset += task.size(); } return absl::OkStatus(); } void BatchResourceBase::ProcessBatchCallBack( std::unique_ptr<Batch<BatchTask>> batch, std::vector<std::unique_ptr<BatchTask>> unbatched_tasks) { if (!session_metadata().name().empty()) { absl::MutexLock lock(&outstanding_batch_mu_); num_outstanding_batched_items_ -= batch->size(); } if (!has_process_batch_function_) { ProcessBatch(std::move(batch)); } else { ProcessFuncBatch(std::move(batch), std::move(unbatched_tasks)); } } Status BatchResourceBase::LookupOrCreateBatcherQueue(const string& queue_name, const string& model_name, const string& op_name, BatcherQueueT** queue) { mutex_lock l(batcher_queues_mu_); auto it = batcher_queues_.find(queue_name); if (it != batcher_queues_.end()) { *queue = it->second.get(); return absl::OkStatus(); } std::unique_ptr<BatcherQueueT> new_queue; if (batcher_) { BatcherT::QueueOptions batcher_queue_options = batcher_queue_options_; batcher_queue_options.model_batch_stats = &GlobalBatchStatsRegistry().model( model_name, op_name); TF_RETURN_IF_ERROR(batcher_->AddQueue( batcher_queue_options, absl::bind_front(&BatchResourceBase::ProcessBatchCallBack, this), &new_queue)); } else if (adaptive_batcher_) { 
std::function<void(std::unique_ptr<Batch<BatchTask>>)> reduced_process_batch_callback = [this](std::unique_ptr<BatchT> batch) { ProcessBatchCallBack(std::move(batch), {}); }; TF_RETURN_IF_ERROR(adaptive_batcher_->AddQueue( adaptive_batcher_queue_options_, reduced_process_batch_callback, &new_queue)); } else { return errors::Internal("No batcher defined."); } *queue = new_queue.get(); batcher_queues_[queue_name] = std::move(new_queue); return absl::OkStatus(); } void BatchResourceBase::SplitBatchCostsAndRecordMetrics( const std::string& model_name, const std::string& op_name, const std::vector<std::unique_ptr<CostMeasurement>>& batch_cost_measurements, const int64_t processed_size, BatchT& batch) { absl::flat_hash_map<std::string, absl::Duration> batch_costs; for (const auto& batch_cost_measurement : batch_cost_measurements) { if (batch_cost_measurement->GetTotalCost() <= absl::ZeroDuration()) { continue; } if (batch.size() == 0) { LOG_EVERY_N_SEC(ERROR, 60) << "Non-zero cost collected but the batch size is 0."; return; } if (processed_size == 0) { LOG_EVERY_N_SEC(ERROR, 60) << "Non-zero cost collected but the processed size is 0."; return; } const absl::string_view cost_type = batch_cost_measurement->GetCostType(); const absl::Duration total_cost = batch_cost_measurement->GetTotalCost(); batch_costs[cost_type] = total_cost; RecordBatchCosts(model_name, processed_size, absl::StrCat(cost_type, kWithSmearSuffix), total_cost); RecordBatchCosts(model_name, processed_size, absl::StrCat(cost_type, kNoSmearSuffix), total_cost / processed_size * batch.size()); if (cost_type == kTpuCostName) { ModelBatchStats& model_stats = GlobalBatchStatsRegistry().model( model_name, op_name); model_stats.batch_size(processed_size).tpu_cost().Register(total_cost); model_stats.RegisterProcessedSize(batch.size()); } for (int i = 0; i < batch.num_tasks(); i++) { RequestCost* request_cost = batch.task(i).request_cost; if (!request_cost) continue; const auto cost_with_smear = total_cost / batch.size() * batch.task(i).size(); const auto cost_no_smear = total_cost / processed_size * batch.task(i).size(); request_cost->RecordCost( {{absl::StrCat(cost_type, kWithSmearSuffix), cost_with_smear}, {absl::StrCat(cost_type, kNoSmearSuffix), cost_no_smear}}); } } const int64_t padding_size = processed_size - batch.size(); for (int i = 0; i < batch.num_tasks(); i++) { RequestCost* request_cost = batch.task(i).request_cost; if (!request_cost) continue; request_cost->RecordBatchMetrics(RequestCost::BatchMetrics{ processed_size, static_cast<int64_t>(batch.task(i).size()), padding_size, batch_costs}); } } } }
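Below is a minimal usage sketch (not part of the original file) of the long GetBatcherQueueOptions overload above. It assumes the method is a static helper visible through batch_resource_base.h and that kPadUpPolicy and MixedPriorityBatchingPolicy are reachable from this translation unit; every numeric value is illustrative rather than a library default. With splitting enabled and allowed sizes {2, 4, 8}, the resulting options take max_execution_batch_size from the largest allowed size while input_batch_size_limit stays at the configured maximum.

// Sketch only: mirrors the shorter overload's choices (pad-up policy, no
// low-priority queue); the numbers here are hypothetical.
#include <vector>

#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"

namespace tensorflow {
namespace serving {

BatchResourceBase::BatcherT::QueueOptions MakeExampleQueueOptions() {
  const std::vector<int32> allowed_batch_sizes = {2, 4, 8};
  return BatchResourceBase::GetBatcherQueueOptions(
      /*num_batch_threads=*/8, /*max_batch_size=*/8,
      /*batch_timeout_micros=*/100, /*max_enqueued_batches=*/100,
      allowed_batch_sizes,
      /*enable_large_batch_splitting=*/true, /*disable_padding=*/false,
      /*batch_padding_policy=*/kPadUpPolicy,
      /*low_priority_max_batch_size=*/0,
      /*low_priority_batch_timeout_micros=*/0,
      /*low_priority_max_enqueued_batches=*/0,
      /*low_priority_allowed_batch_sizes=*/{},
      MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize);
  // Expected result, per the implementation above:
  //   input_batch_size_limit == 8 (max_batch_size),
  //   max_execution_batch_size == *allowed_batch_sizes.rbegin() == 8,
  //   split_input_task_func is set because splitting is enabled,
  //   enable_priority_queue stays false (low-priority max batch size is 0).
}

}  // namespace serving
}  // namespace tensorflow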
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h" #include <cstdint> #include <functional> #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "absl/types/span.h" #include "tensorflow/core/common_runtime/cost_constants.h" #include "tensorflow/core/common_runtime/cost_measurement.h" #include "tensorflow/core/common_runtime/cost_measurement_registry.h" #include "tensorflow/core/common_runtime/request_cost.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/device_factory.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler.h" #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h" #include "tensorflow/core/kernels/batching_util/batch_stats.h" #include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h" #include "tensorflow/core/lib/monitoring/cell_reader.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/notification.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" #include "tsl/platform/criticality.h" #include "tsl/platform/status.h" namespace tensorflow { namespace serving { namespace { using ::testing::Pair; using ::testing::UnorderedElementsAre; TEST(BatchTaskCriticalityTest, CriticalityDefaultsToCritical) { BatchResourceBase::BatchTask batch_task; EXPECT_EQ(batch_task.criticality(), tsl::criticality::Criticality::kCritical); } #if defined(PLATFORM_GOOGLE) TEST(BatchTaskCriticalityTest, CriticalitySuccessfullyPropagated) { std::vector<BatchResourceBase::BatchTask> batch_tasks; { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCriticalPlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCriticalPlus); batch_tasks.push_back(BatchResourceBase::BatchTask()); } { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kCritical); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kCritical); batch_tasks.push_back(BatchResourceBase::BatchTask()); } { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddablePlus); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddablePlus); batch_tasks.push_back(BatchResourceBase::BatchTask()); } { tsl::criticality::ScopedCriticality scoped_criticality( tsl::criticality::Criticality::kSheddable); ASSERT_EQ(tsl::criticality::GetCriticality(), tsl::criticality::Criticality::kSheddable); batch_tasks.push_back(BatchResourceBase::BatchTask()); } batch_tasks.push_back(BatchResourceBase::BatchTask()); EXPECT_EQ(batch_tasks[0].criticality(), tsl::criticality::Criticality::kCriticalPlus); EXPECT_EQ(batch_tasks[1].criticality(), tsl::criticality::Criticality::kCritical); EXPECT_EQ(batch_tasks[2].criticality(), tsl::criticality::Criticality::kSheddablePlus); EXPECT_EQ(batch_tasks[3].criticality(), tsl::criticality::Criticality::kSheddable); EXPECT_EQ(batch_tasks[4].criticality(), tsl::criticality::Criticality::kCritical); } #endif class 
TestTpuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Milliseconds(100); } absl::string_view GetCostType() const override { return "test_tpu"; } }; REGISTER_COST_MEASUREMENT("test_tpu", TestTpuCostMeasurement); class TestGcuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Milliseconds(200); } absl::string_view GetCostType() const override { return "test_gcu"; } }; REGISTER_COST_MEASUREMENT("test_gcu", TestGcuCostMeasurement); std::unique_ptr<BatchResourceBase::BatchTask> MakeBatchTask( const int64_t task_size, RequestCost* request_cost) { auto task = std::make_unique<BatchResourceBase::BatchTask>(); task->inputs.push_back(Tensor(DT_DOUBLE, TensorShape({task_size, 1}))); task->request_cost = request_cost; return task; } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoCostMeasurement) { BatchResourceBase::BatchT batch; RequestCost cost; batch.AddTask(MakeBatchTask(1, &cost)); batch.Close(); std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 16, batch); EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty()); EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 16, 1, 15, ::testing::IsEmpty()))); } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroCost) { BatchResourceBase::BatchT batch; RequestCost cost; batch.AddTask(MakeBatchTask(1, &cost)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("no_op", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 16, batch); EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty()); EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 16, 1, 15, ::testing::IsEmpty()))); } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroBatchSize) { BatchResourceBase::BatchT batch; batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 0, batch); } TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoRequestCost) { BatchResourceBase::BatchT batch; batch.AddTask(MakeBatchTask(1, nullptr)); batch.AddTask(MakeBatchTask(9, nullptr)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 16, batch); EXPECT_EQ(batch.task(0).request_cost, nullptr); EXPECT_EQ(batch.task(1).request_cost, nullptr); } TEST(SplitBatchCostsAndRecordMetricsTest, SplitSingleCostType) { BatchResourceBase::BatchT batch; RequestCost cost1, cost2; batch.AddTask(MakeBatchTask(1, &cost1)); batch.AddTask(MakeBatchTask(9, &cost2)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> 
batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 20, batch); EXPECT_THAT( batch.task(0).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)), Pair("test_tpu_no_smear", absl::Milliseconds(5)))); EXPECT_THAT( batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 1, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); EXPECT_THAT( batch.task(1).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)), Pair("test_tpu_no_smear", absl::Milliseconds(45)))); EXPECT_THAT( batch.task(1).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 9, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); } TEST(SplitBatchCostsAndRecordMetricsTest, SplitMultiCostTypes) { BatchResourceBase::BatchT batch; RequestCost cost1, cost2; batch.AddTask(MakeBatchTask(1, &cost1)); batch.AddTask(MakeBatchTask(9, &cost2)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_gcu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 20, batch); EXPECT_THAT( batch.task(0).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)), Pair("test_tpu_no_smear", absl::Milliseconds(5)), Pair("test_gcu_with_smear", absl::Milliseconds(20)), Pair("test_gcu_no_smear", absl::Milliseconds(10)))); EXPECT_THAT( batch.task(0).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 1, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)), Pair("test_gcu", absl::Milliseconds(200)))))); EXPECT_THAT( batch.task(1).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)), Pair("test_tpu_no_smear", absl::Milliseconds(45)), Pair("test_gcu_with_smear", absl::Milliseconds(180)), Pair("test_gcu_no_smear", absl::Milliseconds(90)))); EXPECT_THAT( batch.task(1).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 9, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)), Pair("test_gcu", absl::Milliseconds(200)))))); } TEST(SplitBatchCostsAndRecordMetricsTest, SplitOnlyNonZeroCostTypes) { BatchResourceBase::BatchT batch; RequestCost cost1, cost2; batch.AddTask(MakeBatchTask(1, &cost1)); batch.AddTask(MakeBatchTask(9, &cost2)); batch.Close(); CostMeasurement::Context context{false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("no_op", context)); batch_cost_measurements.push_back( CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context)); BatchResourceBase::SplitBatchCostsAndRecordMetrics( "model_name", "op_name", batch_cost_measurements, 20, batch); EXPECT_THAT( batch.task(0).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)), Pair("test_tpu_no_smear", absl::Milliseconds(5)))); EXPECT_THAT( batch.task(0).request_cost->GetBatchMetrics(), 
::testing::ElementsAre(::testing::FieldsAre( 20, 1, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); EXPECT_THAT( batch.task(1).request_cost->GetCosts(), UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)), Pair("test_tpu_no_smear", absl::Milliseconds(45)))); EXPECT_THAT( batch.task(1).request_cost->GetBatchMetrics(), ::testing::ElementsAre(::testing::FieldsAre( 20, 9, 10, UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)))))); } TEST(SplitBatchCostsAndRecordMetricsTest, UpdatesGlobalBatchStats) { class FakeTpuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Hours(555); } absl::string_view GetCostType() const override { return kTpuCostName; } }; CostMeasurement::Context context{ false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( std::make_unique<FakeTpuCostMeasurement>(context)); BatchResourceBase::BatchT batch; batch.AddTask(MakeBatchTask( 1, nullptr)); batch.Close(); const char kModelName[] = "test_updates_global_batch_stats"; BatchResourceBase::SplitBatchCostsAndRecordMetrics( kModelName, "op_name", batch_cost_measurements, 17, batch); EXPECT_EQ(GlobalBatchStatsRegistry() .model( kModelName, "op_name") .batch_size(17) .tpu_cost() .mean(), absl::Hours(555)); } TEST(SplitBatchCostsAndRecordMetricsTest, GlobalBatchStatsProcessedSize) { class FakeTpuCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::Hours(555); } absl::string_view GetCostType() const override { return kTpuCostName; } }; CostMeasurement::Context context{ false}; std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements; batch_cost_measurements.push_back( std::make_unique<FakeTpuCostMeasurement>(context)); BatchResourceBase::BatchT batch; batch.AddTask(MakeBatchTask( 1, nullptr)); batch.Close(); const char kModelName[] = "test_global_batch_stats_processed_size"; int original_cumulative_processed_size = GlobalBatchStatsRegistry() .model( kModelName, "op_name") .cumulative_processed_size(); BatchResourceBase::SplitBatchCostsAndRecordMetrics( kModelName, "op_name", batch_cost_measurements, 17, batch); EXPECT_EQ(GlobalBatchStatsRegistry() .model( kModelName, "op_name") .cumulative_processed_size(), original_cumulative_processed_size + 1); BatchResourceBase::BatchT batch2; batch2.AddTask(MakeBatchTask( 1, nullptr)); batch2.AddTask(MakeBatchTask( 1, nullptr)); batch2.AddTask(MakeBatchTask( 1, nullptr)); batch2.Close(); BatchResourceBase::SplitBatchCostsAndRecordMetrics( kModelName, "op_name", batch_cost_measurements, 8, batch2); EXPECT_EQ(GlobalBatchStatsRegistry() .model( kModelName, "op_name") .cumulative_processed_size(), original_cumulative_processed_size + 4); } class BatchResourceBaseTest : public ::testing::Test { protected: class MyBatchResource : public BatchResourceBase { public: using BatchResourceBase::BatchResourceBase; std::string DebugString() const override { return ""; } void ProcessFuncBatchImpl( const BatchResourceBase::BatchTask& , absl::Span<const Tensor> , std::vector<Tensor>* , std::function<void(const absl::Status&)> ) const override { process_func_batch_called_.Notify(); } Notification& process_func_batch_called() { return process_func_batch_called_; } private: mutable Notification process_func_batch_called_; }; BatchResourceBaseTest() { device_ = DeviceFactory::NewDevice("CPU", 
SessionOptions{}, "/job:a/replica:0/task:0"); NodeDefBuilder batch_function_builder("my_batch_node", "BatchFunction"); batch_function_builder.Attr("max_batch_size", 128); batch_function_builder.Attr("num_batch_threads", 8); batch_function_builder.Attr("allowed_batch_sizes", {2, 4, 8}); batch_function_builder.Attr("batch_timeout_micros", 100); batch_function_builder.Attr("max_enqueued_batches", 100); batch_function_builder.Attr("enable_large_batch_splitting", true); std::vector<DataType> input_dtypes = {DataType::DT_INT64, DataType::DT_INT64}; std::vector<NodeDefBuilder::NodeOut> inputs; inputs.push_back(NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64})); inputs.push_back(NodeDefBuilder::NodeOut({"n2", 1, DataType::DT_INT64})); batch_function_builder.Attr("Tin", input_dtypes); batch_function_builder.Input(inputs); batch_function_builder.Attr("Tcaptured", {DataType::DT_INT64}); batch_function_builder.Input(std::vector<NodeDefBuilder::NodeOut>{ NodeDefBuilder::NodeOut({"n3", 1, DataType::DT_INT64})}); batch_function_builder.Attr("Tout", {DataType::DT_INT64}); NameAttrList f; f.set_name("func_to_batch"); batch_function_builder.Attr("f", f); NodeDef batch_kernel_node_def; TF_CHECK_OK(batch_function_builder.Finalize(&batch_kernel_node_def)); absl::Status op_kernel_creation_status; batch_kernel_ = CreateOpKernel(DEVICE_CPU, device_.get(), device_->GetAllocator({}), batch_kernel_node_def, TF_GRAPH_DEF_VERSION, &op_kernel_creation_status); TF_CHECK_OK(op_kernel_creation_status); CHECK(batch_kernel_ != nullptr); input_tensor_ = Tensor(DataType::DT_INT64, TensorShape({5, 2, 1})); input_tensor_values_ = { TensorValue(&input_tensor_), TensorValue(&input_tensor_), TensorValue(&input_tensor_), }; session_metadata_.set_name("my_model_name"); params_.device = device_.get(); params_.op_kernel = batch_kernel_.get(); params_.inputs = input_tensor_values_; params_.session_metadata = &session_metadata_; context_ = std::make_unique<OpKernelContext>(&params_); } std::unique_ptr<Device> device_; std::unique_ptr<OpKernel> batch_kernel_; Tensor input_tensor_; std::vector<TensorValue> input_tensor_values_; SessionMetadata session_metadata_; OpKernelContext::Params params_; std::unique_ptr<OpKernelContext> context_; }; TEST_F(BatchResourceBaseTest, PassesCorrectModelBatchStatsToSbs) { using BatchTask = BatchResourceBase::BatchTask; using SharedBatchScheduler = SharedBatchScheduler<BatchTask>; class MySharedBatchScheduler : public SharedBatchScheduler { public: MySharedBatchScheduler() : SharedBatchScheduler::SharedBatchScheduler({}) {} absl::Status AddQueue( const QueueOptions& options, ProcessBatchCallback process_batch_callback, std::unique_ptr<BatchScheduler<BatchTask>>* queue) override { queue_options_ = options; return SharedBatchScheduler::AddQueue(options, process_batch_callback, queue); } const QueueOptions& queue_options() const { return queue_options_; } private: QueueOptions queue_options_; }; auto batcher = std::make_shared<MySharedBatchScheduler>(); MyBatchResource* my_batch_resource = new MyBatchResource( true, batcher, {}, {}); TF_CHECK_OK(my_batch_resource->RegisterInput( 0, context_.get(), "batcher_queue_name", []() -> absl::StatusOr<std::unique_ptr<BatchResourceBase::BatchTask>> { return std::make_unique<BatchResourceBase::BatchTask>(); }, [] {}, 0)); EXPECT_EQ(batcher->queue_options().model_batch_stats, &GlobalBatchStatsRegistry().model( "my_model_name", "my_batch_node")); my_batch_resource->process_func_batch_called().WaitForNotificationWithTimeout( absl::Seconds(1)); 
my_batch_resource->Unref(); } TEST_F(BatchResourceBaseTest, ConfiguredBatchPaddingPolicyMetric) { tensorflow::monitoring::testing::CellReader<std::string> metric( "/tensorflow/serving/batching/configured_batch_padding_policy"); std::shared_ptr<SharedBatchScheduler<BatchResourceBase::BatchTask>> batcher; TF_CHECK_OK( SharedBatchScheduler<BatchResourceBase::BatchTask>::Create({}, &batcher)); MyBatchResource* my_batch_resource = new MyBatchResource( true, batcher, MyBatchResource::BatcherT::QueueOptions{ .batch_padding_policy{kMinimizeTpuCostPerRequestPolicy}, }, {}); TF_CHECK_OK(my_batch_resource->RegisterInput( 0, context_.get(), "batcher_queue_name", []() -> absl::StatusOr<std::unique_ptr<BatchResourceBase::BatchTask>> { return std::make_unique<BatchResourceBase::BatchTask>(); }, [] {}, 0)); EXPECT_EQ(metric.Read( "my_model_name", "my_batch_node"), kMinimizeTpuCostPerRequestPolicy); my_batch_resource->process_func_batch_called().WaitForNotificationWithTimeout( absl::Seconds(1)); my_batch_resource->Unref(); } } } }
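As a supplement (not in the original test file), here is a self-contained sketch of the smearing arithmetic that the SplitSingleCostType test above exercises; ExpectedWithSmear and ExpectedNoSmear are hypothetical helpers introduced only for illustration. With a 100 ms cost over a batch of size 10 that was padded to a processed size of 20, the with-smear share divides by the real batch size (padding cost is charged to the requests) while the no-smear share divides by the processed size (padding cost is attributed to no request).

// Standalone check of the smearing arithmetic used in
// SplitBatchCostsAndRecordMetrics; the helper names are hypothetical.
#include <cstdint>

#include <gtest/gtest.h>
#include "absl/time/time.h"

namespace {

absl::Duration ExpectedWithSmear(absl::Duration total, int64_t batch_size,
                                 int64_t task_size) {
  // Padding cost is smeared across the real tasks in the batch.
  return total / batch_size * task_size;
}

absl::Duration ExpectedNoSmear(absl::Duration total, int64_t processed_size,
                               int64_t task_size) {
  // Each task pays only for its own share of the padded (processed) batch.
  return total / processed_size * task_size;
}

TEST(CostSmearArithmeticSketch, MatchesSplitSingleCostTypeNumbers) {
  const absl::Duration total = absl::Milliseconds(100);  // test_tpu cost
  const int64_t batch_size = 10;      // tasks of size 1 and 9
  const int64_t processed_size = 20;  // batch padded up to 20
  EXPECT_EQ(ExpectedWithSmear(total, batch_size, 1), absl::Milliseconds(10));
  EXPECT_EQ(ExpectedNoSmear(total, processed_size, 1), absl::Milliseconds(5));
  EXPECT_EQ(ExpectedWithSmear(total, batch_size, 9), absl::Milliseconds(90));
  EXPECT_EQ(ExpectedNoSmear(total, processed_size, 9), absl::Milliseconds(45));
}

}  // namespace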
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_resource_base.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_resource_base_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea