ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
85a652e9-c99d-43a7-9418-8569ec44a1db | cpp | tensorflow/tensorflow | rotate | tensorflow/lite/experimental/ml_adjacent/algo/rotate.cc | tensorflow/lite/experimental/ml_adjacent/algo/rotate_test.cc | #include <cmath>
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rotate {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
inline float DegreesToRadians(int angle) { return angle * M_PI / 180; }
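// Computes the destination canvas size for a rotation by `angle` degrees.
// Multiples of 90 keep or swap the source dimensions; arbitrary angles grow
// the canvas so the rotated corners of the source image still fit inside it.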
void ComputeNewSize(dim_t src_width, dim_t src_height, int angle,
dim_t& dst_width, dim_t& dst_height) {
dst_width = src_width;
dst_height = src_height;
if (angle % 90 == 0) {
if (angle == 90 || angle == 270) {
dst_width = src_height;
dst_height = src_width;
}
} else {
const float angle_rad = DegreesToRadians(angle);
const float cos_angle = std::cos(angle_rad);
const float sin_angle = std::sin(angle_rad);
const int edge_x = src_width / 2;
const int edge_y = src_height / 2;
for (int y : {-edge_y, edge_y}) {
for (int x : {-edge_x, edge_x}) {
const int x_transformed =
static_cast<int>(std::floor(cos_angle * x + sin_angle * y));
const int y_transformed =
static_cast<int>(std::floor(-sin_angle * x + cos_angle * y));
if (std::abs(x_transformed) > dst_width / 2)
dst_width = 2 * std::abs(x_transformed);
if (std::abs(y_transformed) > dst_height / 2)
dst_height = 2 * std::abs(y_transformed);
}
}
}
}
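// Rotates the image by 90 degrees. Each source pixel at (row y, col x) is
// copied, channel by channel, to destination (row x, col output_width - y - 1).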
void Rotate90(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
const float* src_ptr_row = src_data_prt + y * src_row_stride;
for (int x = 0; x < input_width; ++x) {
float* dst_ptr_row = dst_data_prt + x * dst_row_stride;
const float* src_ptr_pixel = src_ptr_row + x * pixel_stride;
float* dst_pixel_ptr =
dst_ptr_row + (output_width - y - 1) * pixel_stride;
for (int c = 0; c < depth; ++c) {
*dst_pixel_ptr++ = *src_ptr_pixel++;
}
}
}
}
}
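// Rotates the image by 180 degrees: the destination is written from the
// bottom-right corner backwards while the source is read in natural order.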
void Rotate180(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int dst_pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
const float* src_ptr_row = src_data_prt + y * src_row_stride;
float* dst_ptr_row = dst_data_prt +
(output_height - y - 1) * dst_row_stride +
(output_width - 1) * dst_pixel_stride;
for (int x = 0; x < input_width; ++x) {
for (int c = 0; c < depth; ++c) {
dst_ptr_row[c] = src_ptr_row[c];
}
dst_ptr_row -= depth;
src_ptr_row += depth;
}
}
}
}
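// Rotates the image by 270 degrees. Each source pixel at (row y, col x) is
// copied, channel by channel, to destination (row output_height - x - 1, col y).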
void Rotate270(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
const float* src_ptr_row = src_data_prt + y * src_row_stride;
for (int x = 0; x < input_width; ++x) {
float* dst_ptr_row =
dst_data_prt + (output_height - x - 1) * dst_row_stride;
const float* src_ptr_pixel = src_ptr_row + x * pixel_stride;
float* dst_pixel_ptr = dst_ptr_row + y * pixel_stride;
for (int c = 0; c < depth; ++c) {
*dst_pixel_ptr++ = *src_ptr_pixel++;
}
}
}
}
}
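// Rotates the image by an arbitrary angle. The output is zeroed first; then,
// for every destination pixel, the inverse rotation about the image center
// yields fractional source coordinates, and pixels whose 2x2 neighborhood lies
// fully inside the source are bilinearly interpolated from it.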
void RotateGeneric(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, int angle,
const float* input_data, float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
memset(
output_data, 0,
batches * output_width * output_height * depth * sizeof(output_data[0]));
const float angle_rad = DegreesToRadians(angle);
const float cos_angle = std::cos(angle_rad);
const float sin_angle = std::sin(angle_rad);
for (int b = 0; b < batches; ++b) {
const float* src_data_prt = input_data + b * src_batch_stride;
float* dst_data_prt = output_data + b * dst_batch_stride;
for (int y = -output_height / 2; y < output_height / 2; ++y) {
for (int x = -output_width / 2; x < output_width / 2; ++x) {
const float x_transformed = cos_angle * x + sin_angle * y;
const float y_transformed = -sin_angle * x + cos_angle * y;
const int x_transformed_integer =
static_cast<int>(std::floor(x_transformed));
const int y_transformed_integer =
static_cast<int>(std::floor(y_transformed));
const int x_src_integer = x_transformed_integer + input_width / 2;
const int y_src_integer = y_transformed_integer + input_height / 2;
const int x0 = x_src_integer;
const int x1 = x_src_integer + 1;
const int y0 = y_src_integer;
const int y1 = y_src_integer + 1;
if (x0 < 0 || x0 >= input_width) continue;
if (x1 < 0 || x1 >= input_width) continue;
if (y0 < 0 || y0 >= input_height) continue;
if (y1 < 0 || y1 >= input_height) continue;
const float x_dist = x_transformed - x_transformed_integer;
const float y_dist = y_transformed - y_transformed_integer;
const float one_minus_x_dist = 1 - x_dist;
const float one_minus_y_dist = 1 - y_dist;
const float* src_ptr_row0 = src_data_prt + y0 * src_row_stride;
const float* src_ptr_row1 = src_data_prt + y1 * src_row_stride;
float* dst_row_ptr =
dst_data_prt + (y + output_height / 2) * dst_row_stride;
const float* src_ptr_pixel00 = src_ptr_row0 + x0 * pixel_stride;
const float* src_ptr_pixel10 = src_ptr_row0 + x1 * pixel_stride;
const float* src_ptr_pixel01 = src_ptr_row1 + x0 * pixel_stride;
const float* src_ptr_pixel11 = src_ptr_row1 + x1 * pixel_stride;
float* dst_pixel_ptr =
dst_row_ptr + (x + output_width / 2) * pixel_stride;
for (int c = 0; c < depth; ++c) {
const float v00 = *src_ptr_pixel00++;
const float v01 = *src_ptr_pixel01++;
const float v10 = *src_ptr_pixel10++;
const float v11 = *src_ptr_pixel11++;
*dst_pixel_ptr++ =
(v10 * one_minus_y_dist + v11 * y_dist) * x_dist +
(v00 * one_minus_y_dist + v01 * y_dist) * one_minus_x_dist;
}
}
}
}
}
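// Rotates the input image, dispatching to the specialized 90/180/270-degree
// kernels when possible and to the interpolating generic path otherwise.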
void ComputeRotate(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 2);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
const DataRef* angle = inputs[1];
const int angle_data = *reinterpret_cast<const int*>(angle->Data());
MutableDataRef* output = outputs[0];
dim_t new_width = 0;
dim_t new_height = 0;
ComputeNewSize(img_width, img_height, angle_data, new_width, new_height);
output->Resize({img_num_batches, new_height, new_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
if (angle_data == 90) {
Rotate90(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
if (angle_data == 180) {
Rotate180(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
if (angle_data == 270) {
Rotate270(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
RotateGeneric(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, angle_data, img_data, output_data);
}
}
const Algo* Impl_Rotate() {
static const Algo rotate = {&ComputeRotate, nullptr};
return &rotate;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rotate.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rotate {
namespace {
struct RotateTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const int angle;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class RotateTest : public ::testing::TestWithParam<RotateTestParams> {};
TEST_P(RotateTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const RotateTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef angle(etype_t::i32);
angle.Resize({1});
ASSERT_EQ(angle.Bytes(), sizeof(int));
std::memcpy(angle.Data(), &params.angle, angle.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rotate = Impl_Rotate();
rotate->process({&img, &angle}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
RotateTests, RotateTest,
testing::ValuesIn({
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
90,
{31, 21, 11,
32, 22, 12,
33, 23, 13},
{1, 3, 3, 1}},
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
180,
{33, 32, 31,
23, 22, 21,
13, 12, 11},
{1, 3, 3, 1}},
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
270,
{13, 23, 33,
12, 22, 32,
11, 21, 31},
{1, 3, 3, 1}},
RotateTestParams{{1, 8, 8, 1},
{1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1},
-45,
{
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.59f,
0.83f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.54f, 0.00f,
0.12f, 0.83f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.54f, 0.00f, 0.00f,
0.00f, 0.12f, 0.83f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.54f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.12f, 0.83f, 0.00f, 0.00f,
0.00f, 0.78f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.23f, 0.97f, 0.00f,
0.00f, 0.00f, 0.54f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.12f, 0.83f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.54f, 0.00f, 0.00f,
0.00f, 0.12f, 0.83f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.54f, 0.00f,
0.12f, 0.83f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.59f,
0.83f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
},
{1, 12, 12, 1}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rotate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rotate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4152b9d6-a170-4c43-a583-3cc6b5fc12ef | cpp | tensorflow/tensorflow | flip_left_right | tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.cc | tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right_test.cc | #include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace flip_left_right {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
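// Mirrors each row of the image horizontally. `chunk_size` is the byte width
// of one pixel (element width times channel count), so the same kernel serves
// every element type; pixels are copied from the end of each source row back
// to the start.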
void FlipLeftRight(dim_t batches, dim_t input_height, dim_t input_width,
const char* input_data, char* output_data,
dim_t chunk_size) {
const dim_t row_stride = input_width * chunk_size;
const dim_t batch_stride = row_stride * input_height;
for (int b = 0; b < batches; ++b) {
const char* src_data_prt = input_data + b * batch_stride;
char* dst_data_prt = output_data + b * batch_stride;
for (int y = 0; y < input_height; ++y) {
const char* src_ptr_row =
src_data_prt + y * row_stride + (input_width - 1) * chunk_size;
char* dst_ptr_row = dst_data_prt + y * row_stride;
for (int x = 0; x < input_width; ++x) {
std::memcpy(dst_ptr_row, src_ptr_row, chunk_size);
src_ptr_row -= chunk_size;
dst_ptr_row += chunk_size;
}
}
}
}
void ComputeFlipLeftRight(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const char* img_data = reinterpret_cast<const char*>(img->Data());
const dim_t num_batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t num_channels = img->Dims()[3];
const dim_t chunk_size = TypeWidth(img->Type()) * num_channels;
MutableDataRef* output = outputs[0];
output->Resize({num_batches, height, width, num_channels});
char* output_data = reinterpret_cast<char*>(output->Data());
FlipLeftRight(num_batches, height, width, img_data, output_data, chunk_size);
}
}
const Algo* Impl_FlipLeftRight() {
static const Algo flip_left_right = {&ComputeFlipLeftRight, nullptr};
return &flip_left_right;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace flip_left_right {
namespace {
struct FlipLeftRightTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class FlipLeftRightTest
: public ::testing::TestWithParam<FlipLeftRightTestParams> {};
TEST_P(FlipLeftRightTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const FlipLeftRightTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* flip_left_right = Impl_FlipLeftRight();
flip_left_right->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
FlipLeftRightTests, FlipLeftRightTest,
testing::ValuesIn({
FlipLeftRightTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
{13, 12, 11,
23, 22, 21,
33, 32, 31},
{1, 3, 3, 1}},
FlipLeftRightTestParams{{1, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{1, 3, 3, 2}},
FlipLeftRightTestParams{{2, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6,
13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4,
11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{2, 3, 3, 2}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a1d59636-3fa4-41e2-a4c4-60dc32774304 | cpp | tensorflow/tensorflow | resize | tensorflow/lite/delegates/gpu/gl/kernels/resize.cc | tensorflow/lite/delegates/hexagon/builders/tests/resize_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/resize.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
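// Generates a GLSL shader that upsamples the input with either bilinear or
// nearest-neighbor sampling, honoring the align_corners and half_pixel_centers
// attributes. Downsampling is rejected as unimplemented.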
class Resize : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const Resize2DAttributes&>(ctx.op_attr);
if (ctx.input_shapes[0][2] > ctx.output_shapes[0][2] ||
ctx.input_shapes[0][1] > ctx.output_shapes[0][1]) {
return absl::UnimplementedError(
"Downsampling is currently not supported by the resize op on GPU.");
}
if (ctx.output_shapes[0][2] != attr.new_shape.w ||
ctx.output_shapes[0][1] != attr.new_shape.h) {
return absl::InvalidArgumentError(
"Output size does not match new_size in attributes.");
}
if (ctx.input_shapes[0][3] != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError("Input/output channels mismatch.");
}
if (ctx.input_shapes[0][1] == 1 && ctx.input_shapes[0][2] == 1) {
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
"value_0 = $input_data_0[0, 0, gid.z]$;",
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"scale_factor",
float2(CalculateResizeScale(ctx.input_shapes[0][2],
ctx.output_shapes[0][2], attr),
CalculateResizeScale(ctx.input_shapes[0][1],
ctx.output_shapes[0][1], attr))},
};
std::string source;
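// Emit the per-pixel sampling code: bilinear maps the output coordinate back
// to a fractional source position and blends a border-clamped 2x2 neighborhood
// with mix(); nearest-neighbor clamps a single scaled coordinate.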
if (attr.type == SamplingType::BILINEAR) {
if (attr.half_pixel_centers) {
source = "vec2 coord = (vec2(gid.xy) + 0.5) * $scale_factor$ - 0.5;";
} else {
source = "vec2 coord = vec2(gid.xy) * $scale_factor$;";
}
source += R"(
vec2 coord_floor = floor(coord);
ivec2 icoord_floor = ivec2(coord_floor);
ivec2 borders = ivec2($input_data_0_w$, $input_data_0_h$) - ivec2(1, 1);
ivec4 st;
st.xy = max(icoord_floor, ivec2(0, 0));
st.zw = min(icoord_floor + ivec2(1, 1), borders);
vec2 t = coord - coord_floor;
vec4 tex11 = $input_data_0[st.x, st.y, gid.z]$;
vec4 tex21 = $input_data_0[st.z, st.y, gid.z]$;
vec4 tex12 = $input_data_0[st.x, st.w, gid.z]$;
vec4 tex22 = $input_data_0[st.z, st.w, gid.z]$;
value_0 = mix(mix(tex11, tex21, t.x), mix(tex12, tex22, t.x), t.y);)";
} else if (attr.type == SamplingType::NEAREST) {
std::string fxc;
std::string fyc;
if (attr.half_pixel_centers) {
fxc = "(float(gid.x) + 0.5) * $scale_factor.x$";
fyc = "(float(gid.y) + 0.5) * $scale_factor.y$";
} else {
fxc = "float(gid.x) * $scale_factor.x$";
fyc = "float(gid.y) * $scale_factor.y$";
}
if (attr.align_corners) {
fxc += " + 0.5";
fyc += " + 0.5";
}
source += " ivec2 coord;\n";
source += " coord.x = int(" + fxc + ");\n";
source += " coord.y = int(" + fyc + ");\n";
source += " coord.x = max(0, coord.x);\n";
source += " coord.y = max(0, coord.y);\n";
source += " coord.x = min(coord.x, $input_data_0_w$ - 1);\n";
source += " coord.y = min(coord.y, $input_data_0_h$ - 1);\n";
source += R"(
value_0 = $input_data_0[coord.x, coord.y, gid.z]$;
)";
} else {
return absl::InvalidArgumentError("Unknown sampling type");
}
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewResizeNodeShader() {
return std::make_unique<Resize>();
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
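// Test harness that builds a single RESIZE_BILINEAR or RESIZE_NEAREST_NEIGHBOR
// op with a constant size tensor and runs it through the Hexagon delegate.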
class ResizeOpModel : public SingleOpModelWithHexagon {
public:
explicit ResizeOpModel(BuiltinOperator op_type, const TensorData& input,
std::initializer_list<int> size_data,
const TensorData& output, bool align_corners = false,
bool half_pixel_centers = false) {
input_ = AddInput(input);
size_ = AddConstInput(TensorType_INT32, size_data, {2});
output_ = AddOutput(output);
if (op_type == BuiltinOperator_RESIZE_NEAREST_NEIGHBOR) {
SetBuiltinOp(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
BuiltinOptions_ResizeNearestNeighborOptions,
CreateResizeNearestNeighborOptions(
builder_, align_corners,
half_pixel_centers)
.Union());
} else {
SetBuiltinOp(op_type, BuiltinOptions_ResizeBilinearOptions,
CreateResizeBilinearOptions(
builder_, align_corners,
half_pixel_centers)
.Union());
}
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
template <typename T>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
int input() { return input_; }
private:
int input_;
int size_;
int output_;
};
TEST(ResizeOpModel, HorizontalResizeBilinear_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_BILINEAR,
{TensorType_UINT8, {1, 1, 2, 1}, -2.0, 10}, {1, 3},
{TensorType_UINT8, {}, -2.0, 10});
m.SetQuantizedInput<uint8_t>({3, 6});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6}, 1)));
}
TEST(ResizeOpModel, HorizontalResizeNearestNeighbor_Int8) {
ResizeOpModel m(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
{TensorType_INT8, {1, 1, 2, 1}, -2.0, 10}, {1, 3},
{TensorType_INT8, {}, -2.0, 10});
m.SetQuantizedInput<int8_t>({3, 6});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({3.01176, 3.01176, 6.02353},
1)));
}
TEST(ResizeOpModel, VerticalResizeBilinear_Int8) {
ResizeOpModel m(BuiltinOperator_RESIZE_BILINEAR,
{TensorType_INT8, {1, 2, 1, 1}, -2.0, 20}, {3, 1},
{TensorType_INT8, {}, -2.0, 20});
m.SetQuantizedInput<int8_t>({3, 9});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({3, 7, 9}, 1)));
}
TEST(ResizeOpModel, VerticalResizeNearestNeighbor_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
{TensorType_UINT8, {1, 2, 1, 1}, -2.0, 20}, {3, 1},
{TensorType_UINT8, {}, -2.0, 20});
m.SetQuantizedInput<uint8_t>({3, 9});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({3.01961, 3.01961, 8.97255},
1)));
}
TEST(ResizeOpModel, ThreeDimensionalResizeBilinear_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_BILINEAR,
{TensorType_UINT8, {1, 2, 2, 2}, -2, 30}, {3, 3},
{TensorType_UINT8, {}, -2.0, 30.0});
m.SetQuantizedInput<uint8_t>({
3, 4, 6, 10,
10, 12, 14, 16,
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
3, 4, 5, 8, 6, 10,
7, 9, 10, 12, 11, 14,
10, 12, 12, 14, 14, 16,
},
1)));
}
TEST(ResizeOpModel, ThreeDimensionalResizeNearestNeighbor_Int8) {
ResizeOpModel m(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
{TensorType_INT8, {1, 2, 2, 2}, -2, 30}, {3, 3},
{TensorType_INT8, {}, -2.0, 30.0});
m.SetQuantizedInput<int8_t>({
3, 4, 6, 10,
10, 12, 14, 16,
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear(
{
3.01177, 4.01569, 3.01177, 4.01569, 6.02353, 10.0392,
3.01177, 4.01569, 3.01177, 4.01569, 6.02353, 10.0392,
10.0392, 12.0471, 10.0392, 12.0471, 14.0549, 16.0627,
},
1)));
}
TEST(ResizeOpModel, TwoDimensionalResizeBilinearWithTwoBatches_Int8) {
ResizeOpModel m(BuiltinOperator_RESIZE_BILINEAR,
{TensorType_INT8, {2, 2, 2, 1}, -2, 30}, {3, 3},
{TensorType_INT8, {}, -2.0, 30.0});
m.SetQuantizedInput<int8_t>({
3, 6,
9, 12,
4, 10,
12, 16
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<int8_t>(), ElementsAreArray(ArrayFloatNear(
{
3, 5, 6,
7, 9, 10,
9, 11, 12,
4, 8, 10,
9, 12, 14,
12, 14, 16,
},
1)));
}
TEST(ResizeOpModel, TwoDimensionalResizeNNWithTwoBatches_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
{TensorType_UINT8, {2, 2, 2, 1}, -2, 30}, {3, 3},
{TensorType_UINT8, {}, -2.0, 30.0});
m.SetQuantizedInput<uint8_t>({
3, 6,
9, 12,
4, 10,
12, 16
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
3.01177, 3.01177, 6.02353,
3.01177, 3.01177, 6.02353,
9.03529, 9.03529, 12.0471,
4.01569, 4.01569, 10.0392,
4.01569, 4.01569, 10.0392,
12.0471, 12.0471, 16.0627,
},
1)));
}
TEST(ResizeOpModel, TwoDimResizeBilinearWithTwoBatches_HalfPixelCenters_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_BILINEAR,
{TensorType_UINT8, {2, 2, 2, 1}, -2.0, 20}, {3, 3},
{TensorType_UINT8, {}, -2.0, 20}, false,
true);
m.SetQuantizedInput<uint8_t>({
3, 6,
9, 12,
4, 10,
12, 16
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({2, 4, 6,
6, 7, 9,
9, 10, 12,
4, 7, 10,
8, 10, 13,
12, 14, 16},
2)));
}
TEST(ResizeOpModel, TwoDimResizeBilinearWithTwoBatches_AlignCorners_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_BILINEAR,
{TensorType_UINT8, {2, 2, 2, 1}, -2.0, 20}, {3, 3},
{TensorType_UINT8, {}, -2.0, 20}, true,
false);
m.SetQuantizedInput<uint8_t>({
3, 6,
9, 12,
4, 10,
12, 16
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6,
7, 9, 10,
9, 11, 12,
4, 8, 10,
9, 12, 13,
12, 15, 16},
2)));
}
TEST(ResizeOpModel, ThreeDimensionalResizeNN_AlignCorners_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
{TensorType_UINT8, {1, 2, 2, 2}, -2.0, 20}, {3, 3},
{TensorType_UINT8, {}, -2.0, 20}, true);
m.SetQuantizedInput<uint8_t>({
3, 4, 6, 10,
10, 12, 14, 16,
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({3, 4, 6, 10, 6, 10,
10, 12, 14, 16, 14, 16,
10, 12, 14, 16, 14, 16},
1)));
}
TEST(ResizeOpModel, ThreeDimensionalResizeNN_HalfPixelCenters_UInt8) {
ResizeOpModel m(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
{TensorType_UINT8, {1, 2, 2, 2}, -2.0, 20}, {3, 3},
{TensorType_UINT8, {}, -2.0, 20}, false,
true);
m.SetQuantizedInput<uint8_t>({
3, 4, 6, 10,
10, 12, 14, 16,
});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear({3, 4, 6, 10, 6, 10,
10, 12, 14, 16, 14, 16,
10, 12, 14, 16, 14, 16},
1)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/resize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/resize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c45de3c-81f9-4c01-82ae-09a5a486e55a | cpp | tensorflow/tensorflow | crop | tensorflow/lite/experimental/ml_adjacent/algo/crop.cc | tensorflow/lite/experimental/ml_adjacent/algo/crop_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace crop {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
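// Copies the sub-image delimited by the given offsets and output size into the
// output buffer. Each output row is one contiguous memcpy of chunk * out_width
// bytes, where `chunk` is the byte width of a single pixel.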
inline void CropToBoundingBox(dim_t offset_height, dim_t offset_width,
dim_t out_height, dim_t out_width,
const DataRef* input, MutableDataRef* output) {
const dim_t in_height = input->Dims()[1];
const dim_t in_width = input->Dims()[2];
const dim_t num_channels = input->Dims()[3];
const dim_t chunk = TypeWidth(input->Type()) * num_channels;
const dim_t in_img_size = in_height * in_width;
const dim_t out_img_size = out_height * out_width;
for (int b = 0; b < input->Dims()[0]; ++b) {
for (int i = 0; i < out_height; ++i) {
const dim_t read_byte_ofs =
(in_img_size * b + (i + offset_height) * in_width + offset_width) *
chunk;
const void* read_start_addr =
reinterpret_cast<const char*>(input->Data()) + read_byte_ofs;
const dim_t write_byte_ofs = chunk * (out_img_size * b + i * out_width);
void* write_addr =
reinterpret_cast<char*>(output->Data()) + write_byte_ofs;
std::memcpy(write_addr, read_start_addr, chunk * out_width);
}
}
}
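// Crops the center of the image to the given fraction of its height and width.
// The window shrinks symmetrically, retaining roughly frac * in_height by
// frac * in_width pixels (subject to integer truncation).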
void ComputeCenterCrop(const InputPack& inputs, const OutputPack& outputs) {
#ifndef NDEBUG
TFLITE_CHECK(inputs.size() == 2);
TFLITE_CHECK(outputs.size() == 1);
#endif
const DataRef* img = inputs[0];
const DataRef* frac = inputs[1];
const double frac_data = *reinterpret_cast<const double*>(frac->Data());
const dim_t in_height = img->Dims()[1];
const dim_t out_height_offset = (in_height - in_height * frac_data) / 2;
const dim_t out_height = in_height - (2 * out_height_offset);
const dim_t in_width = img->Dims()[2];
const dim_t out_width_offset = (in_width - in_width * frac_data) / 2;
const dim_t out_width = in_width - (2 * out_width_offset);
MutableDataRef* output = outputs[0];
output->Resize({img->Dims()[0], out_height, out_width, img->Dims()[3]});
CropToBoundingBox(out_height_offset, out_width_offset, out_height, out_width,
img, output);
}
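// Crops the image to the bounding box described by the four scalar inputs:
// offset_height, offset_width, target_height and target_width.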
void ComputeCropToBoundingBox(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 5);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const DataRef* offset_height = inputs[1];
const dim_t offset_height_data =
*reinterpret_cast<const dim_t*>(offset_height->Data());
const DataRef* offset_width = inputs[2];
const dim_t offset_width_data =
*reinterpret_cast<const dim_t*>(offset_width->Data());
const DataRef* target_height = inputs[3];
const dim_t target_height_data =
*reinterpret_cast<const dim_t*>(target_height->Data());
const DataRef* target_width = inputs[4];
const dim_t target_width_data =
*reinterpret_cast<const dim_t*>(target_width->Data());
MutableDataRef* output = outputs[0];
output->Resize(
{img->Dims()[0], target_height_data, target_width_data, img->Dims()[3]});
CropToBoundingBox(offset_height_data, offset_width_data, target_height_data,
target_width_data, img, output);
}
}
const Algo* Impl_CenterCrop() {
static const Algo center_crop = {&ComputeCenterCrop, nullptr};
return &center_crop;
}
const Algo* Impl_CropToBoundingBox() {
static const Algo crop_to_bounding_box = {&ComputeCropToBoundingBox, nullptr};
return &crop_to_bounding_box;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include <cstring>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
using ::testing::ElementsAreArray;
namespace ml_adj {
namespace crop {
namespace {
std::vector<float> GetIotaVec(dim_t d1, dim_t d2, dim_t d3, dim_t d4) {
std::vector<float> res;
res.resize(d1 * d2 * d3 * d4);
std::iota(res.begin(), res.end(), 0);
return res;
}
struct CropCenterTestParams {
std::vector<dim_t> img_dims;
std::vector<float> img_data;
double frac;
std::vector<float> expected_data;
std::vector<dim_t> expected_shape;
};
class CropCenterTest : public testing::TestWithParam<CropCenterTestParams> {};
TEST_P(CropCenterTest, FloatPixelType) {
const CropCenterTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef frac(etype_t::f64);
frac.Resize({1});
ASSERT_EQ(frac.Bytes(), sizeof(double));
std::memcpy(frac.Data(), &params.frac, frac.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* center_crop = Impl_CenterCrop();
center_crop->process({&img, &frac}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
EXPECT_THAT(absl::MakeSpan(out_data, output.NumElements()),
ElementsAreArray(params.expected_data));
}
INSTANTIATE_TEST_SUITE_P(
CropTests, CropCenterTest,
testing::ValuesIn({
CropCenterTestParams{{1, 4, 4, 1},
GetIotaVec(1, 4, 4, 1),
0.5,
{5, 6, 9, 10},
{1, 2, 2, 1}},
CropCenterTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0.5,
{6, 7, 8, 11, 12, 13, 16, 17, 18},
{1, 3, 3, 1}},
CropCenterTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
0.5,
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{1, 3, 3, 1}},
CropCenterTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0.9,
GetIotaVec(1, 5, 5, 1),
{1, 5, 5, 1}},
CropCenterTestParams{
{1, 5, 5, 1}, GetIotaVec(1, 5, 5, 1), 0.2, {12}, {1, 1, 1, 1}},
CropCenterTestParams{{1, 2, 2, 2},
GetIotaVec(1, 2, 2, 2),
.7,
{0, 1, 2, 3, 4, 5, 6, 7},
{1, 2, 2, 2}},
CropCenterTestParams{
{1, 3, 3, 2}, GetIotaVec(1, 3, 3, 2), .1, {8, 9}, {1, 1, 1, 2}},
CropCenterTestParams{
{2, 3, 3, 1}, GetIotaVec(2, 3, 3, 1), .1, {4, 13}, {2, 1, 1, 1}},
CropCenterTestParams{{2, 3, 3, 2},
GetIotaVec(2, 3, 3, 2),
.1,
{8, 9, 26, 27},
{2, 1, 1, 2}},
}));
struct CropToBoundingBoxTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
dim_t offset_height;
dim_t offset_width;
dim_t target_height;
dim_t target_width;
const std::vector<dim_t> expected_shape;
const std::vector<float> expected_data;
};
class CropToBoundingBoxTest
: public testing::TestWithParam<CropToBoundingBoxTestParams> {};
TEST_P(CropToBoundingBoxTest, FloatPixelType) {
const CropToBoundingBoxTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef offset_height(etype_t::i32);
offset_height.Resize({1});
ASSERT_EQ(offset_height.Bytes(), sizeof(int));
std::memcpy(offset_height.Data(), &params.offset_height,
offset_height.Bytes());
OwningVectorRef offset_width(etype_t::i32);
offset_width.Resize({1});
ASSERT_EQ(offset_width.Bytes(), sizeof(int));
std::memcpy(offset_width.Data(), &params.offset_width, offset_width.Bytes());
OwningVectorRef target_height(etype_t::i32);
target_height.Resize({1});
ASSERT_EQ(target_height.Bytes(), sizeof(int));
std::memcpy(target_height.Data(), &params.target_height,
target_height.Bytes());
OwningVectorRef target_width(etype_t::i32);
target_width.Resize({1});
ASSERT_EQ(target_width.Bytes(), sizeof(int));
std::memcpy(target_width.Data(), &params.target_width, target_width.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* crop_to_bounding_box = Impl_CropToBoundingBox();
crop_to_bounding_box->process(
{&img, &offset_height, &offset_width, &target_height, &target_width},
{&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
EXPECT_THAT(absl::MakeSpan(out_data, output.NumElements()),
ElementsAreArray(params.expected_data));
}
INSTANTIATE_TEST_SUITE_P(
CropTests, CropToBoundingBoxTest,
testing::ValuesIn({
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0,
0,
2,
2,
{1, 2, 2, 1},
{0, 1,
5, 6}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
3,
3,
2,
2,
{1, 2, 2, 1},
{18, 19,
23, 24}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0,
3,
2,
2,
{1, 2, 2, 1},
{3, 4,
8, 9}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
2,
1,
3,
3,
{1, 3, 3, 1},
{11, 12, 13,
16, 17, 18,
21, 22, 23}},
CropToBoundingBoxTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
0,
0,
3,
3,
{1, 3, 3, 1},
{0, 1, 2,
3, 4, 5,
6, 7, 8}},
CropToBoundingBoxTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
1,
1,
1,
1,
{1, 1, 1, 1},
{4}},
CropToBoundingBoxTestParams{{1, 5, 5, 3},
GetIotaVec(1, 5, 5, 3),
2,
2,
2,
2,
{1, 2, 2, 3},
{36, 37, 38, 39, 40, 41,
51, 52, 53, 54, 55, 56}},
CropToBoundingBoxTestParams{{2, 5, 5, 2},
GetIotaVec(2, 5, 5, 2),
2,
2,
2,
2,
{2, 2, 2, 2},
{24, 25, 26, 27, 34, 35, 36, 37,
74, 75, 76, 77, 84, 85, 86, 87}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/crop.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/crop_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7bc6ad5c-bb5f-48df-87f8-8becd1d89c17 | cpp | tensorflow/tensorflow | per_image_standardization | tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.cc | tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cmath>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace per_image_standardization {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
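// Standardizes every image in the batch: out = (in - mean) * min(N, 1/stddev),
// where N is the number of elements per image. Capping the scale keeps the
// output finite for constant images, whose variance is zero.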
inline void PerImageStandardization(dim_t batches, dim_t height, dim_t width,
dim_t num_channels, const float* input_data,
float* output_data) {
const dim_t num_pixels_per_image = height * width * num_channels;
const float inv_num_pixels_per_image = 1.0f / num_pixels_per_image;
for (ind_t b = 0; b < batches; ++b) {
const dim_t offset = b * num_pixels_per_image;
const float* input_ptr = input_data + offset;
float* output_ptr = output_data + offset;
float mean = 0.0f;
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
mean += input_ptr[i];
}
mean *= inv_num_pixels_per_image;
float variance = 0.0f;
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
const float diff = input_ptr[i] - mean;
variance += diff * diff * inv_num_pixels_per_image;
output_ptr[i] = diff;
}
const float inv_adjusted_stddev =
fmin(num_pixels_per_image, 1.0f / sqrt(variance));
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
output_ptr[i] *= inv_adjusted_stddev;
}
}
}
void ComputePerImageStandardization(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
PerImageStandardization(img_num_batches, img_height, img_width,
img_num_channels, img_data, output_data);
}
}
const Algo* Impl_PerImageStandardization() {
static const Algo per_image_standardization = {
&ComputePerImageStandardization, nullptr};
return &per_image_standardization;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace per_image_standardization {
namespace {
struct PerImageStandardizationTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
};
class PerImageStandardizationTest
: public testing::TestWithParam<PerImageStandardizationTestParams> {};
TEST_P(PerImageStandardizationTest, FloatPixelType) {
const PerImageStandardizationTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* per_image_standardization = Impl_PerImageStandardization();
per_image_standardization->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), img.Dims());
constexpr float kAbsError = 0.01f;
float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
PerImageStandardizationTests, PerImageStandardizationTest,
testing::ValuesIn({
PerImageStandardizationTestParams{
{1, 2, 2, 1},
{1, 2,
3, 4},
{-1.3416407, -0.4472136,
0.4472136, 1.3416407}},
PerImageStandardizationTestParams{
{2, 2, 2, 1},
{1, 2,
3, 4,
1, 2,
4, 8},
{-1.3416407, -0.4472136,
0.4472136, 1.3416407,
-1.0257553, -0.65275335,
0.09325048, 1.5852581}},
PerImageStandardizationTestParams{
{2, 2, 2, 2},
{1, 2,
1, 3,
1, 4,
1, 5,
1, 2,
2, 2,
3, 2,
4, 2},
{-0.8451542, -0.16903085,
-0.8451542, 0.50709254,
-0.8451542, 1.1832159,
-0.8451542, 1.8593392,
-1.5075567, -0.30151135,
-0.30151135, -0.30151135,
0.904534, -0.30151135,
2.1105793, -0.30151135}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
517e8f1b-746b-423f-b004-526620287e60 | cpp | tensorflow/tensorflow | rgb_to_yuv | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.cc | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rgb_to_yuv {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
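// Row-major 3x3 RGB -> YUV conversion matrix; the first row holds the BT.601
// luma weights and the remaining rows produce the U and V chroma components.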
constexpr float kRgb2YuvKernel[] = {0.299f, 0.587f, 0.114f,
-0.14714119f, -0.28886916f, 0.43601035f,
0.61497538f, -0.51496512f, -0.10001026f};
constexpr int kRgb2YuvKernelSize =
sizeof(kRgb2YuvKernel) / sizeof(kRgb2YuvKernel[0]);
void ComputeRgbToYuv(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
TFLITE_DCHECK(channels == 3);
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kRgb2YuvKernel[0], kRgb2YuvKernelSize);
}
}
const Algo* Impl_RgbToYuv() {
static const Algo rgb_to_yuv = {&ComputeRgbToYuv, nullptr};
return &rgb_to_yuv;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rgb_to_yuv {
namespace {
struct Rgb2YuvTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class Rgb2YuvTest : public ::testing::TestWithParam<Rgb2YuvTestParams> {};
TEST_P(Rgb2YuvTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const Rgb2YuvTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rgb_to_yuv = Impl_RgbToYuv();
rgb_to_yuv->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
Rgb2YuvTests, Rgb2YuvTest,
testing::ValuesIn({
Rgb2YuvTestParams{{1, 3, 2, 3},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{1, 3, 2, 3}},
Rgb2YuvTestParams{{2, 3, 2, 3},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{2, 3, 2, 3}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_yuv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3b452d23-eecd-4019-aa11-415a1c1afaff | cpp | tensorflow/tensorflow | yuv_to_rgb | tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.cc | tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
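// Row-major 3x3 YUV -> RGB conversion matrix, the inverse of the RGB -> YUV
// transform used by the companion rgb_to_yuv kernel.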
constexpr float kYuv2RgbKernel[] = {1.0f, 0.0f,
1.13988303f,
1.0f, -0.394642334f,
-0.58062185f,
1.0f, 2.03206185f, 0.0f};
constexpr int kYuv2RgbKernelDim =
sizeof(kYuv2RgbKernel) / sizeof(kYuv2RgbKernel[0]);
void ComputeYuvToRgb(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kYuv2RgbKernel[0], kYuv2RgbKernelDim);
}
}
const Algo* Impl_YuvToRgb() {
static const Algo yuv_to_rgb = {&ComputeYuvToRgb, nullptr};
return &yuv_to_rgb;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
struct YuvToRgbTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class YuvToRgbTest : public ::testing::TestWithParam<YuvToRgbTestParams> {};
TEST_P(YuvToRgbTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const YuvToRgbTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* yuv_to_rgb = Impl_YuvToRgb();
yuv_to_rgb->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
YuvToRgbTests, YuvToRgbTest,
testing::ValuesIn({
YuvToRgbTestParams{{1, 3, 2, 3},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{1, 3, 2, 3}},
YuvToRgbTestParams{{2, 3, 2, 3},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{2, 3, 2, 3}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84951f39-a11a-4836-802f-02491812829a | cpp | tensorflow/tensorflow | rgb_to_grayscale | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.cc | tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rgb_to_grayscale {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
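// Collapses the three input channels to one by taking the BT.601-weighted sum
// 0.2989 * R + 0.5870 * G + 0.1140 * B at every pixel.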
inline void ConvertRgbToGrayscale(dim_t batches, dim_t height, dim_t width,
const float* input_data, float* output_data) {
const dim_t output_num_pixels = batches * width * height;
constexpr float kRgb2GrayscaleKernel[] = {0.2989f, 0.5870f, 0.1140f};
const float* src_ptr = input_data;
float* dst_ptr = output_data;
for (int i = 0; i < output_num_pixels; ++i) {
*dst_ptr = kRgb2GrayscaleKernel[0] * src_ptr[0] +
kRgb2GrayscaleKernel[1] * src_ptr[1] +
kRgb2GrayscaleKernel[2] * src_ptr[2];
src_ptr += 3;
dst_ptr++;
}
}
void ComputeRgbToGrayscale(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
TFLITE_DCHECK(channels == 3);
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, 1});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertRgbToGrayscale(img_num_batches, img_height, img_width, img_data,
output_data);
}
}
const Algo* Impl_RgbToGrayscale() {
static const Algo rgb_to_grayscale = {&ComputeRgbToGrayscale, nullptr};
return &rgb_to_grayscale;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rgb_to_grayscale {
namespace {
struct RgbToGrayscaleTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class RgbToGrayscaleTest
: public ::testing::TestWithParam<RgbToGrayscaleTestParams> {};
TEST_P(RgbToGrayscaleTest, FloatPixelType) {
const RgbToGrayscaleTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rgb_to_grayscale = Impl_RgbToGrayscale();
rgb_to_grayscale->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
constexpr float kAbsError = 0.1f;
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
RgbToGrayscaleTests, RgbToGrayscaleTest,
testing::ValuesIn({
RgbToGrayscaleTestParams{{1, 3, 2, 3},
{11, 111, 211,
12, 112, 212,
21, 121, 221,
22, 122, 222,
31, 131, 231,
32, 132, 232},
{92.5f, 93.5f, 102.5f,
103.5f, 112.5f, 113.5f},
{1, 3, 2, 1}},
RgbToGrayscaleTestParams{{2, 3, 2, 3},
{11, 111, 211,
12, 112, 212,
21, 121, 221,
22, 122, 222,
31, 131, 231,
32, 132, 232,
51, 311, 411,
52, 312, 412,
61, 321, 421,
62, 322, 422,
71, 331, 431,
72, 332, 432},
{92.5f, 93.5f, 102.5f,
103.5f, 112.5f, 113.5f,
244.7f, 245.7f, 254.7f,
255.7f, 264.7f, 265.7f},
{2, 3, 2, 1}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rgb_to_grayscale_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
20ab480d-f2a4-4968-9625-c3ddbf78e082 | cpp | tensorflow/tensorflow | flip_up_down | tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down.cc | tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down_test.cc | #include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace flip_up_down {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
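// Flips each image in the batch vertically by copying whole rows in reverse
// order. `chunk_size` is the byte width of one pixel (element width times
// channel count), so the same routine serves any element type.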
void FlipUpDown(dim_t batches, dim_t input_height, dim_t input_width,
const char* input_data, char* output_data, dim_t chunk_size) {
const dim_t row_stride = input_width * chunk_size;
const dim_t batch_stride = row_stride * input_height;
for (int b = 0; b < batches; ++b) {
    const char* src_data_ptr = input_data + b * batch_stride;
    char* dst_data_ptr = output_data + b * batch_stride;
    for (int y = 0; y < input_height; ++y) {
      const char* src_ptr_row =
          src_data_ptr + (input_height - y - 1) * row_stride;
      char* dst_ptr_row = dst_data_ptr + y * row_stride;
std::memcpy(dst_ptr_row, src_ptr_row, row_stride);
}
}
}
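// Algo entry point: derives the per-pixel byte width from the tensor's
// element type and channel count, then flips every batch entry up/down.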
void ComputeFlipUpDown(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const char* img_data = reinterpret_cast<const char*>(img->Data());
const dim_t num_batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t num_channels = img->Dims()[3];
const dim_t chunk_size = TypeWidth(img->Type()) * num_channels;
MutableDataRef* output = outputs[0];
output->Resize({num_batches, height, width, num_channels});
char* output_data = reinterpret_cast<char*>(output->Data());
FlipUpDown(num_batches, height, width, img_data, output_data, chunk_size);
}
}
const Algo* Impl_FlipUpDown() {
static const Algo flip_up_down = {&ComputeFlipUpDown, nullptr};
return &flip_up_down;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace flip_up_down {
namespace {
struct FlipUpDownTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class FlipUpDownTest : public ::testing::TestWithParam<FlipUpDownTestParams> {};
TEST_P(FlipUpDownTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const FlipUpDownTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* flip_up_down = Impl_FlipUpDown();
flip_up_down->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
FlipUpDownTests, FlipUpDownTest,
testing::ValuesIn({
FlipUpDownTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
{31, 32, 33,
21, 22, 23,
11, 12, 13},
{1, 3, 3, 1}},
FlipUpDownTestParams{{1, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{31, 4, 32, 5, 33, 6,
21, 3, 22, 4, 23, 5,
11, 2, 12, 3, 13, 4},
{1, 3, 3, 2}},
FlipUpDownTestParams{{2, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6,
13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{31, 4, 32, 5, 33, 6,
21, 3, 22, 4, 23, 5,
11, 2, 12, 3, 13, 4,
33, 6, 32, 5, 31, 4,
23, 5, 22, 4, 21, 3,
13, 4, 12, 3, 11, 2},
{2, 3, 3, 2}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/flip_up_down_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8027a9a8-1fb7-4aea-ae54-cfe1d96f544a | cpp | tensorflow/tensorflow | owning_vector_ref | tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.cc | tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <cstddef>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
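// Resizes the backing buffer to hold the product of the leading positive
// dims; the first non-positive dim and everything after it are excluded from
// the element count (see IgnoresDimsForNumElementsAfterFirstNonPositive).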
void OwningVectorRef::Resize(dims_t&& dims) {
dims_ = dims;
num_elements_ = 0;
for (dim_t d : dims_) {
if (d <= 0) {
break;
}
if (num_elements_ == 0) {
num_elements_ = d;
} else {
num_elements_ *= d;
}
}
raw_data_buffer_.resize(num_elements_ * TypeWidth(Type()));
}
const void* OwningVectorRef::Data() const { return raw_data_buffer_.data(); }
void* OwningVectorRef::Data() { return raw_data_buffer_.data(); }
ind_t OwningVectorRef::NumElements() const { return num_elements_; }
size_t OwningVectorRef::Bytes() const {
return NumElements() * TypeWidth(Type());
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
TEST(OwningVectorRefTest, ConstructFloat32) {
OwningVectorRef t(etype_t::f32);
EXPECT_EQ(t.Type(), etype_t::f32);
EXPECT_EQ(t.NumElements(), 0);
EXPECT_EQ(t.Bytes(), 0);
EXPECT_THAT(t.Dims(), IsEmpty());
}
TEST(OwningVectorRefTest, ResizeFromEmptyFloat32) {
OwningVectorRef t(etype_t::f32);
t.Resize({2, 2});
EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({2, 2}));
EXPECT_EQ(t.NumElements(), 4);
ASSERT_EQ(t.Bytes(), 4 * sizeof(float));
float* write_f_start = reinterpret_cast<float*>(t.Data());
float* write_f_end = write_f_start + t.NumElements();
std::fill(write_f_start, write_f_end, 0.5f);
const float* read_f_start = reinterpret_cast<const float*>(t.Data());
for (int i = 0; i < t.NumElements(); ++i) {
EXPECT_EQ(read_f_start[i], 0.5f);
}
}
TEST(OwningVectorRefTest, ResizeDownFloat32) {
OwningVectorRef t(etype_t::f32);
t.Resize({2, 2});
float* write_f_start = reinterpret_cast<float*>(t.Data());
float* write_f_end = write_f_start + t.NumElements();
std::fill(write_f_start, write_f_end, 0.5f);
t.Resize({3});
ASSERT_THAT(t.Dims(), ElementsAreArray<dim_t>({3}));
EXPECT_EQ(t.NumElements(), 3);
ASSERT_EQ(t.Bytes(), 3 * sizeof(float));
const float* read_f_start = reinterpret_cast<const float*>(t.Data());
for (int i = 0; i < t.NumElements(); ++i) {
EXPECT_EQ(read_f_start[i], 0.5f);
}
}
TEST(OwningVectorRefTest, IgnoresDimsForNumElementsAfterFirstNonPositive) {
OwningVectorRef t(etype_t::f32);
t.Resize({3, 0, 0, 2});
EXPECT_EQ(t.Type(), etype_t::f32);
EXPECT_EQ(t.NumElements(), 3);
EXPECT_EQ(t.Bytes(), 3 * sizeof(float));
EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({3, 0, 0, 2}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d694b9e1-5657-4324-bf6e-f7b39fa399c0 | cpp | tensorflow/tensorflow | quantized_tensor_element_type | tensorflow/lite/experimental/shlo/quantized_tensor_element_type.cc | tensorflow/lite/experimental/shlo/quantized_tensor_element_type_test.cc | #include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include <sstream>
#include <string>
#include <type_traits>
#include <variant>
#include "tensorflow/lite/experimental/shlo/data_type.h"
namespace shlo_ref {
std::string ToString(const QuantizedElementTypePerTensor& t) {
std::stringstream sstr;
sstr << "QuantizedPerTensor[" << ToString(t.StorageType()) << ", "
<< ToString(t.ExpressedType()) << "]";
return sstr.str();
}
std::string ToString(const QuantizedElementTypePerAxis& t) {
std::stringstream sstr;
sstr << "QuantizedPerAxis[" << ToString(t.StorageType()) << ", "
<< ToString(t.ExpressedType()) << ", " << t.QuantizedDimension() << "]";
return sstr.str();
}
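// Returns a copy of the per-tensor quantized type with its parameters reset
// to the baseline (scale = 1, zero point = 0), so element types can be
// compared while ignoring the concrete quantization parameters.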
QuantizedElementTypePerTensor BaselineType(
const QuantizedElementTypePerTensor& type) {
QuantizedElementTypePerTensor baseline = type;
std::visit(
[](auto& scale) -> void {
scale = std::remove_reference_t<decltype(scale)>(1);
},
baseline.Scale());
std::visit(
[](auto& zero_point) -> void {
zero_point = std::remove_reference_t<decltype(zero_point)>(0);
},
baseline.ZeroPoint());
return baseline;
}
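// Per-axis variant: every scale is reset to 1 and every zero point to 0
// while the storage/expressed types and quantized dimension are preserved.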
QuantizedElementTypePerAxis BaselineType(
const QuantizedElementTypePerAxis& type) {
QuantizedElementTypePerAxis baseline = type;
std::visit(
[](auto& scales) -> void {
using T = std::remove_reference_t<decltype(scales[0])>;
absl::c_fill(scales, static_cast<T>(1));
},
baseline.Scales());
std::visit(
[](auto& zero_points) -> void {
using T = std::remove_reference_t<decltype(zero_points[0])>;
absl::c_fill(zero_points, static_cast<T>(0));
},
baseline.ZeroPoints());
return baseline;
}
} | #include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include <array>
#include <cstdint>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
namespace shlo_ref {
namespace {
using testing::Each;
using testing::ElementsAreArray;
using testing::FloatEq;
using testing::Pointwise;
TEST(Quantization, IsValidQuantizationTypePairWorks) {
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI32));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kBF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI32));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kBF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kF32));
}
struct QuantizationPair {
DataType storage_type;
DataType expressed_type;
};
std::vector<QuantizationPair> ValidQuantizationTypePairs() {
return {QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kBF16},
QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kF16},
QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kF32},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kBF16},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kF16},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kF32},
QuantizationPair{.storage_type = DataType::kSI16,
.expressed_type = DataType::kF32}};
}
struct PerTensorTest : testing::TestWithParam<QuantizationPair> {
static constexpr auto ExtractValueAsInt = [](auto v) {
return static_cast<int32_t>(v);
};
static constexpr auto ExtractValueAsFloat = [](auto v) {
return static_cast<float>(v);
};
};
TEST_P(PerTensorTest, BuildPerTensorWorks) {
const QuantizationPair& config = GetParam();
QuantizedElementTypePerTensor type(config.storage_type, 1,
config.expressed_type, 2.5);
EXPECT_EQ(type.StorageType(), config.storage_type);
EXPECT_EQ(type.ExpressedType(), config.expressed_type);
EXPECT_EQ(std::visit(ExtractValueAsInt, type.ZeroPoint()), 1);
EXPECT_THAT(std::visit(ExtractValueAsFloat, type.Scale()), FloatEq(2.5));
}
TEST_P(PerTensorTest, BaselineTypeWorks) {
float scale = 0.5f;
int32_t zero_point = 3;
const QuantizationPair& config = GetParam();
QuantizedElementTypePerTensor element(config.storage_type, zero_point,
config.expressed_type, scale);
const auto baseline = BaselineType(element);
EXPECT_EQ(baseline.StorageType(), element.StorageType());
EXPECT_EQ(baseline.ExpressedType(), element.ExpressedType());
EXPECT_EQ(std::visit(ExtractValueAsInt, baseline.ZeroPoint()), 0);
EXPECT_THAT(std::visit(ExtractValueAsFloat, baseline.Scale()), FloatEq(1));
}
INSTANTIATE_TEST_SUITE_P(PerTensor, PerTensorTest,
testing::ValuesIn(ValidQuantizationTypePairs()));
struct PerAxisTest : testing::TestWithParam<QuantizationPair> {
static constexpr auto ExtractValueAsInt = [](auto v) {
return std::vector<int32_t>(v.begin(), v.end());
};
static constexpr auto ExtractValueAsFloat = [](auto v) {
return std::vector<float>(v.begin(), v.end());
};
};
TEST_P(PerAxisTest, BuildPerAxisWorks) {
const QuantizationPair& config = GetParam();
const std::vector<int32_t> ref_zero_points{1, 2, 3};
const std::vector<float> ref_scales{1.5, 2.5, 3.5};
QuantizedElementTypePerAxis type(config.storage_type, ref_zero_points,
config.expressed_type, ref_scales,
1);
EXPECT_EQ(type.StorageType(), config.storage_type);
EXPECT_EQ(type.ExpressedType(), config.expressed_type);
EXPECT_THAT(std::visit(ExtractValueAsInt, type.ZeroPoints()),
ElementsAreArray(ref_zero_points));
EXPECT_THAT(std::visit(ExtractValueAsFloat, type.Scales()),
Pointwise(FloatEq(), ref_scales));
}
TEST_P(PerAxisTest, BaselineTypeWorks) {
const QuantizationPair& config = GetParam();
float scales[3] = {0.5f, 0.6f, 0.2f};
int32_t zero_points[3] = {3, 1, 2};
  const QuantizedElementTypePerAxis element(config.storage_type, zero_points,
                                            config.expressed_type, scales,
                                            3u);
const auto baseline = BaselineType(element);
const auto extracted_zero_points =
std::visit(ExtractValueAsInt, baseline.ZeroPoints());
const auto extracted_scales =
std::visit(ExtractValueAsFloat, baseline.Scales());
EXPECT_EQ(baseline.StorageType(), element.StorageType());
EXPECT_EQ(baseline.ExpressedType(), element.ExpressedType());
EXPECT_EQ(baseline.QuantizedDimension(), element.QuantizedDimension());
EXPECT_THAT(extracted_zero_points, Each(0));
EXPECT_THAT(extracted_zero_points.size(), std::size(zero_points));
EXPECT_THAT(extracted_scales, Each(FloatEq(1.0f)));
EXPECT_THAT(extracted_scales.size(), std::size(scales));
}
INSTANTIATE_TEST_SUITE_P(PerAxis, PerAxisTest,
testing::ValuesIn(ValidQuantizationTypePairs()));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/quantized_tensor_element_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/quantized_tensor_element_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c512995-bf4a-4627-b748-73069394ca01 | cpp | tensorflow/tensorflow | elementwise_unary | tensorflow/lite/experimental/shlo/legacy/src/elementwise_unary.cc | tensorflow/lite/experimental/shlo/legacy/test/elementwise_unary_test.cc | #include <bit>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <version>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/bf16.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/f16.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
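// Shared validation for every unary op: operand and result must have the
// same baseline element type, quantized tensors must be per-tensor
// quantized, and strided layouts are rejected as not yet implemented.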
template <typename Value>
absl::Status CheckParameters(const Value& operand, Value& result) {
if (operand.baseline_type() != result.baseline_type()) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(operand) = baseline_type(result)");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
    if (!(operand.is_per_tensor_quantized() &&
          result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per-tensor quantization");
}
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
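// Applies `op` element by element. Plain tensors take a direct loop over the
// storage; quantized tensors dequantize each element to the expressed type,
// apply `op`, and re-quantize with the result's scale and zero point.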
template <ElementType storage_type, ElementType expressed_type, typename Value,
typename Op>
absl::Status ElementwiseUnaryOp(const Value& operand, Value& result, Op&& op) {
if (auto check = CheckParameters(operand, result); !check.ok()) {
return check;
}
using S = Storage<storage_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
size_t n = operand.num_elements();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != operand.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
for (size_t i = 0; i < n; ++i) {
auto x = S::Get(operand_buffer, i);
auto y = op(x);
S::Set(result_buffer, i, y);
}
} else {
static_assert(std::is_same_v<Value, QuantizedTensor>);
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
using ET = typename Storage<expressed_type>::Type;
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
for (size_t i = 0; i < n; ++i) {
auto operand_storage = S::Get(operand_buffer, i);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
operand_storage, operand_quant_param, result_scale_inv,
result_quant_param.zero_point, op);
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
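// The macros below stamp out one strongly typed kernel per (op, element
// type) combination, plus the switch-based dispatchers that route a runtime
// ElementType (or storage/expressed type pair) to the matching kernel.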
#define DEFINE_ELEMENTWISE_UNARY_OP(name, element_type, expression) \
absl::Status name(const Tensor& operand, Tensor& result) { \
return ElementwiseUnaryOp<element_type, element_type>( \
operand, result, [](auto x) { return expression; }); \
}
#define DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name, storage_type, \
expressed_type, expression) \
absl::Status name(const QuantizedTensor& operand, QuantizedTensor& result) { \
return ElementwiseUnaryOp<storage_type, expressed_type>( \
operand, result, [](auto x) { return expression; }); \
}
#define DEFINE_ELEMENTWISE_UNARY_OP_BOOL(name, expression) \
DEFINE_ELEMENTWISE_UNARY_OP(name##_i1, ElementType::kI1, expression);
#define DEFINE_ELEMENTWISE_UNARY_OP_INT(name, expression) \
DEFINE_ELEMENTWISE_UNARY_OP(name##_si8, ElementType::kSI8, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_si16, ElementType::kSI16, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_si32, ElementType::kSI32, expression);
#define DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(name, expression) \
DEFINE_ELEMENTWISE_UNARY_OP(name##_bf16, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_f16, ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_OP(name##_f32, ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si8_bf16, ElementType::kSI8, \
ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si8_f16, ElementType::kSI8, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si8_f32, ElementType::kSI8, \
ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP( \
name##_q_si16_bf16, ElementType::kSI16, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si16_f16, ElementType::kSI16, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si16_f32, ElementType::kSI16, \
ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP( \
name##_q_si32_bf16, ElementType::kSI32, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si32_f16, ElementType::kSI32, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_UNARY_QUANTIZED_OP(name##_q_si32_f32, ElementType::kSI32, \
ElementType::kF32, expression);
#define CALL_UNARY_OP_BOOL_HELPER(name, operand, result) \
case ElementType::kI1: \
return name##_i1(operand, result);
#define CALL_UNARY_OP_INT_HELPER(name, operand, result) \
case ElementType::kSI8: \
return name##_si8(operand, result); \
case ElementType::kSI16: \
return name##_si16(operand, result); \
case ElementType::kSI32: \
return name##_si32(operand, result);
#define CALL_UNARY_OP_FLOAT_HELPER(name, operand, result) \
case ElementType::kBF16: \
return name##_bf16(operand, result); \
case ElementType::kF16: \
return name##_f16(operand, result); \
case ElementType::kF32: \
return name##_f32(operand, result);
#define CALL_UNARY_OP_BOOL_INT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_BOOL_HELPER(name, operand, result); \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_INT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_FLOAT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_FLOAT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_INT_FLOAT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
CALL_UNARY_OP_FLOAT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_OP_BOOL_INT_FLOAT(name, operand, result) \
{ \
auto element_type = operand.element_type(); \
switch (element_type) { \
CALL_UNARY_OP_BOOL_HELPER(name, operand, result); \
CALL_UNARY_OP_INT_HELPER(name, operand, result); \
CALL_UNARY_OP_FLOAT_HELPER(name, operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_UNARY_QUANTIZED_OP(name, operand, result) \
{ \
auto storage_type = operand.storage_type(); \
auto expressed_type = operand.expressed_type(); \
switch (storage_type) { \
case ElementType::kSI8: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si8_bf16(operand, result); \
case ElementType::kF16: \
return name##_q_si8_f16(operand, result); \
case ElementType::kF32: \
return name##_q_si8_f32(operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
case ElementType::kSI16: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si16_bf16(operand, result); \
case ElementType::kF16: \
return name##_q_si16_f16(operand, result); \
case ElementType::kF32: \
return name##_q_si16_f32(operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
case ElementType::kSI32: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si32_bf16(operand, result); \
case ElementType::kF16: \
return name##_q_si32_f16(operand, result); \
case ElementType::kF32: \
return name##_q_si32_f32(operand, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
default: \
return absl::InvalidArgumentError("Unexpected storage type"); \
} \
}
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_INT(Abs, ((x > 0) ? x : -x));
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Abs, ((x > 0) ? x : -x));
}
absl::Status Abs(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT_FLOAT(Abs, operand, result);
}
absl::Status Abs(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Abs, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Cbrt, std::cbrt(static_cast<float>(x)));
}
absl::Status Cbrt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Cbrt, operand, result);
}
absl::Status Cbrt(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Cbrt, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Ceil, std::ceil(static_cast<float>(x)));
}
absl::Status Ceil(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Ceil, operand, result);
}
absl::Status Ceil(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Ceil, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Cosine, std::cos(static_cast<float>(x)));
}
absl::Status Cosine(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Cosine, operand, result);
}
absl::Status Cosine(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Cosine, operand, result);
}
namespace {
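// Portable count-leading-zeros: uses std::countl_zero when <bit> provides
// it, otherwise scans down from the most significant bit with a shift loop.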
template <typename Int>
inline Int CountLeadingZeros(Int x) {
using UInt = typename std::make_unsigned<Int>::type;
#if __cpp_lib_bitops >= 201907L
return std::countl_zero(static_cast<UInt>(x));
#else
if (!x) {
return 8 * sizeof(x);
}
Int result = 0;
auto mask = UInt(1) << (8 * (sizeof(x) - 1) + 7);
for (auto t = static_cast<UInt>(x); t > 0; t <<= 1) {
if (t & mask) break;
result++;
}
return result;
#endif
}
DEFINE_ELEMENTWISE_UNARY_OP_INT(CountLeadingZeros, CountLeadingZeros(x));
}
absl::Status CountLeadingZeros(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT(CountLeadingZeros, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Exponential, std::exp(static_cast<float>(x)));
}
absl::Status Exponential(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Exponential, operand, result);
}
absl::Status Exponential(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Exponential, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(ExponentialMinusOne,
std::expm1(static_cast<float>(x)));
}
absl::Status ExponentialMinusOne(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(ExponentialMinusOne, operand, result);
}
absl::Status ExponentialMinusOne(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(ExponentialMinusOne, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Floor, std::floor(static_cast<float>(x)));
}
absl::Status Floor(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Floor, operand, result);
}
absl::Status Floor(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Floor, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Log, std::log(static_cast<float>(x)));
}
absl::Status Log(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Log, operand, result);
}
absl::Status Log(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Log, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(LogPlusOne,
std::log1p(static_cast<float>(x)));
}
absl::Status LogPlusOne(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(LogPlusOne, operand, result);
}
absl::Status LogPlusOne(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(LogPlusOne, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(
    Logistic, 1.0f / (1.0f + std::exp(static_cast<float>(-x))));
}
absl::Status Logistic(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Logistic, operand, result);
}
absl::Status Logistic(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Logistic, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_INT(Negate, -x);
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Negate, -x);
}
absl::Status Negate(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT_FLOAT(Negate, operand, result);
}
absl::Status Negate(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Negate, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_BOOL(Not, !x);
DEFINE_ELEMENTWISE_UNARY_OP_INT(Not, ~x);
}
absl::Status Not(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_BOOL_INT(Not, operand, result);
}
namespace {
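// Portable population count over the value's own bit width: with <bit>
// available it defers to std::popcount on the unsigned counterpart of Int
// (so popcnt of int8_t(-1) is 8), otherwise it counts bits with a shift loop.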
template <typename Int>
Int Popcount(Int x) {
  using UInt = typename std::make_unsigned<Int>::type;
#if __cpp_lib_bitops >= 201907L
  return std::popcount(static_cast<UInt>(x));
#else
Int result = 0;
UInt mask = 0x1;
for (auto t = static_cast<UInt>(x); t > 0; t >>= 1) {
result += (t & mask);
}
return result;
#endif
}
DEFINE_ELEMENTWISE_UNARY_OP_INT(Popcnt, Popcount(x));
}
absl::Status Popcnt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT(Popcnt, operand, result);
}
namespace {
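// Round half away from zero ("Afz"), which is std::round's tie-breaking
// rule: 0.5 -> 1, -2.5 -> -3.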
template <typename Float>
inline Float RoundNearestAfz(Float x) {
return std::round(static_cast<float>(x));
}
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(RoundNearestAfz, RoundNearestAfz(x));
}
absl::Status RoundNearestAfz(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(RoundNearestAfz, operand, result);
}
absl::Status RoundNearestAfz(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(RoundNearestAfz, operand, result);
}
namespace {
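// Round half to even via the IEEE remainder: x - remainder(x, 1) snaps x to
// the nearest integer with ties going to the even neighbor, e.g.
// 2.5 - 0.5 = 2 and 0.5 - 0.5 = 0.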
template <typename Float>
inline Float RoundNearestEven(Float x) {
return x - static_cast<Float>(std::remainder(static_cast<float>(x), 1.0f));
}
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(RoundNearestEven, RoundNearestEven(x));
}
absl::Status RoundNearestEven(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(RoundNearestEven, operand, result);
}
absl::Status RoundNearestEven(const QuantizedTensor& operand,
QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(RoundNearestEven, operand, result);
}
namespace {
template <typename Float>
inline Float Rsqrt(Float x) {
return Float{1} / static_cast<Float>(std::sqrt(static_cast<float>(x)));
}
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Rsqrt, Rsqrt(x));
}
absl::Status Rsqrt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Rsqrt, operand, result);
}
absl::Status Rsqrt(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Rsqrt, operand, result);
}
namespace {
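// Sign function returning -1, 0, or +1 for both integer and floating-point
// inputs; floating-point NaNs propagate as NaN instead of being classified.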
template <typename Number>
inline Number Sign(Number x) {
if constexpr (std::is_integral<Number>::value) {
return x < 0 ? -1 : (x > 0 ? 1 : 0);
} else {
static_assert(std::is_floating_point<Number>::value ||
std::is_same_v<Number, BF16> || std::is_same_v<Number, F16>);
if (std::isnan(x)) {
return NAN;
}
return (x < 0 ? -1 : (x > 0 ? 1 : 0));
}
}
DEFINE_ELEMENTWISE_UNARY_OP_INT(Sign, Sign(x));
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Sign, Sign(x));
}
absl::Status Sign(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_INT_FLOAT(Sign, operand, result);
}
absl::Status Sign(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Sign, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Sine, std::sin(static_cast<float>(x)));
}
absl::Status Sine(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Sine, operand, result);
}
absl::Status Sine(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Sine, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Sqrt, std::sqrt(static_cast<float>(x)));
}
absl::Status Sqrt(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Sqrt, operand, result);
}
absl::Status Sqrt(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Sqrt, operand, result);
}
namespace {
DEFINE_ELEMENTWISE_UNARY_OP_FLOAT(Tanh, std::tanh(static_cast<float>(x)));
}
absl::Status Tanh(const Tensor& operand, Tensor& result) {
CALL_UNARY_OP_FLOAT(Tanh, operand, result);
}
absl::Status Tanh(const QuantizedTensor& operand, QuantizedTensor& result) {
CALL_UNARY_QUANTIZED_OP(Tanh, operand, result);
}
} | #include <cmath>
#include <cstdint>
#include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/matchers.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
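// Runs `op` over a dense tensor built from `input_values` and checks the
// result against `expected_values` with approximate element-wise equality.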
template <ElementType element_type>
void test(absl::Status (*op)(const Tensor&, Tensor&),
std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<element_type>::Type>&& input_values,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor input(TensorType(Shape(shape), element_type), std::data(input_values));
Tensor expected(TensorType(Shape(shape), element_type),
std::data(expected_values));
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(op(input, result));
EXPECT_THAT(result, IsAlmostSame(expected)) << "input: " << input;
}
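// Quantized variant: quantizes both the inputs and the expected values with
// the given parameters, runs `op` entirely in the quantized domain, and then
// compares the still-quantized results approximately.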
template <ElementType storage_type, ElementType expressed_type>
void test(
absl::Status (*op)(const QuantizedTensor&, QuantizedTensor&),
std::initializer_list<DimensionSize>&& shape,
QuantizedParameter&& quantized_parameter,
std::vector<typename Storage<expressed_type>::Type>&& input_values,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto input_quant_values = QuantizeVector<storage_type, expressed_type>(
input_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor input(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
input_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(op(input, result));
EXPECT_THAT(result, IsAlmostSame(expected)) << "input: " << input;
}
TEST(ElementwiseUnary, Abs) {
test<ElementType::kSI8>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kSI16>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kSI32>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kBF16>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kF16>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
test<ElementType::kF32>(Abs, {5}, {0, 1, -2, 3, -4}, {0, 1, 2, 3, 4});
}
TEST(ElementwiseUnary, AbsQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Abs, {5}, {.scale = 1, .zero_point = 0}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF16>(
Abs, {5}, {.scale = 1e-1, .zero_point = 1}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF32>(
Abs, {5}, {.scale = 1e-1, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kF32>(
Abs, {5}, {.scale = 1e-3, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, 1, 2, 3, 4});
}
TEST(ElementwiseUnary, Cbrt) {
test<ElementType::kBF16>(
Cbrt, {4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kF16>(
Cbrt, {4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kF32>(
Cbrt, {4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
}
TEST(ElementwiseUnary, CbrtQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kSI8, ElementType::kF16>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = -2}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kSI8, ElementType::kF32>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
test<ElementType::kSI16, ElementType::kF32>(
Cbrt, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1, -2, 3},
{0, 1, -1.25992104989487316476f, 1.44224957030740838232f});
}
TEST(ElementwiseUnary, Ceil) {
test<ElementType::kBF16>(Ceil, {4}, {0, 1.1, -2.7, 3.5}, {0, 2, -2, 4});
test<ElementType::kF16>(Ceil, {4}, {0, 1.1, -2.7, 3.5}, {0, 2, -2, 4});
test<ElementType::kF32>(Ceil, {4}, {0, 1.1, -2.7, 3.5}, {0, 2, -2, 4});
}
TEST(ElementwiseUnary, CeilQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Ceil, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -2.7, 3.5},
{0, 2, -2, 4});
test<ElementType::kSI8, ElementType::kF16>(
Ceil, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1.1, -2.7, 3.5},
{0, 2, -2, 4});
test<ElementType::kSI8, ElementType::kF32>(
Ceil, {4}, {.scale = 1e-1, .zero_point = -4}, {0, 1.1, -2.7, 3.5},
{0, 2, -2, 4});
test<ElementType::kSI16, ElementType::kF32>(
Ceil, {4}, {.scale = 1e-2, .zero_point = -4}, {0, 1.11, -2.77, 3.55},
{0, 2, -2, 4});
}
TEST(ElementwiseUnary, Cosine) {
test<ElementType::kBF16>(Cosine, {4}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kF16>(Cosine, {4}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kF32>(Cosine, {4}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
}
TEST(ElementwiseUnary, CosineQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Cosine, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kSI8, ElementType::kF16>(
Cosine, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kSI8, ElementType::kF32>(
Cosine, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
test<ElementType::kSI16, ElementType::kF32>(
Cosine, {4}, {.scale = 1e-4, .zero_point = 0}, {0, 1.1, -1.1, 2.3},
{1, 0.45359612142557738777f, 0.45359612142557738777f,
-0.66627602127982419331f});
}
TEST(ElementwiseUnary, CountLeadingZeros) {
test<ElementType::kSI8>(CountLeadingZeros, {4}, {0, 1, 127, -1},
{8, 7, 1, 0});
test<ElementType::kSI16>(CountLeadingZeros, {4}, {0, 1, 32767, -1},
{16, 15, 1, 0});
test<ElementType::kSI32>(CountLeadingZeros, {4}, {0, 1, 2147483647, -1},
{32, 31, 1, 0});
}
TEST(ElementwiseUnary, Exponential) {
test<ElementType::kBF16>(Exponential, {4}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kF16>(Exponential, {4}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kF32>(Exponential, {4}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
}
TEST(ElementwiseUnary, ExponentialQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Exponential, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF16>(
Exponential, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF32>(
Exponential, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
test<ElementType::kSI16, ElementType::kF32>(
Exponential, {4}, {.scale = 1e-2, .zero_point = 0}, {0, 0.5, 1, 1.5},
{1, 1.64872127070012814684f, 2.71828182845904523536f,
4.48168907033806482260f});
}
TEST(ElementwiseUnary, ExponentialMinusOne) {
test<ElementType::kBF16>(ExponentialMinusOne, {4}, {0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kF16>(ExponentialMinusOne, {4}, {0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kF32>(ExponentialMinusOne, {4}, {0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
}
TEST(ElementwiseUnary, ExponentialMinusOneQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
ExponentialMinusOne, {4}, {.scale = 1e-1, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF16>(
ExponentialMinusOne, {4}, {.scale = 1e-1, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kSI8, ElementType::kF32>(
ExponentialMinusOne, {4}, {.scale = 1e-1, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
test<ElementType::kSI16, ElementType::kF32>(
ExponentialMinusOne, {4}, {.scale = 1e-2, .zero_point = 0},
{0, 0.5, 1, 1.5},
{0, 0.64872127070012814684f, 1.71828182845904523536f,
3.48168907033806482260f});
}
TEST(ElementwiseUnary, Floor) {
test<ElementType::kBF16>(Floor, {4}, {0, 1.1, -2.7, 3.5}, {0, 1, -3, 3});
test<ElementType::kF16>(Floor, {4}, {0, 1.1, -2.7, 3.5}, {0, 1, -3, 3});
test<ElementType::kF32>(Floor, {4}, {0, 1.1, -2.7, 3.5}, {0, 1, -3, 3});
}
TEST(ElementwiseUnary, FloorQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Floor, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1.1, -2.7, 3.5},
{0, 1, -3, 3});
test<ElementType::kSI8, ElementType::kF16>(
Floor, {4}, {.scale = 1e-1, .zero_point = 4}, {0, 1.1, -2.7, 3.5},
{0, 1, -3, 3});
test<ElementType::kSI8, ElementType::kF32>(
Floor, {4}, {.scale = 1e-1, .zero_point = -4}, {0, 1.1, -2.7, 3.5},
{0, 1, -3, 3});
test<ElementType::kSI16, ElementType::kF32>(
Floor, {4}, {.scale = 1e-2, .zero_point = -4}, {0, 1.11, -2.77, 3.55},
{0, 1, -3, 3});
}
TEST(ElementwiseUnary, Log) {
test<ElementType::kBF16>(Log, {4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF16>(Log, {4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF32>(Log, {4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
}
TEST(ElementwiseUnary, LogQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Log, {4}, {.scale = 1e-1, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF16>(
Log, {4}, {.scale = 1e-1, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF32>(
Log, {4}, {.scale = 1e-1, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI16, ElementType::kF32>(
Log, {4}, {.scale = 1e-3, .zero_point = -4}, {0.1, 0.5, 1, 1.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
}
TEST(ElementwiseUnary, LogPlusOne) {
test<ElementType::kBF16>(LogPlusOne, {4}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF16>(LogPlusOne, {4}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
test<ElementType::kF32>(LogPlusOne, {4}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f,
0, 0.40546510810816438197f});
}
TEST(ElementwiseUnary, LogPlusOneQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
LogPlusOne, {4}, {.scale = 1e-1, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF16>(
LogPlusOne, {4}, {.scale = 1e-1, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI8, ElementType::kF32>(
LogPlusOne, {4}, {.scale = 1e-1, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
test<ElementType::kSI16, ElementType::kF32>(
LogPlusOne, {4}, {.scale = 1e-4, .zero_point = 0}, {-0.9, -0.5, 0, 0.5},
{-2.30258509299404568401f, -0.69314718055994530941f, 0,
0.40546510810816438197f});
}
TEST(ElementwiseUnary, Logistic) {
test<ElementType::kBF16>(Logistic, {4}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f,
0.5, 0.62245933120185456464f});
test<ElementType::kF16>(Logistic, {4}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f,
0.5, 0.62245933120185456464f});
test<ElementType::kF32>(Logistic, {4}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f,
0.5, 0.62245933120185456464f});
}
TEST(ElementwiseUnary, LogisticQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Logistic, {4}, {.scale = 1e-1, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
test<ElementType::kSI8, ElementType::kF16>(
Logistic, {4}, {.scale = 1e-1, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
test<ElementType::kSI8, ElementType::kF32>(
Logistic, {4}, {.scale = 1e-1, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
test<ElementType::kSI16, ElementType::kF32>(
Logistic, {4}, {.scale = 1e-3, .zero_point = 0}, {-1, -0.5, 0, 0.5},
{0.26894142136999512074f, 0.37754066879814543536f, 0.5,
0.62245933120185456464f});
}
TEST(ElementwiseUnary, Negate) {
test<ElementType::kSI8>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kSI16>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kSI32>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kBF16>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kF16>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
test<ElementType::kF32>(Negate, {5}, {0, 1, -2, 3, -4}, {0, -1, 2, -3, 4});
}
TEST(ElementwiseUnary, NegateQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Negate, {5}, {.scale = 1, .zero_point = 0}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
test<ElementType::kSI8, ElementType::kF16>(
Negate, {5}, {.scale = 1e-1, .zero_point = 1}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
test<ElementType::kSI8, ElementType::kF32>(
Negate, {5}, {.scale = 1e-1, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
test<ElementType::kSI16, ElementType::kF32>(
Negate, {5}, {.scale = 1e-3, .zero_point = -1}, {0, 1, -2, 3, -4},
{0, -1, 2, -3, 4});
}
TEST(ElementwiseUnary, Not) {
test<ElementType::kI1>(Not, {2}, {0, 1}, {1, 0});
test<ElementType::kSI8>(Not, {5}, {-2, -1, 0, 1, 2},
{1, 0, int8_t(0xFF), int8_t(0xFE), int8_t(0xFD)});
test<ElementType::kSI16>(
Not, {5}, {-2, -1, 0, 1, 2},
{1, 0, int16_t(0xFFFF), int16_t(0xFFFE), int16_t(0xFFFD)});
test<ElementType::kSI32>(
Not, {5}, {-2, -1, 0, 1, 2},
{1, 0, int32_t(0xFFFFFFFFU), int32_t(0xFFFFFFFEU), int32_t(0xFFFFFFFDU)});
}
TEST(ElementwiseUnary, Popcnt) {
test<ElementType::kSI8>(Popcnt, {4}, {0, 1, 2, 127}, {0, 1, 1, 7});
test<ElementType::kSI16>(Popcnt, {4}, {0, 1, 2, 127}, {0, 1, 1, 7});
test<ElementType::kSI32>(Popcnt, {4}, {0, 1, 2, 127}, {0, 1, 1, 7});
}
TEST(ElementwiseUnary, RoundNearestAfz) {
test<ElementType::kBF16>(RoundNearestAfz, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kF16>(RoundNearestAfz, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kF32>(RoundNearestAfz, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-3.0, 0.0, 1.0, 1.0, 3.0});
}
TEST(ElementwiseUnary, RoundNearestAfzQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
RoundNearestAfz, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kSI8, ElementType::kF16>(
RoundNearestAfz, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kSI8, ElementType::kF32>(
RoundNearestAfz, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
test<ElementType::kSI16, ElementType::kF32>(
RoundNearestAfz, {5}, {.scale = 1e-2, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-3.0, 0.0, 1.0, 1.0, 3.0});
}
TEST(ElementwiseUnary, RoundNearestEven) {
test<ElementType::kBF16>(RoundNearestEven, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kF16>(RoundNearestEven, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kF32>(RoundNearestEven, {5}, {-2.5, 0.4, 0.5, 0.6, 2.5},
{-2.0, 0.0, 0.0, 1.0, 2.0});
}
TEST(ElementwiseUnary, RoundNearestEvenQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
RoundNearestEven, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kSI8, ElementType::kF16>(
RoundNearestEven, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kSI8, ElementType::kF32>(
RoundNearestEven, {5}, {.scale = 1e-1, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
test<ElementType::kSI16, ElementType::kF32>(
RoundNearestEven, {5}, {.scale = 1e-2, .zero_point = 0},
{-2.5, 0.4, 0.5, 0.6, 2.5}, {-2.0, 0.0, 0.0, 1.0, 2.0});
}
TEST(ElementwiseUnary, Rsqrt) {
test<ElementType::kBF16>(Rsqrt, {4}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
test<ElementType::kF16>(Rsqrt, {4}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
test<ElementType::kF32>(Rsqrt, {4}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
}
TEST(ElementwiseUnary, RsqrtQuantized) {
test<ElementType::kSI16, ElementType::kF32>(
Rsqrt, {4}, {.scale = 1e-3, .zero_point = 0}, {1.0, 4.0, 9.0, 25.0},
{1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 5.0});
}
TEST(ElementwiseUnary, Sign) {
test<ElementType::kSI8>(Sign, {3}, {-2, 0, 2}, {-1, 0, 1});
test<ElementType::kSI16>(Sign, {3}, {-2, 0, 2}, {-1, 0, 1});
test<ElementType::kSI32>(Sign, {3}, {-2, 0, 2}, {-1, 0, 1});
test<ElementType::kBF16>(
Sign, {8}, {+NAN, -NAN, +INFINITY, -INFINITY, -2.0, -0.0, +0.0, 2.0},
{NAN, NAN, 1, -1, -1, 0, 0, 1});
test<ElementType::kF16>(
Sign, {8}, {+NAN, -NAN, +INFINITY, -INFINITY, -2.0, -0.0, +0.0, 2.0},
{NAN, NAN, 1, -1, -1, 0, 0, 1});
test<ElementType::kF32>(
Sign, {8}, {+NAN, -NAN, +INFINITY, -INFINITY, -2.0, -0.0, +0.0, 2.0},
{NAN, NAN, 1, -1, -1, 0, 0, 1});
}
TEST(ElementwiseUnary, SignQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Sign, {4}, {.scale = 1e-1, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
test<ElementType::kSI8, ElementType::kF16>(
Sign, {4}, {.scale = 1e-1, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
test<ElementType::kSI8, ElementType::kF32>(
Sign, {4}, {.scale = 1e-1, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
test<ElementType::kSI16, ElementType::kF32>(
Sign, {4}, {.scale = 1e-2, .zero_point = 0}, {-2.0, -0.0, +0.0, 2.0},
{-1, 0, 0, 1});
}
TEST(ElementwiseUnary, Sine) {
test<ElementType::kBF16>(Sine, {5}, {0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI},
{0, 1, 0, -1, 0});
test<ElementType::kF16>(Sine, {5}, {0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI},
{0, 1, 0, -1, 0});
test<ElementType::kF32>(Sine, {5}, {0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI},
{0, 1, 0, -1, 0});
}
TEST(ElementwiseUnary, SineQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Sine, {5}, {.scale = 1e-1, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
test<ElementType::kSI8, ElementType::kF16>(
Sine, {5}, {.scale = 1e-1, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
test<ElementType::kSI8, ElementType::kF32>(
Sine, {5}, {.scale = 1e-1, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
test<ElementType::kSI16, ElementType::kF32>(
Sine, {5}, {.scale = 1e-2, .zero_point = 0},
{0, M_PI_2, M_PI, 3 * M_PI_2, 2 * M_PI}, {0, 1, 0, -1, 0});
}
TEST(ElementwiseUnary, Sqrt) {
test<ElementType::kBF16>(Sqrt, {4}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kF16>(Sqrt, {4}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kF32>(Sqrt, {4}, {0, 1, 4, 9}, {0, 1, 2, 3});
}
TEST(ElementwiseUnary, SqrtQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Sqrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kSI8, ElementType::kF16>(
Sqrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kSI8, ElementType::kF32>(
Sqrt, {4}, {.scale = 1e-1, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
test<ElementType::kSI16, ElementType::kF32>(
Sqrt, {4}, {.scale = 1e-2, .zero_point = 0}, {0, 1, 4, 9}, {0, 1, 2, 3});
}
TEST(ElementwiseUnary, Tanh) {
test<ElementType::kBF16>(Tanh, {3}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kF16>(Tanh, {3}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kF32>(Tanh, {3}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
}
TEST(ElementwiseUnary, TanhQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Tanh, {3}, {.scale = 1e-1, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kSI8, ElementType::kF16>(
Tanh, {3}, {.scale = 1e-1, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kSI8, ElementType::kF32>(
Tanh, {3}, {.scale = 1e-1, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
test<ElementType::kSI16, ElementType::kF32>(
Tanh, {3}, {.scale = 1e-2, .zero_point = 0}, {-1, 0, 1},
{-0.76159416, 0.0, 0.76159416});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/elementwise_unary.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/elementwise_unary_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04d01f92-5183-4ab5-b5f1-fbc14e745c81 | cpp | tensorflow/tensorflow | compare | tensorflow/lite/experimental/shlo/ops/compare.cc | tensorflow/lite/experimental/shlo/ops/compare_test.cc | #include "tensorflow/lite/experimental/shlo/ops/compare.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace {
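// Dequantizes per-tensor quantized lhs/rhs element by element and applies the
// comparison functor, writing boolean results to the output tensor.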
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeCompare(F&& func, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
using StorageT = StorageType<storage_type>;
using ExpressedT = StorageType<expressed_type>;
const DimensionSize num_elements = lhs.NumElements();
const StorageT lhs_zero_point =
lhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT lhs_scale =
lhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT rhs_zero_point =
rhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT rhs_scale =
rhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT* lhs_data = lhs.GetDataAs<storage_type>();
const StorageT* rhs_data = rhs.GetDataAs<storage_type>();
bool* output_data = output.GetDataAs<DataType::kI1>();
for (DimensionSize i = 0; i < num_elements;
++i, ++lhs_data, ++rhs_data, ++output_data) {
const ExpressedT dequantized_lhs =
Dequantize(*lhs_data, lhs_zero_point, lhs_scale);
const ExpressedT dequantized_rhs =
Dequantize(*rhs_data, rhs_zero_point, rhs_scale);
*output_data = func(dequantized_lhs, dequantized_rhs);
}
}
}
CompareOp Create(CompareOp::Attributes attributes) {
return {.attributes = attributes};
}
absl::Status Prepare(CompareOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("compare"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("compare"), output, IsBoolTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("compare"), lhs, rhs));
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
return absl::OkStatus();
}
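// Dispatches on the comparison direction attribute; boolean, integer, and
// float tensors are compared directly, while per-tensor quantized tensors go
// through the dequantizing path above.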
absl::Status Evaluate(CompareOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
#define SHLO_REF_COMPARISON_DIRECTION_CASE(DIRECTION, COMPARISON_OP) \
case CompareOp::ComparisonDirection::DIRECTION: { \
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) { \
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization, \
lhs.tensor_element_type(), COMPARISON_OP, lhs, \
rhs, output); \
} else if (IsQuantizedPerTensorTensor(lhs)) { \
DISPATCH_QUANTIZED( \
DequantizeCompare, \
lhs.quantized_per_tensor_element_type().StorageType(), \
lhs.quantized_per_tensor_element_type().ExpressedType(), \
COMPARISON_OP, lhs, rhs, output) \
} \
break; \
}
switch (op.attributes.comparison_direction) {
SHLO_REF_COMPARISON_DIRECTION_CASE(kEq, std::equal_to<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kNe, std::not_equal_to<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kGe, std::greater_equal<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kGt, std::greater<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kLe, std::less_equal<void>());
SHLO_REF_COMPARISON_DIRECTION_CASE(kLt, std::less<void>());
}
return absl::FailedPreconditionError(
"stablehlo.compare: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/compare.h"
#include <string>
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CompareOp> {
static std::string Get() { return "Compare"; }
};
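// Reference functor mirroring CompareOp's comparison directions, used to
// compute expected values in the tests below.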
struct Compare {
template <class T>
constexpr bool operator()(const T a, const T b) const {
switch (comparison_direction) {
case CompareOp::ComparisonDirection::kEq:
return a == b;
case CompareOp::ComparisonDirection::kNe:
return a != b;
case CompareOp::ComparisonDirection::kGe:
return a >= b;
case CompareOp::ComparisonDirection::kGt:
return a > b;
case CompareOp::ComparisonDirection::kLe:
return a <= b;
case CompareOp::ComparisonDirection::kLt:
return a < b;
}
return false;
}
CompareOp::ComparisonDirection comparison_direction;
};
const char* ToString(CompareOp::ComparisonDirection comparison_direction) {
switch (comparison_direction) {
case CompareOp::ComparisonDirection::kEq:
return "eq";
case CompareOp::ComparisonDirection::kNe:
return "ne";
case CompareOp::ComparisonDirection::kGe:
return "ge";
case CompareOp::ComparisonDirection::kGt:
return "gt";
case CompareOp::ComparisonDirection::kLe:
return "le";
case CompareOp::ComparisonDirection::kLt:
return "lt";
}
}
template <>
struct SupportedOpAttributes<CompareOp> {
static CompareOp::Attributes Get() {
return {.comparison_direction = CompareOp::ComparisonDirection::kEq};
}
};
template <>
struct SupportedOpOutputDataType<CompareOp> {
static constexpr DataType kStorageType = DataType::kI1;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Compare, BinaryElementwiseOpShapePropagationTest,
CompareOp, TestParamNames);
GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(
BinaryElementwiseSameBaselineElementTypeConstraintTest);
template <class Op, class SupportedTypes>
using CompareBaselineConstraintTypesCrossProduct =
MapTypes<OpTupleFactory<Op>::template WithOp,
FilterTypes<NegatePred<SameTypes>::template Predicate,
CrossProductTypes<SupportedTypes, SupportedTypes>>>;
using CompareBaselineConstraintTypes =
CompareBaselineConstraintTypesCrossProduct<
CompareOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes,
BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
template <class T>
class CompareSameBaselineElementTypeConstraintTest : public ::testing::Test {};
TYPED_TEST_SUITE(CompareSameBaselineElementTypeConstraintTest,
                 CompareBaselineConstraintTypes, TestParamNames);
TYPED_TEST(CompareSameBaselineElementTypeConstraintTest,
DifferentInputOutputStorageTypesRaiseAnError) {
using Op = std::tuple_element_t<0, TypeParam>;
using LhsTypeDesc = std::tuple_element_t<1, TypeParam>;
using RhsTypeDesc = std::tuple_element_t<2, TypeParam>;
const Shape shape({2, 3, 4});
Tensor lhs_tensor{.type = TensorTypeFor(LhsTypeDesc{}, shape),
.data = nullptr};
Tensor rhs_tensor{.type = TensorTypeFor(RhsTypeDesc{}, shape),
.data = nullptr};
Tensor output_tensor{.type = TensorTypeFor(TestParam<DataType::kI1>{}, shape),
.data = nullptr};
auto op = Create(typename Op::Attributes{});
const absl::Status status =
Prepare(op, lhs_tensor, rhs_tensor, output_tensor);
EXPECT_THAT(status, shlo_ref::testing::StatusIs(
absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(
status.message(),
::testing::ContainsRegex(
"stablehlo.[_a-z]+: baseline type constraint is not satisfied"));
}
using UnsupportedTypes =
WithOpTypes<CompareOp, ConcatTypes<PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Compare, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, ArithmeticTestTypes>;
template <class T>
struct CompareTest : ::testing::Test {};
TYPED_TEST_SUITE(CompareTest, SupportedTypes, TestParamNames);
TYPED_TEST(CompareTest, SupportedTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
absl::BitGen bit_gen;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = DataType::kI1},
.data = output_data.data()};
const CompareOp::ComparisonDirection comparison_direction =
static_cast<CompareOp::ComparisonDirection>(absl::Uniform(bit_gen, 0, 6));
Compare compare_ref{comparison_direction};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), compare_ref);
auto op = Create(
CompareOp::Attributes{.comparison_direction = comparison_direction});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedCompareTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCompareTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCompareTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
absl::BitGen bit_gen;
const Shape shape({2, 2, 2});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, zero_point + 1,
zero_point + 5);
Vector<StorageType<DataType::kI1>> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = DataType::kI1},
.data = output_data.data()};
const CompareOp::ComparisonDirection comparison_direction =
CompareOp::ComparisonDirection::kEq;
Compare compare_ref{comparison_direction};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale, compare_ref](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
return compare_ref(dequantized_lhs, dequantized_rhs);
});
auto op = Create(
CompareOp::Attributes{.comparison_direction = comparison_direction});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data))
<< "lhs " << ::testing::PrintToString(lhs_data) << "\n"
<< "rhs " << ::testing::PrintToString(rhs_data) << "\n"
<< "dir " << ToString(comparison_direction) << "\n";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/compare.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/compare_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03b95d9e-0506-4114-a488-66d1d3a06da7 | cpp | tensorflow/tensorflow | iota | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/iota.cc | tensorflow/lite/experimental/shlo/legacy/test/iota_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/iota.h"
#include <cstdint>
#include <tuple>
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
class LegalizeIota : public OpConversionPattern<mhlo::IotaOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::IotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
bool IsIotaLegal(mhlo::IotaOp op) {
auto e_type = llvm::cast<ShapedType>(op.getType()).getElementType();
return !(e_type.isF32() || e_type.isSignlessInteger(32) ||
e_type.isSignlessInteger(64));
}
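// Builds scalar (start, limit, delta) attributes for a tfl.range covering the
// iota dimension, typed to match the result element type.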
std::tuple<DenseElementsAttr, DenseElementsAttr, DenseElementsAttr>
BuildRangeParams(Type e_type, int64_t iota_dim_size, OpBuilder& b) {
if (e_type.isInteger(32)) {
return std::tuple(BuildScalarDense<int>(e_type, 0),
BuildScalarDense<int>(e_type, iota_dim_size),
BuildScalarDense<int>(e_type, 1));
} else if (e_type.isInteger(64)) {
return std::tuple(BuildScalarDense<int64_t>(e_type, 0),
BuildScalarDense<int64_t>(e_type, iota_dim_size),
BuildScalarDense<int64_t>(e_type, 1));
}
return std::tuple(BuildScalarDense<float>(e_type, 0.0),
BuildScalarDense<float>(e_type, iota_dim_size),
BuildScalarDense<float>(e_type, 1.0));
}
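// Rewrites mhlo.iota as tfl.range; for results of rank > 1 the range is
// reshaped onto the iota dimension and broadcast to the full output shape.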
LogicalResult LegalizeIota::matchAndRewrite(
mhlo::IotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsIotaLegal(op)) {
return rewriter.notifyMatchFailure(op, "Must be i32, i64 or f32");
}
auto type = llvm::cast<ShapedType>(op.getType());
auto e_type = type.getElementType();
const int64_t iota_dim_size = type.getDimSize(op.getIotaDimension());
auto [start, limit, delta] =
BuildRangeParams(e_type, iota_dim_size, rewriter);
auto start_op = rewriter.create<arith::ConstantOp>(op->getLoc(), start);
auto limit_op = rewriter.create<arith::ConstantOp>(op->getLoc(), limit);
auto delta_op = rewriter.create<arith::ConstantOp>(op->getLoc(), delta);
auto range_type = RankedTensorType::get({iota_dim_size}, e_type);
auto range_op = rewriter.create<TFL::RangeOp>(op->getLoc(), range_type,
start_op, limit_op, delta_op);
if (type.getRank() == 1) {
rewriter.replaceOp(op, range_op);
return success();
}
llvm::SmallVector<int64_t> reshape_shape(type.getRank(), 1);
reshape_shape[op.getIotaDimension()] = iota_dim_size;
Value reshape_shape_cst = rewriter.create<arith::ConstantOp>(
op->getLoc(), rewriter.getI64TensorAttr(reshape_shape));
reshape_shape_cst = rewriter.create<TFL::CastOp>(
op->getLoc(),
llvm::cast<ShapedType>(reshape_shape_cst.getType())
.clone(rewriter.getI32Type()),
reshape_shape_cst);
auto reshape_type = RankedTensorType::get(reshape_shape, e_type);
auto reshape_op = rewriter.create<TFL::ReshapeOp>(
op->getLoc(), reshape_type, range_op, reshape_shape_cst);
auto broad_cast_shape_cst = rewriter.create<arith::ConstantOp>(
op->getLoc(), rewriter.getI64TensorAttr(type.getShape()));
rewriter.replaceOpWithNewOp<TFL::BroadcastToOp>(op, type, reshape_op,
broad_cast_shape_cst);
return success();
}
class LegalizeDynamicIotaOp : public OpConversionPattern<mhlo::DynamicIotaOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::DynamicIotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
bool IsDynamicIotaLegal(mhlo::DynamicIotaOp op) {
auto type = llvm::cast<ShapedType>(op.getType());
auto element_type = type.getElementType();
return (!element_type.isF32() && !element_type.isSignlessInteger(32) &&
!element_type.isSignlessInteger(64)) ||
type.getRank() > 1 || op.getIotaDimension() != 0;
}
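// Rewrites rank-1 mhlo.dynamic_iota with iota_dimension == 0 as tfl.range,
// deriving the scalar limit from the runtime shape operand.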
LogicalResult LegalizeDynamicIotaOp::matchAndRewrite(
mhlo::DynamicIotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsDynamicIotaLegal(op)) {
return failure();
}
auto type = llvm::cast<ShapedType>(op.getType());
Type element_type = type.getElementType();
auto [start, unused_limit, delta] =
BuildRangeParams(element_type, 0, rewriter);
auto start_op = rewriter.create<arith::ConstantOp>(op.getLoc(), start);
auto delta_op = rewriter.create<arith::ConstantOp>(op.getLoc(), delta);
auto output_shape = op.getOperand();
if (mlir::isa<FloatType>(element_type)) {
auto cast_type =
mlir::cast<ShapedType>(output_shape.getType()).clone(element_type);
output_shape =
rewriter.create<TFL::CastOp>(op.getLoc(), cast_type, output_shape);
}
DenseIntElementsAttr scalar_attr = DenseIntElementsAttr::get(
RankedTensorType::get({0}, rewriter.getI32Type()),
llvm::ArrayRef<int32_t>({}));
auto scalar_shape =
rewriter.create<arith::ConstantOp>(op.getLoc(), scalar_attr);
auto limit_scalar = rewriter.create<TFL::ReshapeOp>(
op.getLoc(), RankedTensorType::get({}, element_type), output_shape,
scalar_shape);
const uint64_t dimension = op.getIotaDimension();
auto range_type =
RankedTensorType::get({type.getShape()[dimension]}, element_type);
rewriter.replaceOpWithNewOp<TFL::RangeOp>(op, range_type, start_op,
limit_scalar, delta_op);
return success();
}
}
void PopulateIotaPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeIota, LegalizeDynamicIotaOp>(ctx);
target.addDynamicallyLegalOp<mhlo::IotaOp>(IsIotaLegal);
target.addDynamicallyLegalOp<mhlo::DynamicIotaOp>(IsDynamicIotaLegal);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
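// Runs Iota into a freshly allocated tensor of the given shape and checks the
// result against the expected values for the chosen iota dimension.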
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& shape,
DimensionSize iota_dimension,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor expected(TensorType(Shape(shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(Iota(iota_dimension, result));
EXPECT_EQ(result, expected) << "\niota_dimension: " << iota_dimension;
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& shape, DimensionSize iota_dimension,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
decltype(expected_quant_values) result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(Iota(iota_dimension, result));
EXPECT_EQ(result, expected) << "\niota_dimension: " << iota_dimension;
}
TEST(Iota, Unquantized) {
test<ElementType::kSI8>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kBF16>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kBF16>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kF16>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kF16>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kF32>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kF32>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
}
TEST(Iota, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/iota.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/iota_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9fb23443-70bb-4a1a-b176-8ffde412eac7 | cpp | tensorflow/tensorflow | clamp | tensorflow/lite/experimental/shlo/legacy/src/clamp.cc | tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc | #include <algorithm>
#include <cstddef>
#include <type_traits>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
template <typename Value>
absl::Status CheckParameters(const Value& min, const Value& operand,
const Value& max, Value& result) {
if (!(min.rank() == 0 or min.shape() == operand.shape())) {
return absl::InvalidArgumentError(
"Constraint violation: rank(min) = 0 or shape(min) = shape(operand)");
} else if (!(max.rank() == 0 or max.shape() == operand.shape())) {
return absl::InvalidArgumentError(
"Constraint violation: rank(max) = 0 or shape(max) = shape(operand)");
} else if (!(min.baseline_element_type() ==
operand.baseline_element_type() and
min.baseline_element_type() == max.baseline_element_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_element_type(min) = "
"baseline_element_type(operand) = baseline_element_type(max)");
} else if (!(operand.baseline_type() == result.baseline_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(operand) = baseline_type(result)");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (!(min.is_per_tensor_quantized() and max.is_per_tensor_quantized() and
operand.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per-tensor quantization");
}
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
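// Elementwise clamp. Rank-0 min/max tensors are broadcast across the operand;
// quantized tensors are dequantized, clamped in the expressed type, and
// requantized with the result's per-tensor parameters.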
template <ElementType storage_type, ElementType expressed_type, typename Value>
absl::Status Clamp(const Value& min, const Value& operand, const Value& max,
Value& result) {
if (auto check = CheckParameters(min, operand, max, result); !check.ok()) {
return check;
}
using S = Storage<storage_type>;
const bool min_is_tensor = (min.rank() > 0);
const bool max_is_tensor = (max.rank() > 0);
const size_t n = result.num_elements();
auto operand_buffer = operand.buffer();
auto min_buffer = min.buffer();
auto max_buffer = max.buffer();
auto result_buffer = result.buffer();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != result.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
typename S::Type min_value;
typename S::Type max_value;
for (size_t i = 0; i < n; ++i) {
if (min_is_tensor || (i == 0)) {
min_value = S::Get(min_buffer, i);
}
if (max_is_tensor || (i == 0)) {
max_value = S::Get(max_buffer, i);
}
auto operand_value = S::Get(operand_buffer, i);
auto result_value =
std::min(max_value, std::max(min_value, operand_value));
S::Set(result_buffer, i, result_value);
}
} else {
static_assert(std::is_same_v<Value, QuantizedTensor>);
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
using ET = typename Storage<expressed_type>::Type;
const QuantizedParameter& min_quant_param =
min.type().element_type().parameters(0);
const QuantizedParameter& max_quant_param =
max.type().element_type().parameters(0);
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
ET min_expressed;
ET max_expressed;
for (size_t i = 0; i < n; ++i) {
if (min_is_tensor || (i == 0)) {
auto min_storage = S::Get(min_buffer, i);
min_expressed = Dequantize<storage_type, expressed_type>(
min_storage, min_quant_param);
}
if (max_is_tensor || (i == 0)) {
auto max_storage = S::Get(max_buffer, i);
max_expressed = Dequantize<storage_type, expressed_type>(
max_storage, max_quant_param);
}
auto operand_storage = S::Get(operand_buffer, i);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
operand_storage, operand_quant_param, result_scale_inv,
result_quant_param.zero_point, [=](auto x) {
return std::min(max_expressed, std::max(min_expressed, x));
});
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
}
absl::Status Clamp(const Tensor& min, const Tensor& operand, const Tensor& max,
Tensor& result) {
DISPATCH_INT_FLOAT(Clamp, result.element_type(), min, operand, max, result);
}
absl::Status Clamp(const QuantizedTensor& min, const QuantizedTensor& operand,
const QuantizedTensor& max, QuantizedTensor& result) {
DISPATCH_QUANTIZED(Clamp, result.storage_type(), result.expressed_type(), min,
operand, max, result);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
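// Runs Clamp and checks for exact equality; single-element min/max vectors are
// passed as rank-0 (scalar) tensors to exercise broadcasting.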
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<element_type>::Type>&& min_values,
std::vector<typename Storage<element_type>::Type>&& operand_values,
std::vector<typename Storage<element_type>::Type>&& max_values,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();
Tensor min(TensorType(std::move(min_shape), element_type), min_values.data());
Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape();
Tensor max(TensorType(std::move(max_shape), element_type), max_values.data());
Tensor operand(TensorType(Shape(shape), element_type), operand_values.data());
Tensor expected(TensorType(Shape(shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(Clamp(min, operand, max, result));
EXPECT_EQ(result, expected)
<< "min: " << min << "\nmax: " << max << "\noperand: " << operand;
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<expressed_type>::Type>&& min_values,
std::vector<typename Storage<expressed_type>::Type>&& operand_values,
std::vector<typename Storage<expressed_type>::Type>&& max_values,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto min_quant_values = QuantizeVector<storage_type, expressed_type>(
min_values, quantized_parameter);
auto operand_quant_values = QuantizeVector<storage_type, expressed_type>(
operand_values, quantized_parameter);
auto max_quant_values = QuantizeVector<storage_type, expressed_type>(
max_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();
QuantizedTensor min(
QuantizedTensorType(std::move(min_shape),
QuantizedTensorElementType(element_type)),
min_quant_values.data());
Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape();
QuantizedTensor max(
QuantizedTensorType(std::move(max_shape),
QuantizedTensorElementType(element_type)),
max_quant_values.data());
QuantizedTensor operand(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
operand_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(Clamp(min, operand, max, result));
EXPECT_EQ(result, expected)
<< "min: " << min << "\nmax: " << max << "\noperand: " << operand;
}
TEST(Clamp, Unquantized) {
test<ElementType::kSI8>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kBF16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kF16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kF32>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kBF16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kF16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kF32>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
}
TEST(Clamp, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI8, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI8, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/clamp.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f985559-39e3-4126-a682-992bb1d338c5 | cpp | tensorflow/tensorflow | uniform_dequantize_quantize | tensorflow/lite/experimental/shlo/legacy/src/uniform_dequantize_quantize.cc | tensorflow/lite/experimental/shlo/legacy/test/uniform_dequantize_quantize_test.cc | #include <cstddef>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
absl::Status CheckDequantizeParameters(const QuantizedTensor& operand,
Tensor& result) {
if (operand.shape() != result.shape()) {
return absl::InvalidArgumentError("Inconsistent input/output shapes");
} else if (operand.expressed_type() != result.element_type()) {
return absl::InvalidArgumentError("Inconsistent element types");
} else if (!operand.is_per_tensor_quantized()) {
return absl::InvalidArgumentError("Unsupported input quantization");
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
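// Dequantizes each storage value with the operand's per-tensor parameters and
// writes the expressed result.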
template <ElementType storage_type, ElementType expressed_type>
absl::Status UniformDequantize(const QuantizedTensor& operand, Tensor& result) {
if (auto check = CheckDequantizeParameters(operand, result); !check.ok()) {
return check;
}
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
size_t n = operand.num_elements();
using S = Storage<storage_type>;
using E = Storage<expressed_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
for (size_t i = 0; i < n; ++i) {
auto operand_storage = S::Get(operand_buffer, i);
auto operand_expressed = Dequantize<storage_type, expressed_type>(
operand_storage, operand_quant_param);
auto result_expressed = operand_expressed;
E::Set(result_buffer, i, result_expressed);
}
return absl::OkStatus();
}
absl::Status CheckQuantizeParameters(const Tensor& operand,
QuantizedTensor& result) {
if (operand.shape() != result.shape()) {
return absl::InvalidArgumentError("Inconsistent input/output shapes");
} else if (operand.element_type() != result.expressed_type()) {
return absl::InvalidArgumentError("Inconsistent element types");
} else if (!result.is_per_tensor_quantized()) {
return absl::InvalidArgumentError("Unsupported output quantization");
}
return absl::OkStatus();
}
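// Quantizes each expressed value with the result's per-tensor parameters, then
// finalizes the storage values via CompleteQuantization.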
template <ElementType storage_type, ElementType expressed_type>
absl::Status UniformQuantize(const Tensor& operand, QuantizedTensor& result) {
if (auto check = CheckQuantizeParameters(operand, result); !check.ok()) {
return check;
}
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
size_t n = operand.num_elements();
using S = Storage<storage_type>;
using E = Storage<expressed_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
using ET = typename E::Type;
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
for (size_t i = 0; i < n; ++i) {
auto operand_expressed = E::Get(operand_buffer, i);
auto result_expressed = operand_expressed;
auto result_storage = QuantizePartial<storage_type, expressed_type>(
result_expressed, result_scale_inv, result_quant_param.zero_point);
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result); !status.ok()) {
return status;
}
return absl::OkStatus();
}
}
absl::Status UniformDequantize(const QuantizedTensor& operand, Tensor& result) {
DISPATCH_QUANTIZED(UniformDequantize, operand.storage_type(),
operand.expressed_type(), operand, result);
}
absl::Status UniformQuantize(const Tensor& operand, QuantizedTensor& result) {
DISPATCH_QUANTIZED(UniformQuantize, result.storage_type(),
result.expressed_type(), operand, result);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/matchers.h"
namespace stablehlo {
namespace testing {
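// Round-trips input_values through UniformQuantize and UniformDequantize and
// checks the result is close to the original. Only as many leading entries of
// input_values as the shape holds are exercised (the tests below pass shape
// {4} with five values, so the last entry is unused).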
template <ElementType storage_type, ElementType expressed_type>
void test(std::initializer_list<DimensionSize>&& shape,
QuantizedParameter&& quantized_parameter,
std::vector<typename Storage<expressed_type>::Type>&& input_values) {
Tensor input(TensorType(Shape(shape), expressed_type), input_values.data());
std::vector<typename Storage<storage_type>::Type> quant_values(
input_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor quant(
QuantizedTensorType(Shape(shape), std::move(element_type)),
quant_values.data());
std::vector<typename Storage<expressed_type>::Type> result_values(
input_values.size());
Tensor result(TensorType(Shape(shape), expressed_type), result_values.data());
ASSERT_OK(UniformQuantize(input, quant));
ASSERT_OK(UniformDequantize(quant, result));
EXPECT_THAT(result, IsAlmostSame(input));
}
TEST(QuantizeDequantize, All) {
test<ElementType::kSI8, ElementType::kBF16>(
{4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
test<ElementType::kSI8, ElementType::kBF16>(
{4}, {.scale = 1e-1, .zero_point = -5}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI8, ElementType::kF16>({4}, {.scale = 1, .zero_point = 5},
{-2, -1, 0, 1, 2});
test<ElementType::kSI8, ElementType::kF16>(
{4}, {.scale = 1e-1, .zero_point = -10}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI8, ElementType::kF32>({4}, {.scale = 1, .zero_point = 5},
{-2, -1, 0, 1, 2});
test<ElementType::kSI8, ElementType::kF32>(
{4}, {.scale = 1e-1, .zero_point = +10}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI16, ElementType::kBF16>(
{4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
test<ElementType::kSI16, ElementType::kBF16>(
{4}, {.scale = 1e-1, .zero_point = 5}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI16, ElementType::kBF16>(
{4}, {.scale = 1e-2, .zero_point = -5}, {-2.22, -1.11, 0, 1.11, 2.22});
test<ElementType::kSI16, ElementType::kF16>(
{4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
test<ElementType::kSI16, ElementType::kF16>(
{4}, {.scale = 1e-1, .zero_point = -10}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI16, ElementType::kF16>(
{4}, {.scale = 1e-2, .zero_point = 10}, {-2.22, -1.11, 0, 1.11, 2.22});
test<ElementType::kSI32, ElementType::kBF16>(
{4}, {.scale = 1, .zero_point = +7}, {-2, -1, 0, 1, 2});
test<ElementType::kSI32, ElementType::kBF16>(
{4}, {.scale = 1e-1, .zero_point = -7}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI32, ElementType::kBF16>(
{4}, {.scale = 1e-2, .zero_point = 0}, {-2.22, -1.11, 0, 1.11, 2.22});
test<ElementType::kSI32, ElementType::kBF16>(
{4}, {.scale = 1e-3, .zero_point = 0}, {-2.222, -1.111, 0, 1.111, 2.222});
test<ElementType::kSI32, ElementType::kF16>(
{4}, {.scale = 1, .zero_point = +7}, {-2, -1, 0, 1, 2});
test<ElementType::kSI32, ElementType::kF16>(
{4}, {.scale = 1e-1, .zero_point = -7}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI32, ElementType::kF16>(
{4}, {.scale = 1e-2, .zero_point = 10}, {-2.22, -1.11, 0, 1.11, 2.22});
test<ElementType::kSI32, ElementType::kF16>(
{4}, {.scale = 1e-3, .zero_point = -0},
{-2.222, -1.111, 0, 1.111, 2.222});
test<ElementType::kSI32, ElementType::kF32>(
{4}, {.scale = 1, .zero_point = +7}, {-2, -1, 0, 1, 2});
test<ElementType::kSI32, ElementType::kF32>(
{4}, {.scale = 1e-1, .zero_point = -7}, {-2.2, -1.1, 0, 1.1, 2.2});
test<ElementType::kSI32, ElementType::kF32>(
{4}, {.scale = 1e-2, .zero_point = 10}, {-2.22, -1.11, 0, 1.11, 2.22});
test<ElementType::kSI32, ElementType::kF32>(
{4}, {.scale = 1e-3, .zero_point = -0},
{-2.222, -1.111, 0, 1.111, 2.222});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/uniform_dequantize_quantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/uniform_dequantize_quantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18395d74-7e93-44e8-a767-d615878ef438 | cpp | tensorflow/tensorflow | is_finite | tensorflow/lite/experimental/shlo/ops/is_finite.cc | tensorflow/lite/experimental/shlo/ops/is_finite_test.cc | #include "tensorflow/lite/experimental/shlo/ops/is_finite.h"
#include <algorithm>
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace {
absl::Status CheckParameters(const Tensor& operand, const Tensor& result) {
if (operand.shape() != result.shape()) {
return absl::InvalidArgumentError(
"operand and result must have the same shape");
}
if (!operand.IsQuantized() && !IsFloat(operand.tensor_element_type())) {
return absl::InvalidArgumentError(
"operand must be floating-point type or per-tensor quantized");
}
if (operand.IsPerAxisQuantized()) {
return absl::InvalidArgumentError("operand cannot be per-axis quantized");
}
if (result.IsQuantized()) {
return absl::InvalidArgumentError("result cannot be quantized");
}
if (!IsBool(result.tensor_element_type())) {
return absl::InvalidArgumentError("result must be an I1 tensor");
}
if (operand.NumElements() != result.NumElements()) {
return absl::InvalidArgumentError(
"operand and result must have the same size");
}
return absl::OkStatus();
}
template <DataType data_type>
absl::Status EvaluateImpl(const Tensor& operand, bool* output) {
const auto* in = operand.GetDataAs<data_type>();
const auto num_elements = operand.NumElements();
for (DimensionSize i = 0; i < num_elements; ++i) {
output[i] = std::isfinite(static_cast<float>(in[i]));
}
return absl::OkStatus();
}
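// Per-tensor quantized operands are stored as finite integers, so is_finite is
// trivially true for every element; floating-point operands are checked with
// std::isfinite.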
absl::Status EvaluateImpl(const Tensor& operand, Tensor& result) {
bool* output = result.GetDataAs<DataType::kI1>();
if (!operand.IsQuantized()) {
DISPATCH_FLOAT(EvaluateImpl, operand.tensor_element_type(), operand,
output);
} else {
const auto num_elements = result.NumElements();
std::fill(output, output + num_elements, true);
}
return absl::OkStatus();
}
}
IsFiniteOp Create(const IsFiniteOp::Attributes& attributes) {
return IsFiniteOp();
}
absl::Status Prepare(IsFiniteOp& op, const Tensor& operand, Tensor& result) {
return CheckParameters(operand, result);
}
absl::Status Evaluate(IsFiniteOp& op, const Tensor& operand, Tensor& result) {
if (!operand.data) {
return absl::InvalidArgumentError("No operand.data");
}
if (!result.data) {
return absl::InvalidArgumentError("No result.data");
}
return EvaluateImpl(operand, result);
}
} | #include "tensorflow/lite/experimental/shlo/ops/is_finite.h"
#include <cmath>
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
#include "tensorflow/lite/experimental/shlo/tensor_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor_with_data.h"
namespace shlo_ref {
namespace {
using ::shlo_ref::testing::TensorEq;
struct Params {
TensorWithData operand;
TensorWithData expected;
};
class IsFiniteTest : public ::testing::TestWithParam<Params> {};
TEST_P(IsFiniteTest, IsFinite) {
const auto& params = GetParam();
IsFiniteOp op = Create(IsFiniteOp::Attributes{});
Tensor result{.type = params.expected.tensor().type};
ASSERT_OK(Prepare(op, params.operand.tensor(), result));
std::vector<std::byte> result_data(result.SizeInBytes());
result.data = result_data.data();
EXPECT_OK(Evaluate(op, params.operand.tensor(), result));
EXPECT_THAT(result, TensorEq(params.expected.tensor()));
}
INSTANTIATE_TEST_SUITE_P(
Unquantized, IsFiniteTest,
::testing::Values(
Params{TensorWithData::Create<DataType::kBF16>(
Shape{{7}},
{BF16{+NAN}, BF16{-NAN}, BF16{-INFINITY}, BF16{+INFINITY},
BF16{-1.0f}, BF16{0.0f}, BF16{1.0f}}),
TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {false, false, false, false, true, true, true})},
Params{
TensorWithData::Create<DataType::kF16>(
Shape{{7}}, {F16{+NAN}, F16{-NAN}, F16{-INFINITY},
F16{+INFINITY}, F16{-1.0f}, F16{0.0f}, F16{1.0f}}),
TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {false, false, false, false, true, true, true})},
Params{
TensorWithData::Create<DataType::kF32>(
Shape{{7}},
{+NAN, -NAN, -INFINITY, +INFINITY, -1.0f, 0.0f, 1.0f}),
TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {false, false, false, false, true, true, true})}));
INSTANTIATE_TEST_SUITE_P(
Quantized, IsFiniteTest,
::testing::Values(Params{
.operand = TensorWithData::Create<DataType::kSI16, DataType::kF32>(
Shape{{7}}, {0.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}, 0.1f, 0),
.expected = TensorWithData::Create<DataType::kI1>(
Shape{{7}}, {true, true, true, true, true, true, true})}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/is_finite.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/is_finite_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d211a25-07e9-497a-8042-7901507b166f | cpp | tensorflow/tensorflow | elementwise_binary | tensorflow/lite/experimental/shlo/legacy/src/elementwise_binary.cc | tensorflow/lite/experimental/shlo/legacy/test/elementwise_binary_test.cc | #include <cmath>
#include <cstddef>
#include <type_traits>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
template <typename Value>
absl::Status CheckParameters(const Value& lhs, const Value& rhs,
Value& result) {
if (!(lhs.baseline_type() == rhs.baseline_type() and
lhs.baseline_type() == result.baseline_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(on_true) = "
"baseline_type(on_false) = baseline_type(result)");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (!(lhs.is_per_tensor_quantized() and rhs.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per=tensor quantization");
}
}
if (lhs.layout().has_strides() || rhs.layout().has_strides() ||
result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
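// Applies op elementwise over lhs/rhs. Quantized tensors are dequantized,
// combined in the expressed type, and requantized into the result's
// per-tensor parameters.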
template <ElementType storage_type, ElementType expressed_type, typename Value,
typename Op>
absl::Status ElementwiseBinaryOp(const Value& lhs, const Value& rhs,
Value& result, Op&& op) {
if (auto check = CheckParameters(lhs, rhs, result); !check.ok()) {
return check;
}
using S = Storage<storage_type>;
auto lhs_buffer = lhs.buffer();
auto rhs_buffer = rhs.buffer();
auto result_buffer = result.buffer();
size_t n = lhs.num_elements();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != result.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
for (size_t i = 0; i < n; ++i) {
auto x = S::Get(lhs_buffer, i);
auto y = S::Get(rhs_buffer, i);
auto z = op(x, y);
S::Set(result_buffer, i, z);
}
} else {
static_assert(std::is_same_v<Value, QuantizedTensor>);
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
const QuantizedParameter& lhs_quant_param =
lhs.type().element_type().parameters(0);
const QuantizedParameter& rhs_quant_param =
rhs.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
using ET = typename Storage<expressed_type>::Type;
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
for (size_t i = 0; i < n; ++i) {
auto lhs_storage = S::Get(lhs_buffer, i);
auto rhs_storage = S::Get(rhs_buffer, i);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
lhs_storage, rhs_storage, lhs_quant_param, rhs_quant_param,
result_scale_inv, result_quant_param.zero_point, op);
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
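// The macros below stamp out one strongly-typed kernel per element type (and
// per storage/expressed pair for quantized tensors), plus dispatch helpers
// that switch on the runtime element type.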
#define DEFINE_ELEMENTWISE_BINARY_OP(name, element_type, expression) \
absl::Status name(const Tensor& lhs, const Tensor& rhs, Tensor& result) { \
return ElementwiseBinaryOp<element_type, element_type>( \
lhs, rhs, result, [](auto x, auto y) { return expression; }); \
}
#define DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name, storage_type, \
expressed_type, expression) \
absl::Status name(const QuantizedTensor& lhs, const QuantizedTensor& rhs, \
QuantizedTensor& result) { \
return ElementwiseBinaryOp<storage_type, expressed_type>( \
lhs, rhs, result, [](auto x, auto y) { return expression; }); \
}
#define DEFINE_ELEMENTWISE_BINARY_OP_BOOL(name, expression) \
DEFINE_ELEMENTWISE_BINARY_OP(name##_i1, ElementType::kI1, expression);
#define DEFINE_ELEMENTWISE_BINARY_OP_INT(name, expression) \
DEFINE_ELEMENTWISE_BINARY_OP(name##_si8, ElementType::kSI8, expression); \
DEFINE_ELEMENTWISE_BINARY_OP(name##_si16, ElementType::kSI16, expression); \
DEFINE_ELEMENTWISE_BINARY_OP(name##_si32, ElementType::kSI32, expression);
#define DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(name, expression) \
DEFINE_ELEMENTWISE_BINARY_OP(name##_bf16, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_BINARY_OP(name##_f16, ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_BINARY_OP(name##_f32, ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name##_q_si8_bf16, ElementType::kSI8, \
ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name##_q_si8_f16, ElementType::kSI8, \
ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name##_q_si8_f32, ElementType::kSI8, \
ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP( \
name##_q_si16_bf16, ElementType::kSI16, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP( \
name##_q_si16_f16, ElementType::kSI16, ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP( \
name##_q_si16_f32, ElementType::kSI16, ElementType::kF32, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP( \
name##_q_si32_bf16, ElementType::kSI32, ElementType::kBF16, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP( \
name##_q_si32_f16, ElementType::kSI32, ElementType::kF16, expression); \
DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP( \
name##_q_si32_f32, ElementType::kSI32, ElementType::kF32, expression);
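// Dispatch helpers: each CALL_* macro switches on the runtime element type of
// `lhs` and forwards to the matching type-suffixed wrapper defined above. The
// quantized variant dispatches twice, first on the storage type and then on
// the expressed type.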
#define CALL_BINARY_OP_BOOL_HELPER(name, lhs, rhs, result) \
case ElementType::kI1: \
return name##_i1(lhs, rhs, result);
#define CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result) \
case ElementType::kSI8: \
return name##_si8(lhs, rhs, result); \
case ElementType::kSI16: \
return name##_si16(lhs, rhs, result); \
case ElementType::kSI32: \
return name##_si32(lhs, rhs, result);
#define CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result) \
case ElementType::kBF16: \
return name##_bf16(lhs, rhs, result); \
case ElementType::kF16: \
return name##_f16(lhs, rhs, result); \
case ElementType::kF32: \
return name##_f32(lhs, rhs, result);
#define CALL_BINARY_OP_BOOL_INT(name, lhs, rhs, result) \
{ \
auto element_type = lhs.element_type(); \
switch (element_type) { \
CALL_BINARY_OP_BOOL_HELPER(name, lhs, rhs, result); \
CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_BINARY_OP_INT(name, lhs, rhs, result) \
{ \
auto element_type = lhs.element_type(); \
switch (element_type) { \
CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_BINARY_OP_INT_FLOAT(name, lhs, rhs, result) \
{ \
auto element_type = lhs.element_type(); \
switch (element_type) { \
CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result); \
CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_BINARY_OP_FLOAT(name, lhs, rhs, result) \
{ \
auto element_type = lhs.element_type(); \
switch (element_type) { \
CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_BINARY_OP_BOOL_INT_FLOAT(name, lhs, rhs, result) \
{ \
auto element_type = lhs.element_type(); \
switch (element_type) { \
CALL_BINARY_OP_BOOL_HELPER(name, lhs, rhs, result); \
CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result); \
CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected tensor element type"); \
} \
}
#define CALL_BINARY_QUANTIZED_OP(name, lhs, rhs, result) \
{ \
auto storage_type = lhs.storage_type(); \
auto expressed_type = lhs.expressed_type(); \
switch (storage_type) { \
case ElementType::kSI8: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si8_bf16(lhs, rhs, result); \
case ElementType::kF16: \
return name##_q_si8_f16(lhs, rhs, result); \
case ElementType::kF32: \
return name##_q_si8_f32(lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
case ElementType::kSI16: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si16_bf16(lhs, rhs, result); \
case ElementType::kF16: \
return name##_q_si16_f16(lhs, rhs, result); \
case ElementType::kF32: \
return name##_q_si16_f32(lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
case ElementType::kSI32: \
switch (expressed_type) { \
case ElementType::kBF16: \
return name##_q_si32_bf16(lhs, rhs, result); \
case ElementType::kF16: \
return name##_q_si32_f16(lhs, rhs, result); \
case ElementType::kF32: \
return name##_q_si32_f32(lhs, rhs, result); \
default: \
return absl::InvalidArgumentError("Unexpected expressed type"); \
} \
default: \
return absl::InvalidArgumentError("Unexpected storage type"); \
} \
}
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Add, x or y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Add, x + y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Add, x + y);
}
absl::Status Add(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT_FLOAT(Add, lhs, rhs, result);
}
absl::Status Add(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Add, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(And, x&& y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(And, x& y);
}
absl::Status And(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT(And, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Atan2, std::atan2(static_cast<float>(x),
static_cast<float>(y)));
}
absl::Status Atan2(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_FLOAT(Atan2, lhs, rhs, result);
}
absl::Status Atan2(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Atan2, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_INT(Divide, x / y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Divide, x / y);
}
absl::Status Divide(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_INT_FLOAT(Divide, lhs, rhs, result);
}
absl::Status Divide(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Divide, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Maximum, x or y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Maximum, (x > y) ? x : y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Maximum, (x > y) ? x : y);
}
absl::Status Maximum(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT_FLOAT(Maximum, lhs, rhs, result);
}
absl::Status Maximum(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Maximum, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Minimum, x and y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Minimum, (x > y) ? y : x);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Minimum, (x > y) ? y : x);
}
absl::Status Minimum(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT_FLOAT(Minimum, lhs, rhs, result);
}
absl::Status Minimum(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Minimum, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Multiply, x and y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Multiply, x * y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Multiply, x * y);
}
absl::Status Multiply(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT_FLOAT(Multiply, lhs, rhs, result);
}
absl::Status Multiply(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Multiply, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Or, x or y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Or, x | y);
}
absl::Status Or(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT(Or, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_INT(Power, std::pow(static_cast<float>(x),
static_cast<int>(y)));
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Power, std::pow(static_cast<float>(x),
                                                   static_cast<float>(y)));
}
absl::Status Power(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_INT_FLOAT(Power, lhs, rhs, result);
}
absl::Status Power(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Power, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_INT(Remainder, x % y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Remainder, std::fmod(static_cast<float>(x),
static_cast<float>(y)));
}
absl::Status Remainder(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_INT_FLOAT(Remainder, lhs, rhs, result);
}
absl::Status Remainder(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Remainder, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_INT(ShiftLeft, x << y);
}
absl::Status ShiftLeft(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_INT(ShiftLeft, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_INT(ShiftRightArithmetic, x >> y);
}
absl::Status ShiftRightArithmetic(const Tensor& lhs, const Tensor& rhs,
Tensor& result) {
CALL_BINARY_OP_INT(ShiftRightArithmetic, lhs, rhs, result);
}
namespace {
template <typename Int>
inline Int ShiftRightLogical(Int x, Int y) {
using UInt = typename std::make_unsigned<Int>::type;
return static_cast<UInt>(x) >> y;
}
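// Casting to the unsigned counterpart before shifting makes `>>` shift in
// zero bits (a logical shift); applied directly to a negative signed value,
// `>>` would typically shift in copies of the sign bit instead.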
DEFINE_ELEMENTWISE_BINARY_OP_INT(ShiftRightLogical, ShiftRightLogical(x, y));
}
absl::Status ShiftRightLogical(const Tensor& lhs, const Tensor& rhs,
Tensor& result) {
CALL_BINARY_OP_INT(ShiftRightLogical, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_INT(Subtract, x - y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Subtract, x - y);
}
absl::Status Subtract(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_INT_FLOAT(Subtract, lhs, rhs, result);
}
absl::Status Subtract(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
QuantizedTensor& result) {
CALL_BINARY_QUANTIZED_OP(Subtract, lhs, rhs, result);
}
namespace {
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Xor, x xor y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Xor, x ^ y);
}
absl::Status Xor(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
CALL_BINARY_OP_BOOL_INT(Xor, lhs, rhs, result);
}
} | #include <cmath>
#include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/matchers.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(absl::Status (*op)(const Tensor&, const Tensor&, Tensor&),
std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<element_type>::Type>&& input1_values,
std::vector<typename Storage<element_type>::Type>&& input2_values,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor input1(TensorType(Shape(shape), element_type),
std::data(input1_values));
Tensor input2(TensorType(Shape(shape), element_type),
std::data(input2_values));
Tensor expected(TensorType(Shape(shape), element_type),
std::data(expected_values));
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(op(input1, input2, result));
EXPECT_THAT(result, IsAlmostSame(expected))
<< "input1: " << input1 << "\ninput2: " << input2;
}
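// Quantized variant of the helper above: the float inputs and expected
// outputs are quantized with the given QuantizedParameter, the op runs
// entirely on quantized tensors, and the result is compared with
// IsAlmostSame to absorb rounding from the quantize/dequantize round trip.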
template <ElementType storage_type, ElementType expressed_type>
void test(
absl::Status (*op)(const QuantizedTensor&, const QuantizedTensor&,
QuantizedTensor&),
std::initializer_list<DimensionSize>&& shape,
QuantizedParameter&& quantized_parameter,
std::vector<typename Storage<expressed_type>::Type>&& input1_values,
std::vector<typename Storage<expressed_type>::Type>&& input2_values,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto input1_quant_values = QuantizeVector<storage_type, expressed_type>(
input1_values, quantized_parameter);
auto input2_quant_values = QuantizeVector<storage_type, expressed_type>(
input2_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor input1(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
input1_quant_values.data());
QuantizedTensor input2(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
input2_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(op(input1, input2, result));
EXPECT_THAT(result, IsAlmostSame(expected))
<< "input1: " << input1 << "\ninput2: " << input2;
}
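// Each TEST below exercises one op across its supported element types; the
// quantized tests pass the QuantizedParameter via designated initializers,
// e.g. {.scale = 1, .zero_point = 0}.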
TEST(ElementwiseBinary, Add) {
test<ElementType::kI1>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 1, 1});
test<ElementType::kSI8>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
test<ElementType::kSI16>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
test<ElementType::kSI32>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
test<ElementType::kBF16>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
test<ElementType::kF16>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
test<ElementType::kF32>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
}
TEST(ElementwiseBinary, AddQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Add, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI16, ElementType::kBF16>(
Add, {4}, {.scale = 2, .zero_point = 2}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI32, ElementType::kBF16>(
Add, {4}, {.scale = 0.5, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 30, -10});
test<ElementType::kSI8, ElementType::kF16>(
Add, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI16, ElementType::kF16>(
Add, {4}, {.scale = 2, .zero_point = 2}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI32, ElementType::kF16>(
Add, {4}, {.scale = 0.5, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 30, -10});
test<ElementType::kSI8, ElementType::kF32>(
Add, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI16, ElementType::kF32>(
Add, {4}, {.scale = 2, .zero_point = 2}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI32, ElementType::kF32>(
Add, {4}, {.scale = 0.5, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 30, -10});
}
TEST(ElementwiseBinary, And) {
test<ElementType::kI1>(And, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {0, 0, 1, 0});
test<ElementType::kSI8>(And, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {1, 0, 4, 0});
test<ElementType::kSI16>(And, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {1, 0, 4, 0});
test<ElementType::kSI32>(And, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {1, 0, 4, 0});
}
TEST(ElementwiseBinary, Atan2) {
test<ElementType::kBF16>(Atan2, {4}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kF16>(Atan2, {4}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kF32>(Atan2, {4}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
}
TEST(ElementwiseBinary, Atan2Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Atan2, {4}, {.scale = 1e-1, .zero_point = 0}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI8, ElementType::kF16>(
Atan2, {4}, {.scale = 1e-1, .zero_point = 2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI8, ElementType::kF32>(
Atan2, {4}, {.scale = 1e-1, .zero_point = -2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI16, ElementType::kBF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 0}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI16, ElementType::kF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI16, ElementType::kF32>(
Atan2, {4}, {.scale = 1e-3, .zero_point = -2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI32, ElementType::kBF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 0}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI32, ElementType::kF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI32, ElementType::kF32>(
Atan2, {4}, {.scale = 1e-3, .zero_point = -2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
}
TEST(ElementwiseBinary, Divide) {
test<ElementType::kSI8>(Divide, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{1, 2, -1, -2});
test<ElementType::kSI16>(Divide, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{11, 18, -8, 15});
test<ElementType::kSI32>(Divide, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{11, 18, -8, 15});
test<ElementType::kBF16>(Divide, {4}, {22, 53, -33, -77}, {2, 4, 4, -5},
{11, 13.25, -8.25, 15.4});
test<ElementType::kF16>(Divide, {4}, {22, 53, -33, -77}, {2, 4, 4, -5},
{11, 13.25, -8.25, 15.4});
test<ElementType::kF32>(Divide, {4}, {22, 53, -33, -77}, {2, 4, 4, -5},
{11, 13.25, -8.25, 15.4});
}
TEST(ElementwiseBinary, DivideQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Divide, {4}, {.scale = 1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI8, ElementType::kF16>(
Divide, {4}, {.scale = 1, .zero_point = 5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI8, ElementType::kF32>(
Divide, {4}, {.scale = 1, .zero_point = -5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI16, ElementType::kBF16>(
Divide, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI16, ElementType::kF16>(
Divide, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI16, ElementType::kF32>(
Divide, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {111, 133.25, -83.25, 155.4});
test<ElementType::kSI32, ElementType::kBF16>(
Divide, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI32, ElementType::kF16>(
Divide, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI32, ElementType::kF32>(
Divide, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {111, 133.25, -83.25, 155.4});
}
TEST(ElementwiseBinary, Maximum) {
test<ElementType::kI1>(Maximum, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 1, 1});
test<ElementType::kSI8>(Maximum, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{2, 5, 3, 3});
test<ElementType::kSI16>(Maximum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{22, 55, 4, -5});
test<ElementType::kSI32>(Maximum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{22, 55, 4, -5});
test<ElementType::kBF16>(Maximum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 5.3, 4.4, -5.5});
test<ElementType::kF16>(Maximum, {4}, {22, 55, -33, -77},
{2.5, 3.5, 4.5, -5.5}, {22, 55, 4.5, -5.5});
test<ElementType::kF32>(Maximum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 5.3, 4.4, -5.5});
}
TEST(ElementwiseBinary, MaximumQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Maximum, {4}, {.scale = 1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI8, ElementType::kF16>(
Maximum, {4}, {.scale = 1, .zero_point = 5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI8, ElementType::kF32>(
Maximum, {4}, {.scale = 1, .zero_point = -5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI16, ElementType::kBF16>(
Maximum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI16, ElementType::kF16>(
Maximum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI16, ElementType::kF32>(
Maximum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {222, 533, 4, -5});
test<ElementType::kSI32, ElementType::kBF16>(
Maximum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI32, ElementType::kF16>(
Maximum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI32, ElementType::kF32>(
Maximum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {222, 533, 4, -5});
}
TEST(ElementwiseBinary, Minimum) {
test<ElementType::kI1>(Minimum, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{0, 0, 1, 0});
test<ElementType::kSI8>(Minimum, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{2, 2, -3, -7});
test<ElementType::kSI16>(Minimum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{2, 3, -33, -77});
test<ElementType::kSI32>(Minimum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{2, 3, -33, -77});
test<ElementType::kBF16>(Minimum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 4.4, -3.3, -7.7});
test<ElementType::kF16>(Minimum, {4}, {22, 55, -33, -77},
{2.5, 3.5, 4.5, -5.5}, {2.5, 3.5, -33, -77});
test<ElementType::kF32>(Minimum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 4.4, -3.3, -7.7});
}
TEST(ElementwiseBinary, MinimumQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Minimum, {4}, {.scale = 1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI8, ElementType::kF16>(
Minimum, {4}, {.scale = 1, .zero_point = 5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI8, ElementType::kF32>(
Minimum, {4}, {.scale = 1, .zero_point = -5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI16, ElementType::kBF16>(
Minimum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI16, ElementType::kF16>(
Minimum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI16, ElementType::kF32>(
Minimum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {2, 4, -333, -777});
test<ElementType::kSI32, ElementType::kBF16>(
Minimum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI32, ElementType::kF16>(
Minimum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI32, ElementType::kF32>(
Minimum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {2, 4, -333, -777});
}
TEST(ElementwiseBinary, Multiply) {
test<ElementType::kI1>(Multiply, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{0, 0, 1, 0});
test<ElementType::kSI8>(Multiply, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{4, 10, -9, -21});
test<ElementType::kSI16>(Multiply, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{44, 165, -132, 385});
test<ElementType::kSI32>(Multiply, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{44, 165, -132, 385});
test<ElementType::kBF16>(Multiply, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {4.84, 23.32, -14.52, 42.35});
test<ElementType::kF16>(Multiply, {4}, {22, 55, -33, -77},
{2.5, 3.5, 4.5, -5.5}, {55, 192.5, -148.5, 423.5});
test<ElementType::kF32>(Multiply, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {4.84, 23.32, -14.52, 42.35});
}
TEST(ElementwiseBinary, MultiplyQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI8, ElementType::kF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI8, ElementType::kF32>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI16, ElementType::kBF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI16, ElementType::kF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI16, ElementType::kF32>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI32, ElementType::kBF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI32, ElementType::kF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI32, ElementType::kF32>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
}
TEST(ElementwiseBinary, Or) {
test<ElementType::kI1>(Or, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 1, 1});
test<ElementType::kSI8>(Or, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {3, 0, 5, 1});
test<ElementType::kSI16>(Or, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {3, 0, 5, 1});
test<ElementType::kSI32>(Or, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {3, 0, 5, 1});
}
TEST(ElementwiseBinary, Power) {
test<ElementType::kSI8>(Power, {6}, {-2, 1, -3, 5, -3, 4}, {0, 1, 2, 3, 3, 2},
{1, 1, 9, 125, -27, 16});
test<ElementType::kSI16>(Power, {6}, {-2, 1, -36, 5, 3, 5},
{0, 1, 2, 3, 4, 5}, {1, 1, 1296, 125, 81, 3125});
test<ElementType::kSI32>(Power, {6}, {-2, 1, -36, 5, 3, 10},
{0, 1, 2, 3, 4, 5}, {1, 1, 1296, 125, 81, 100000});
test<ElementType::kBF16>(Power, {6}, {-2, -0, -36, 5, 3, 1000},
{2, 2, 1.1, 2, -1, 10},
{4, 0, -NAN, 25, 0.3333333333333333f, 1e+30});
test<ElementType::kF16>(Power, {6}, {-2, -0, -36, 5, 3, 10000},
{2, 2, 1.1, 2, -1, 10},
{4, 0, -NAN, 25, 0.3333333333333333f, INFINITY});
test<ElementType::kF32>(Power, {6}, {-2, -0, -36, 5, 3, 10000},
{2, 2, 1.1, 2, -1, 10},
{4, 0, -NAN, 25, 0.3333333333333333f, INFINITY});
}
TEST(ElementwiseBinary, Remainder) {
test<ElementType::kSI8>(Remainder, {4}, {17, 18, 19, 20}, {3, 4, 5, 7},
{2, 2, 4, 6});
test<ElementType::kSI16>(Remainder, {4}, {17, 18, 19, 20}, {3, 4, 5, 7},
{2, 2, 4, 6});
test<ElementType::kSI32>(Remainder, {4}, {17, -17, 17, -17}, {3, 3, -3, -3},
{2, -2, 2, -2});
test<ElementType::kBF16>(Remainder, {4}, {17, 18, 19, 20}, {3, 4, 5, 7},
{2, 2, 4, 6});
test<ElementType::kF16>(Remainder, {4}, {17, -17, 17, -17}, {3, 3, -3, -3},
{2, -2, 2, -2});
test<ElementType::kF32>(Remainder, {4}, {17.1, -17.1, 17.1, -17.1},
{3, 3, -3, -3}, {2.1, -2.1, 2.1, -2.1});
}
TEST(ElementwiseBinary, RemainderQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {7.1, -7.1, 7.1, -7.1},
{3, 3, -3, -3}, {1.1, -1.1, 1.1, -1.1});
test<ElementType::kSI8, ElementType::kF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {7.1, -7.1, 7.1, -7.1},
{3, 3, -3, -3}, {1.1, -1.1, 1.1, -1.1});
test<ElementType::kSI8, ElementType::kF32>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {7.1, -7.1, 7.1, -7.1},
{3, 3, -3, -3}, {1.1, -1.1, 1.1, -1.1});
test<ElementType::kSI16, ElementType::kBF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 4}, {17, 18, 19, 20},
{3, 4, 5, 7}, {2, 2, 4, 6});
test<ElementType::kSI16, ElementType::kF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {17, -17, 17, -17},
{3, 3, -3, -3}, {2, -2, 2, -2});
test<ElementType::kSI16, ElementType::kF32>(
Remainder, {4}, {.scale = 1e-2, .zero_point = -10},
{17.1, -17.1, 17.1, -17.1}, {3, 3, -3, -3}, {2.1, -2.1, 2.1, -2.1});
test<ElementType::kSI32, ElementType::kBF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 4}, {17, 18, 19, 20},
{3, 4, 5, 7}, {2, 2, 4, 6});
test<ElementType::kSI32, ElementType::kF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {17, -17, 17, -17},
{3, 3, -3, -3}, {2, -2, 2, -2});
test<ElementType::kSI32, ElementType::kF32>(
Remainder, {4}, {.scale = 1e-2, .zero_point = -10},
{17.1, -17.1, 17.1, -17.1}, {3, 3, -3, -3}, {2.1, -2.1, 2.1, -2.1});
}
TEST(ElementwiseBinary, ShiftLeft) {
test<ElementType::kSI8>(ShiftLeft, {3}, {-1, 0, 1}, {1, 2, 3}, {-2, 0, 8});
test<ElementType::kSI16>(ShiftLeft, {3}, {-1, 0, 1}, {1, 2, 3}, {-2, 0, 8});
test<ElementType::kSI32>(ShiftLeft, {3}, {-1, 0, 1}, {1, 2, 3}, {-2, 0, 8});
}
TEST(ElementwiseBinary, ShiftRightArithmetic) {
test<ElementType::kSI8>(ShiftRightArithmetic, {3}, {-1, 0, 8}, {1, 2, 3},
{-1, 0, 1});
test<ElementType::kSI16>(ShiftRightArithmetic, {3}, {-1, 0, 8}, {1, 2, 3},
{-1, 0, 1});
test<ElementType::kSI32>(ShiftRightArithmetic, {3}, {-1, 0, 8}, {1, 2, 3},
{-1, 0, 1});
}
TEST(ElementwiseBinary, ShiftRightLogical) {
test<ElementType::kSI8>(ShiftRightLogical, {3}, {-1, 0, 8}, {1, 2, 3},
{0x7F, 0, 1});
test<ElementType::kSI16>(ShiftRightLogical, {3}, {-1, 0, 8}, {1, 2, 3},
{0x7FFF, 0, 1});
test<ElementType::kSI32>(ShiftRightLogical, {3}, {-1, 0, 8}, {1, 2, 3},
{0x7FFFFFFF, 0, 1});
}
TEST(ElementwiseBinary, Subtract) {
test<ElementType::kSI8>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kSI16>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kSI32>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kBF16>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kF16>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kF32>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
}
TEST(ElementwiseBinary, SubtractQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Subtract, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI8, ElementType::kF16>(
Subtract, {4}, {.scale = 1, .zero_point = 2}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI8, ElementType::kF32>(
Subtract, {4}, {.scale = 1, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI16, ElementType::kBF16>(
Subtract, {4}, {.scale = 1e-1, .zero_point = 0}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI16, ElementType::kF16>(
Subtract, {4}, {.scale = 1e-1, .zero_point = 2}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI16, ElementType::kF32>(
Subtract, {4}, {.scale = 1e-1, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI32, ElementType::kBF16>(
Subtract, {4}, {.scale = 1e-3, .zero_point = 0}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI32, ElementType::kF16>(
Subtract, {4}, {.scale = 1e-3, .zero_point = 2}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI32, ElementType::kF32>(
Subtract, {4}, {.scale = 1e-3, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
}
TEST(ElementwiseBinary, Xor) {
test<ElementType::kI1>(Xor, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 0, 1});
test<ElementType::kSI8>(Xor, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {2, 0, 1, 1});
test<ElementType::kSI16>(Xor, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {2, 0, 1, 1});
test<ElementType::kSI32>(Xor, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {2, 0, 1, 1});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/elementwise_binary.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/elementwise_binary_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
282aa471-5c12-4468-8a0c-38230ff0f653 | cpp | tensorflow/tensorflow | broadcast_in_dim | tensorflow/lite/experimental/shlo/legacy/src/broadcast_in_dim.cc | tensorflow/lite/experimental/shlo/legacy/test/broadcast_in_dim_test.cc | #include <algorithm>
#include <iterator>
#include <type_traits>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
bool IsUnique(absl::Span<const DimensionSize> span) {
  // std::unique only collapses adjacent duplicates, so sort first to detect
  // duplicates anywhere in the span.
  std::vector<DimensionSize> temp(span.begin(), span.end());
  std::sort(temp.begin(), temp.end());
  auto i = std::unique(temp.begin(), temp.end());
  return static_cast<size_t>(std::distance(temp.begin(), i)) == span.size();
}
template <typename Value>
absl::Status CheckParameters(
const Value& operand, absl::Span<const DimensionSize> broadcast_dimensions,
Value& result) {
if (!operand.is_per_axis_quantized()) {
if (!(result.element_type() == operand.element_type())) {
return absl::InvalidArgumentError(
"Constraint violation: element_type(result) = element_type(operand) "
"if !is_per_axis_quantized(operand)");
}
}
if (!(broadcast_dimensions.size() == operand.rank())) {
return absl::InvalidArgumentError(
"Constraint violation: size(broadcast_dimensions) = rank(operand)");
} else if (!(*std::min_element(broadcast_dimensions.begin(),
broadcast_dimensions.end()) >= 0 and
*std::max_element(broadcast_dimensions.begin(),
broadcast_dimensions.end()) < result.rank())) {
return absl::InvalidArgumentError(
"Constraint violation: 0 <= broadcast_dimensions < rank(result)");
} else if (!(IsUnique(broadcast_dimensions))) {
return absl::InvalidArgumentError(
"Constraint violation: is_unique(broadcast_dimensions)");
} else {
for (auto d : operand.axes()) {
if (!(operand.dim(d) == 1 or
operand.dim(d) == result.dim(broadcast_dimensions[d]))) {
return absl::InvalidArgumentError(
"Constraint violation: dim(operand, d) = 1 or dim(operand, d) = "
"dim(result, broadcast_dimensions[d])");
}
}
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (operand.is_per_axis_quantized()) {
if (!(operand.is_per_axis_quantized() and
result.storage_type() == operand.storage_type() and
result.expressed_type() == operand.expressed_type() and
result.storage_min() == operand.storage_min() and
result.storage_max() == operand.storage_max())) {
return absl::InvalidArgumentError(
"Constraint violation: element_type(result) = "
"element_type(operand) with exceptions if "
"is_per_axis_quantized(operand)");
}
}
if (result.is_per_axis_quantized()) {
if (!(*result.quantized_dimension() ==
broadcast_dimensions[*operand.quantized_dimension()])) {
return absl::InvalidArgumentError(
"quantization_dimension(result) = "
"broadcast_dimensions[quantization_dimension(operand)]");
}
if (operand.dim(*operand.quantized_dimension()) == 1) {
auto n = result.dim(*result.quantized_dimension());
for (auto i = 0; i < n; ++i) {
if (!(result.scales(i) == operand.scales(0) and
result.zero_points(i) == operand.zero_points(0))) {
return absl::InvalidArgumentError(
"If dim(operand, quantization_dimension(operand)) = 1, then "
"scales(result)[i] = scales(operand)[0] and "
"zero_points(result)[i] = zero_points(operand)[0] for i in "
"range(dim(result, quantization_dimension(result)))");
}
}
}
}
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
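// BroadcastInDim visits every index of `result` and maps it back to an
// operand index: operand dimension d corresponds to result dimension
// broadcast_dimensions[d], and size-1 operand dimensions are pinned to 0 so
// their single element is replicated.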
template <ElementType storage_type, ElementType expressed_type, typename Value>
absl::Status BroadcastInDim(
const Value& operand, absl::Span<const DimensionSize> broadcast_dimensions,
Value& result) {
if (auto check = CheckParameters(operand, broadcast_dimensions, result);
!check.ok()) {
return check;
}
using S = Storage<storage_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != operand.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
TensorIndex operand_index(operand.shape());
for (TensorIndexIterator result_index_iter{result.shape()};
result_index_iter.has_next(); ++result_index_iter) {
for (auto d = 0; d < operand.rank(); ++d) {
if (operand.dim(d) == 1) {
operand_index.set(d, 0);
} else {
auto b = broadcast_dimensions[d];
operand_index.set(d, (*result_index_iter)[b]);
}
}
auto linearized_operand_index = operand_index.linearize();
auto linearized_result_index = result_index_iter->linearize();
auto value = S::Get(operand_buffer, linearized_operand_index);
S::Set(result_buffer, linearized_result_index, value);
}
} else if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
if (!(operand.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError(
"Only per-tensor quantization is currently supported");
}
using ET = typename Storage<expressed_type>::Type;
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
TensorIndex operand_index(operand.shape());
for (TensorIndexIterator result_index_iter{result.shape()};
result_index_iter.has_next(); ++result_index_iter) {
for (auto d = 0; d < operand.rank(); ++d) {
if (operand.dim(d) == 1) {
operand_index.set(d, 0);
} else {
auto b = broadcast_dimensions[d];
operand_index.set(d, (*result_index_iter)[b]);
}
}
auto linearized_operand_index = operand_index.linearize();
auto linearized_result_index = result_index_iter->linearize();
auto operand_storage = S::Get(operand_buffer, linearized_operand_index);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
operand_storage, operand_quant_param, result_scale_inv,
result_quant_param.zero_point, [](auto x) { return x; });
S::Set(result_buffer, linearized_result_index, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
}
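// Public entry points. Illustrative use, mirroring the unit tests (tensor
// construction elided): broadcasting a {1, 3} operand into a {2, 3, 2}
// result, with operand dimension 0 mapped to result dimension 2 and
// dimension 1 to result dimension 1:
//   absl::Status status = BroadcastInDim(operand, {2, 1}, result);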
absl::Status BroadcastInDim(
const Tensor& operand, absl::Span<const DimensionSize> broadcast_dimensions,
Tensor& result) {
DISPATCH_BOOL_INT_FLOAT(BroadcastInDim, result.element_type(), operand,
broadcast_dimensions, result);
}
absl::Status BroadcastInDim(
const QuantizedTensor& operand,
absl::Span<const DimensionSize> broadcast_dimensions,
QuantizedTensor& result) {
DISPATCH_QUANTIZED(BroadcastInDim, result.storage_type(),
result.expressed_type(), operand, broadcast_dimensions,
result);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& operand_shape,
std::vector<typename Storage<element_type>::Type>&& operand_values,
std::initializer_list<DimensionSize>&& broadcast_dimensions_values,
std::initializer_list<DimensionSize>&& result_shape,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor operand(TensorType(Shape(operand_shape), element_type),
operand_values.data());
Tensor expected(TensorType(Shape(result_shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(result_shape), element_type),
result_values.data());
absl::Span<const DimensionSize> broadcast_dimensions(
broadcast_dimensions_values);
ASSERT_OK(BroadcastInDim(operand, broadcast_dimensions, result));
EXPECT_EQ(result, expected)
<< "operand: " << operand
<< "\nbroadcast_dimensions: " << ToString(broadcast_dimensions);
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& operand_shape,
std::vector<typename Storage<expressed_type>::Type>&& operand_values,
std::initializer_list<DimensionSize>&& broadcast_dimensions_values,
std::initializer_list<DimensionSize>&& result_shape,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto operand_quant_values = QuantizeVector<storage_type, expressed_type>(
operand_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor operand(
QuantizedTensorType(Shape(operand_shape),
QuantizedTensorElementType(element_type)),
operand_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(result_shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(result_shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
absl::Span<const DimensionSize> broadcast_dimensions(
broadcast_dimensions_values);
  ASSERT_OK(BroadcastInDim(operand, broadcast_dimensions, result));
EXPECT_EQ(result, expected)
<< "operand: " << operand
<< "\nbroadcast_dimensions: " << ToString(broadcast_dimensions);
}
TEST(BroadcastInDim, Unquantized) {
test<ElementType::kI1>({1, 3}, {true, false, true}, {2, 1}, {2, 3, 2},
{true, true, false, false, true, true, true, true,
false, false, true, true});
test<ElementType::kSI8>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kBF16>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kF16>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kF32>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
}
TEST(BroadcastInDim, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/broadcast_in_dim.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/broadcast_in_dim_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c22eae69-295e-43e4-812d-9bd98c6b6375 | cpp | tensorflow/tensorflow | and | tensorflow/lite/experimental/shlo/ops/and.cc | tensorflow/lite/experimental/shlo/ops/and_test.cc | #include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
AndOp Create(AndOp::Attributes) { return {}; }
absl::Status Prepare(AndOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("and"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(AndOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
And<DataType::kSI32> and_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
and_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
And<DataType::kI1> and_func;
detail::EvaluateNoQuantization<DataType::kI1>(and_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.and: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<AndOp> {
static std::string Get() { return "And"; }
};
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
template <>
struct SupportedOpDataType<AndOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseOpShapePropagationTest,
AndOp, TestParamNames);
using AndBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
    AndOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
    And, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    AndBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<AndOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct AndTest : ::testing::Test {};
TYPED_TEST_SUITE(AndTest, SupportedTypes, TestParamNames);
TYPED_TEST(AndTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
And<TypeParam::kStorage>());
auto op = Create(AndOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/and.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/and_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e775194-4101-4d2f-930c-95c3dddc8a6b | cpp | tensorflow/tensorflow | cbrt | tensorflow/lite/experimental/shlo/ops/cbrt.cc | tensorflow/lite/experimental/shlo/ops/cbrt_test.cc | #include "tensorflow/lite/experimental/shlo/ops/cbrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Cbrt {
template <class T>
T operator()(T v) const {
return std::cbrt(v);
}
};
template <>
F16 Cbrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cbrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
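// F16 and BF16 have no std::cbrt overload, so both specializations widen to
// float, take the cube root there, and narrow the result back.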
CbrtOp Create(CbrtOp::Attributes) { return {}; }
absl::Status Prepare(CbrtOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cbrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cbrt"), input, output));
return absl::OkStatus();
}
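// Evaluate dispatches on the tensor type validated in Prepare: per-tensor
// quantized inputs take the dequantize/compute/requantize path
// (DequantizeOpQuantizePerTensor), while plain float inputs are computed
// directly (EvaluateNoQuantization).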
absl::Status Evaluate(CbrtOp& op, const Tensor& input, Tensor& output) {
Cbrt cbrt;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), cbrt, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
cbrt, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/cbrt.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CbrtOp> {
static std::string Get() { return "Cbrt"; }
};
namespace {
struct Cbrt {
template <class T>
T operator()(T v) const {
return std::cbrt(v);
}
} cbrt_ref;
template <>
F16 Cbrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cbrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Cbrt, UnaryElementwiseOpShapePropagationTest,
CbrtOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Cbrt, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<CbrtOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
CbrtOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Cbrt, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CbrtTest : ::testing::Test {};
TYPED_TEST_SUITE(CbrtTest, FloatTestTypes, TestParamNames);
TYPED_TEST(CbrtTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), cbrt_ref);
auto op = Create(CbrtOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedCbrtTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCbrtTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCbrtTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = cbrt_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(CbrtOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/cbrt.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/cbrt_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85734204-9025-4511-b9a2-be5a6946b858 | cpp | tensorflow/tensorflow | minimum | tensorflow/lite/experimental/shlo/ops/minimum.cc | tensorflow/lite/delegates/xnnpack/minimum_test.cc | #include "tensorflow/lite/experimental/shlo/ops/minimum.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Minimum {
template <class T>
constexpr auto operator()(const T a, const T b) {
return a < b ? a : b;
}
};
MinimumOp Create(MinimumOp::Attributes) { return {}; }
absl::Status Prepare(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("minimum"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("minimum"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("minimum"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MinimumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Minimum minimum;
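  // Each DISPATCH_* macro returns from a handled case, so reaching the
  // error below means lhs has an unsupported element type.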
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), minimum, lhs, rhs,
output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
minimum, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.minimum: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
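// Every test below draws random dimensions in [2, 5] and uses
// BinaryElementwiseTester to check the XNNPack delegate against the
// reference TFLite kernel for BuiltinOperator_MINIMUM under various
// broadcasting and static-input combinations.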
TEST(Minimum, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
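// The remaining tests rerun the static-input case with FP16, INT8,
// channel-wise INT8, and sparse weight encodings, plus a two-thread
// delegate run.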
TEST(Minimum, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
TEST(Minimum, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MINIMUM, xnnpack_delegate.get());
}
}  // namespace xnnpack
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/minimum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/minimum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ceb21061-0cca-4cc6-964c-1a8e710c929c | cpp | tensorflow/tensorflow | maximum | tensorflow/lite/experimental/shlo/ops/maximum.cc | tensorflow/lite/delegates/xnnpack/maximum_test.cc | #include "tensorflow/lite/experimental/shlo/ops/maximum.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
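// Mirror of the Minimum op earlier in this dump: identical structure with
// the comparison direction flipped.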
struct Maximum {
template <class T>
constexpr auto operator()(const T a, const T b) {
return a > b ? a : b;
}
};
MaximumOp Create(MaximumOp::Attributes) { return {}; }
absl::Status Prepare(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("maximum"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("maximum"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("maximum"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Maximum maximum;
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), maximum, lhs, rhs,
output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
maximum, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.maximum: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
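// Same test matrix as the Minimum suite above, targeting
// BuiltinOperator_MAXIMUM.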
TEST(Maximum, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
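// As with Minimum, the weight-encoding (FP16/INT8/sparse) and
// multi-threading variants follow.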
TEST(Maximum, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
}  // namespace xnnpack
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/maximum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/maximum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71b6dde1-1ef4-4e06-aefe-a9e1ff3a3bc2 | cpp | tensorflow/tensorflow | not | tensorflow/lite/experimental/shlo/ops/not.cc | tensorflow/lite/experimental/shlo/ops/not_test.cc | #include "tensorflow/lite/experimental/shlo/ops/not.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Not {
template <class T>
T operator()(T v) const {
return static_cast<T>(~v);
}
};
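// ~bool promotes to int and converts back to true for both inputs, so bool
// needs logical negation to get i1 semantics.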
template <>
bool Not::operator()(bool v) const {
return !v;
}
NotOp Create(NotOp::Attributes) { return {}; }
absl::Status Prepare(NotOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("not"), input, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("not"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(NotOp& op, const Tensor& input, Tensor& output) {
Not not_func;
if (IsIntTensor(input) || IsBoolTensor(input)) {
DISPATCH_BOOL_INT(detail::EvaluateNoQuantization,
input.tensor_element_type(), not_func, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.not: Unsupported tensor type.");
}
}  // namespace shlo_ref | #include "tensorflow/lite/experimental/shlo/ops/not.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<NotOp> {
static std::string Get() { return "Not"; }
};
template <>
struct SupportedOpDataType<NotOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct Not {
template <class T>
T operator()(T v) const {
return ~v;
}
} not_ref;
template <>
bool Not::operator()(bool v) const {
return !v;
}
INSTANTIATE_TYPED_TEST_SUITE_P(Not, UnaryElementwiseOpShapePropagationTest,
NotOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Not, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<NotOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<NotOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Not, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct BoolAndIntNotTest : ::testing::Test {};
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
TYPED_TEST_SUITE(BoolAndIntNotTest, SupportedTypes, TestParamNames);
TYPED_TEST(BoolAndIntNotTest, BoolAndIntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), not_ref);
auto op = Create(NotOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/not.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/not_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
170e0abe-49ca-444d-bb86-8a79cf0f6208 | cpp | tensorflow/tensorflow | xor | tensorflow/lite/experimental/shlo/ops/xor.cc | tensorflow/lite/experimental/shlo/ops/xor_test.cc | #include "tensorflow/lite/experimental/shlo/ops/xor.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType>
struct Xor : std::bit_xor<void> {};
template <>
struct Xor<DataType::kI1> {
template <class T>
bool operator()(T lhs, T rhs) const {
return static_cast<bool>(lhs) != static_cast<bool>(rhs);
}
};
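// For booleans, != on the normalized operands implements logical XOR. A plain
// bitwise ^ could mis-handle nonzero-but-different encodings of true
// (e.g. 2 ^ 1 == 3), so both sides are collapsed to bool first.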
XorOp Create(XorOp::Attributes) { return {}; }
absl::Status Prepare(XorOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("xor"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("xor"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("xor"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(XorOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
Xor<DataType::kSI32> xor_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
xor_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
Xor<DataType::kI1> xor_func;
detail::EvaluateNoQuantization<DataType::kI1>(xor_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.xor: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/xor.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<XorOp> {
static std::string Get() { return "Xor"; }
};
template <DataType>
struct Xor : std::bit_xor<void> {};
template <>
struct Xor<DataType::kI1> {
template <class T>
bool operator()(T lhs, T rhs) const {
return static_cast<bool>(lhs) != static_cast<bool>(rhs);
}
};
template <>
struct SupportedOpDataType<XorOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Xor, BinaryElementwiseOpShapePropagationTest,
XorOp, TestParamNames);
using XorBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
    XorOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
    Xor, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    XorBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<XorOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Xor, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct XorTest : ::testing::Test {};
TYPED_TEST_SUITE(XorTest, SupportedTypes, TestParamNames);
TYPED_TEST(XorTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Xor<TypeParam::kStorage>());
auto op = Create(XorOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/xor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/xor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f47e9a6-3306-4f59-a171-c8655f57841a | cpp | tensorflow/tensorflow | exponential_minus_one | tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc | tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc | #include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct ExponentialMinusOne {
template <class T>
T operator()(T v) const {
return std::expm1(v);
}
};
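// std::expm1 evaluates e^v - 1 without the cancellation that exp(v) - 1
// suffers near v == 0. The F16/BF16 specializations below widen to float
// because <cmath> provides no overloads for those types.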
template <>
F16 ExponentialMinusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 ExponentialMinusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
ExponentialMinusOneOp Create(ExponentialMinusOneOp::Attributes) { return {}; }
absl::Status Prepare(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("exponential_minus_one"), input,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("exponential_minus_one"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output) {
ExponentialMinusOne exponential_minus_one;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(),
exponential_minus_one, input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
exponential_minus_one, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.exponential_minus_one: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<ExponentialMinusOneOp> {
static std::string Get() { return "ExponentialMinusOne"; }
};
namespace {
struct ExponentialMinusOne {
template <class T>
T operator()(T v) const {
return std::expm1(v);
}
} exponential_minus_one_ref;
template <>
F16 ExponentialMinusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 ExponentialMinusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOne,
UnaryElementwiseOpShapePropagationTest,
ExponentialMinusOneOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
ExponentialMinusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<ExponentialMinusOneOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<ExponentialMinusOneOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOne,
UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct ExponentialMinusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(ExponentialMinusOneTest, FloatTestTypes, TestParamNames);
TYPED_TEST(ExponentialMinusOneTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(),
exponential_minus_one_ref);
auto op = Create(ExponentialMinusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedExponentialMinusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedExponentialMinusOneTest, QuantizedTestTypes,
TestParamNames);
TYPED_TEST(QuantizedExponentialMinusOneTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res =
exponential_minus_one_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
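  // The reference path above mirrors DequantizeOpQuantizePerTensor:
  // dequantize with (zero_point, scale), apply the op in the expressed type,
  // then requantize. Note that Quantize takes the *inverse* scale as its
  // last argument.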
auto op = Create(ExponentialMinusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef36b79c-43e5-4468-b0d3-2d13d55df0fe | cpp | tensorflow/tensorflow | negate | tensorflow/lite/experimental/shlo/ops/negate.cc | tensorflow/lite/experimental/shlo/ops/negate_test.cc | #include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Negate : std::negate<void> {};
NegateOp Create(NegateOp::Attributes) { return {}; }
absl::Status Prepare(NegateOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("negate"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("negate"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(NegateOp& op, const Tensor& input, Tensor& output) {
Negate negate;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), negate,
input, output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), negate, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.negate: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<NegateOp> {
static std::string Get() { return "Negate"; }
};
namespace {
struct Negate : std::negate<void> {
} negate_ref;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseOpShapePropagationTest,
NegateOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Negate, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<NegateOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<NegateOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct NegateTest : ::testing::Test {};
TYPED_TEST_SUITE(NegateTest, ArithmeticTestTypes, TestParamNames);
TYPED_TEST(NegateTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), negate_ref);
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedNegateTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedNegateTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedNegateTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = negate_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/negate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/negate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bc5f6db5-7e35-4909-8459-f3482866bc8a | cpp | tensorflow/tensorflow | or | tensorflow/lite/experimental/shlo/ops/or.cc | tensorflow/lite/experimental/shlo/ops/or_test.cc | #include "tensorflow/lite/experimental/shlo/ops/or.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType>
struct Or : std::bit_or<void> {};
template <>
struct Or<DataType::kI1> : std::logical_or<void> {};
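// The DataType template parameter selects the semantics at compile time:
// bitwise OR for integer tensors, logical OR for booleans, matching the
// stablehlo.or definition.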
OrOp Create(OrOp::Attributes) { return {}; }
absl::Status Prepare(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("or"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("or"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("or"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
Or<DataType::kSI32> or_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
or_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
Or<DataType::kI1> or_func;
detail::EvaluateNoQuantization<DataType::kI1>(or_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.or: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/or.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<OrOp> {
static std::string Get() { return "Or"; }
};
template <DataType>
struct Or : std::bit_or<void> {};
template <>
struct Or<DataType::kI1> : std::logical_or<void> {};
template <>
struct SupportedOpDataType<OrOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Or, BinaryElementwiseOpShapePropagationTest,
OrOp, TestParamNames);
using OrBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
    OrOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
    Or, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    OrBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<OrOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Or, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct OrTest : ::testing::Test {};
TYPED_TEST_SUITE(OrTest, SupportedTypes, TestParamNames);
TYPED_TEST(OrTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Or<TypeParam::kStorage>());
auto op = Create(OrOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/or.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/or_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0e99cd6-1bb8-4bd4-a429-26688c4f695e | cpp | tensorflow/tensorflow | logistic | tensorflow/lite/experimental/shlo/ops/logistic.cc | tensorflow/lite/delegates/xnnpack/logistic_test.cc | #include "tensorflow/lite/experimental/shlo/ops/logistic.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Logistic {
template <class T>
T operator()(T v) const {
constexpr T one = static_cast<T>(1);
return one / (one + std::exp(-v));
}
};
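// Computes 1 / (1 + e^-v). The formula degrades gracefully at the extremes:
// for very negative v, exp(-v) overflows to +inf and the quotient becomes 0;
// for very positive v, exp(-v) underflows to 0 and the result is exactly 1.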
template <>
F16 Logistic::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 Logistic::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
LogisticOp Create(LogisticOp::Attributes) { return {}; }
absl::Status Prepare(LogisticOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("logistic"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("logistic"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogisticOp& op, const Tensor& input, Tensor& output) {
Logistic logistic;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), logistic,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
logistic, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.logistic: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
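// The tests below share one pattern: dimensions are drawn uniformly from
// [2, 5], and the delegate is owned by a unique_ptr whose custom deleter is
// the matching TfLiteXNNPackDelegateDelete.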
TEST(Logistic, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).RelativeTolerance(1.0e+4f).Test(
BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
TEST(Logistic, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_LOGISTIC, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/logistic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/logistic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
efb6078c-2e87-4404-afaf-515287b1e7eb | cpp | tensorflow/tensorflow | multiply | tensorflow/lite/experimental/shlo/ops/multiply.cc | tensorflow/lite/experimental/shlo/ops/multiply_test.cc | #include "tensorflow/lite/experimental/shlo/ops/multiply.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType expressed_type>
struct Multiply : std::multiplies<void> {};
template <>
struct Multiply<DataType::kI1> {
template <class T>
T operator()(const T& lhs, const T& rhs) const {
return static_cast<T>(lhs && rhs);
}
};
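// Boolean multiplication is logical AND, which matches how StableHLO defines
// multiply on i1 tensors; the cast back to T preserves the storage type.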
MultiplyOp Create(MultiplyOp::Attributes) { return {}; }
absl::Status Prepare(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("multiply"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("multiply"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("multiply"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsBoolTensor(lhs)) {
detail::EvaluateNoQuantization<DataType::kI1>(Multiply<DataType::kI1>(),
lhs, rhs, output);
return absl::OkStatus();
} else if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
Multiply<DataType::kF32> multiply;
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), multiply, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
Multiply<DataType::kF32> multiply;
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
multiply, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.multiply: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/multiply.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<MultiplyOp> {
static std::string Get() { return "Multiply"; }
};
template <DataType expressed_type>
struct Multiply : std::multiplies<void> {};
template <>
struct Multiply<DataType::kI1> {
template <class T>
T operator()(const T& lhs, const T& rhs) const {
return static_cast<T>(lhs && rhs);
}
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Multiply,
BinaryElementwiseOpShapePropagationTest,
MultiplyOp, TestParamNames);
using MultiplyBaselineConstraintTypes =
    BinaryElementwiseBaselineConstraintTypes<
        MultiplyOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes,
                                BaselineConstraintFloatTypes,
                                BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
    Multiply, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    MultiplyBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes = WithOpTypes<MultiplyOp, PerAxisQuantizedTestTypes>;
INSTANTIATE_TYPED_TEST_SUITE_P(Multiply, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<BoolTestType, ArithmeticTestTypes>;
template <class T>
struct MultiplyTest : ::testing::Test {};
TYPED_TEST_SUITE(MultiplyTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(MultiplyTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Multiply<TypeParam::kStorage>());
auto op = Create(MultiplyOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedMultiplyTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedMultiplyTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedMultiplyTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Multiply<TypeParam::kExpressed>()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(MultiplyOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/multiply.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/multiply_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a581d489-5070-4705-82dd-ec7c21b84a0a | cpp | tensorflow/tensorflow | exponential | tensorflow/lite/experimental/shlo/ops/exponential.cc | tensorflow/lite/experimental/shlo/ops/exponential_test.cc | #include "tensorflow/lite/experimental/shlo/ops/exponential.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Exponential {
template <class T>
T operator()(T v) const {
return std::exp(v);
}
};
template <>
F16 Exponential::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Exponential::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
ExponentialOp Create(ExponentialOp::Attributes) { return {}; }
absl::Status Prepare(ExponentialOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cosine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cosine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(ExponentialOp& op, const Tensor& input, Tensor& output) {
Exponential exponential;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), exponential,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
exponential, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.tanh: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/exponential.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<ExponentialOp> {
static std::string Get() { return "Exponential"; }
};
namespace {
struct Exponential {
template <class T>
T operator()(T v) const {
return std::exp(v);
}
} exponential_ref;
template <>
F16 Exponential::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Exponential::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Exponential,
UnaryElementwiseOpShapePropagationTest,
ExponentialOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Exponential, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<ExponentialOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<ExponentialOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Exponential, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct ExponentialTest : ::testing::Test {};
TYPED_TEST_SUITE(ExponentialTest, FloatTestTypes, TestParamNames);
TYPED_TEST(ExponentialTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), exponential_ref);
auto op = Create(ExponentialOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedExponentialTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedExponentialTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedExponentialTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = exponential_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(ExponentialOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59c1e898-a0e6-46ad-9852-de3d355afe0a | cpp | tensorflow/tensorflow | count_leading_zeros | tensorflow/lite/experimental/shlo/ops/count_leading_zeros.cc | tensorflow/lite/experimental/shlo/ops/count_leading_zeros_test.cc | #include "tensorflow/lite/experimental/shlo/ops/count_leading_zeros.h"
#include <cstdint>
#include <type_traits>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CountLeadingZeros {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::countl_zero(static_cast<uint8_t>(v << 4 | 0xf)));
} else {
return absl::countl_zero(static_cast<std::make_unsigned_t<T>>(v));
}
}
};
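// The I4 branch reuses the 8-bit countl_zero: (v << 4) moves the nibble into
// the high 4 bits of a uint8_t, and | 0xf forces the low 4 bits to 1 so they
// never count as leading zeros. E.g. v == 1 -> 0x1f -> 3 leading zeros,
// v == 0 -> 0x0f -> 4, v == -1 -> 0xff -> 0.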
CountLeadingZerosOp Create(CountLeadingZerosOp::Attributes) { return {}; }
absl::Status Prepare(CountLeadingZerosOp& op, const Tensor& input,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("count_leading_zeros"), input, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("count_leading_zeros"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CountLeadingZerosOp& op, const Tensor& input,
Tensor& output) {
CountLeadingZeros count_leading_zeros;
if (IsIntTensor(input)) {
DISPATCH_INT(detail::EvaluateNoQuantization, input.tensor_element_type(),
count_leading_zeros, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.count_leading_zeros: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/count_leading_zeros.h"
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/numeric/bits.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CountLeadingZerosOp> {
static std::string Get() { return "CountLeadingZeros"; }
};
template <>
struct SupportedOpDataType<CountLeadingZerosOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct CountLeadingZeros {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::countl_zero(static_cast<uint8_t>(v << 4 | 0xf)));
} else {
return absl::countl_zero(static_cast<std::make_unsigned_t<T>>(v));
}
}
} count_leading_zeros_ref;
template <class T>
struct CountLeadingZerosFunctorTest : ::testing::Test {};
using CountLeadingZerosTypes = ::testing::Types<int32_t, int16_t, int8_t, I4>;
TYPED_TEST_SUITE(CountLeadingZerosFunctorTest, CountLeadingZerosTypes);
TYPED_TEST(CountLeadingZerosFunctorTest, GivesCorrectResults) {
int64_t bit_count = 8 * sizeof(TypeParam);
if constexpr (std::is_same_v<I4, TypeParam>) {
bit_count = 4;
}
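  // I4 occupies a full byte of storage but models a 4-bit integer, so the
  // expected bit width is overridden by hand.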
EXPECT_EQ(count_leading_zeros_ref(std::numeric_limits<TypeParam>::lowest()),
0);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(-1)), 0);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(0)), bit_count);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(1)), bit_count - 1);
EXPECT_EQ(count_leading_zeros_ref(static_cast<TypeParam>(2)), bit_count - 2);
EXPECT_EQ(count_leading_zeros_ref(std::numeric_limits<TypeParam>::max()), 1);
}
INSTANTIATE_TYPED_TEST_SUITE_P(CountLeadingZeros,
UnaryElementwiseOpShapePropagationTest,
CountLeadingZerosOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
CountLeadingZeros, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<CountLeadingZerosOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<CountLeadingZerosOp, ConcatTypes<BoolTestType, FloatTestTypes,
PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(CountLeadingZeros,
UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CountLeadingZerosTest : ::testing::Test {};
TYPED_TEST_SUITE(CountLeadingZerosTest, IntTestTypes, TestParamNames);
TYPED_TEST(CountLeadingZerosTest, IntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape, -12);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), count_leading_zeros_ref);
auto op = Create(CountLeadingZerosOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/count_leading_zeros.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/count_leading_zeros_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5e3dedee-df81-4a6a-9c4f-321c9ec535fd | cpp | tensorflow/tensorflow | abs | tensorflow/lite/experimental/shlo/ops/abs.cc | tensorflow/lite/delegates/xnnpack/abs_test.cc | #include "tensorflow/lite/experimental/shlo/ops/abs.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Abs {
template <class T>
T operator()(const T& val) {
return val < static_cast<T>(0) ? static_cast<T>(-val) : val;
}
};
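// A hand-rolled abs: unlike std::abs it instantiates cleanly for F16/BF16.
// As with std::abs, negating the minimum value of a signed integer type
// overflows.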
AbsOp Create(typename AbsOp::Attributes) { return AbsOp{}; }
absl::Status Prepare(AbsOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("abs"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("abs"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(AbsOp& op, const Tensor& input, Tensor& output) {
Abs abs;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), abs, input,
output)
} else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), abs, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Abs, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
TEST(Abs, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ABS,
xnnpack_delegate.get());
}
TEST(Abs, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ABS, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/abs.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/abs_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
29b4b301-ba9b-4125-8cfa-0bae65f07385 | cpp | tensorflow/tensorflow | log_plus_one | tensorflow/lite/experimental/shlo/ops/log_plus_one.cc | tensorflow/lite/experimental/shlo/ops/log_plus_one_test.cc | #include "tensorflow/lite/experimental/shlo/ops/log_plus_one.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct LogPlusOne {
template <class T>
T operator()(T v) const {
return std::log1p(v);
}
};
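// F16 and BF16 have no std::log1p overload, so the specializations below
// widen to float, evaluate, and narrow the result back.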
template <>
F16 LogPlusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 LogPlusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
LogPlusOneOp Create(LogPlusOneOp::Attributes) { return {}; }
absl::Status Prepare(LogPlusOneOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("log_plus_one"), input,
IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("log_plus_one"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(LogPlusOneOp& op, const Tensor& input, Tensor& output) {
LogPlusOne log_plus_one;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), log_plus_one,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
log_plus_one, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.log_plus_one: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/log_plus_one.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<LogPlusOneOp> {
static std::string Get() { return "LogPlusOne"; }
};
namespace {
struct LogPlusOne {
template <class T>
T operator()(T v) const {
return std::log1p(v);
}
} log_plus_one_ref;
template <>
F16 LogPlusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 LogPlusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(LogPlusOne,
UnaryElementwiseOpShapePropagationTest,
LogPlusOneOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
LogPlusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<LogPlusOneOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<LogPlusOneOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(LogPlusOne, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct LogPlusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(LogPlusOneTest, FloatTestTypes, TestParamNames);
TYPED_TEST(LogPlusOneTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
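  // log1p(v) is only defined for v > -1, so bound the random inputs below by
  // -0.99.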
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(
shape, static_cast<StorageT>(-0.99));
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), log_plus_one_ref);
auto op = Create(LogPlusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedLogPlusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedLogPlusOneTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedLogPlusOneTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
Vector<StorageT> input_data =
RandomBuffer<TypeParam::kStorage>(shape, zero_point);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = log_plus_one_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(LogPlusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/log_plus_one.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/log_plus_one_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
20faff15-9cb2-45e3-8fb6-71e81ba3c0a8 | cpp | tensorflow/tensorflow | sine | tensorflow/lite/experimental/shlo/ops/sine.cc | tensorflow/lite/experimental/shlo/ops/sine_test.cc | #include "tensorflow/lite/experimental/shlo/ops/sine.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sine {
template <class T>
T operator()(T v) const {
return std::sin(v);
}
};
template <>
F16 Sine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
SineOp Create(SineOp::Attributes) { return {}; }
absl::Status Prepare(SineOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("sine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(SineOp& op, const Tensor& input, Tensor& output) {
Sine sine;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sine, input,
output)
  } else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
sine, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/sine.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SineOp> {
static std::string Get() { return "Sine"; }
};
namespace {
struct Sine {
template <class T>
T operator()(T v) const {
return std::sin(v);
}
} sine_ref;
template <>
F16 Sine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Sine, UnaryElementwiseOpShapePropagationTest,
SineOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Sine, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<SineOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
SineOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Sine, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct FloatSineTest : ::testing::Test {};
TYPED_TEST_SUITE(FloatSineTest, FloatTestTypes, TestParamNames);
TYPED_TEST(FloatSineTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const TensorType tensor_type =
TensorType{.shape = shape, .element_type = TypeParam::kStorage};
Tensor input_tensor{.type = tensor_type, .data = input_data.data()};
Tensor output_tensor{.type = tensor_type, .data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), sine_ref);
auto op = Create(SineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedSineTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSineTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSineTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedPerTensorTensorType tensor_type = {
.shape = shape,
.element_type = QuantizedElementTypePerTensor(
TypeParam::kStorage, zero_point, TypeParam::kExpressed, scale)};
Tensor input_tensor{.type = tensor_type, .data = input_data.data()};
Tensor output_tensor{.type = tensor_type, .data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = sine_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b34f93e-e91e-4025-b82e-0a8ba602f71d | cpp | tensorflow/tensorflow | divide | tensorflow/lite/experimental/shlo/ops/divide.cc | tensorflow/lite/experimental/shlo/ops/divide_test.cc | #include "tensorflow/lite/experimental/shlo/ops/divide.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
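// std::divides gives the intended semantics here: truncating division for the
// signed integer types and IEEE-754 division for the float types.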
struct Divide : std::divides<void> {};
DivideOp Create(DivideOp::Attributes) { return {}; }
absl::Status Prepare(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("divide"), lhs,
IsIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("divide"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("divide"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Divide divide;
if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), divide, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
divide, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.divide: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/divide.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<DivideOp> {
static std::string Get() { return "Divide"; }
};
struct Divide : std::divides<void> {};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Divide, BinaryElementwiseOpShapePropagationTest,
DivideOp, TestParamNames);
using DivideBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
DivideOp,
ConcatTypes<BaselineConstraintIntTypes, BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Divide, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    DivideBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<DivideOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Divide, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<ArithmeticTestTypes>;
template <class T>
struct DivideTest : ::testing::Test {};
TYPED_TEST_SUITE(DivideTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(DivideTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
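  // Keep rhs strictly positive so neither the op nor the reference divides by
  // zero.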
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), Divide());
auto op = Create(DivideOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedDivideTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedDivideTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedDivideTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
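  // Draw rhs above the zero point so the dequantized divisor stays strictly
  // positive.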
Vector<StorageT> rhs_data = RandomBuffer<TypeParam::kStorage>(
shape, zero_point + 1, zero_point + 5);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Divide()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(DivideOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/divide.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/divide_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c9e068c-a6d4-43d8-b79a-da70f3a1b567 | cpp | tensorflow/tensorflow | cosine | tensorflow/lite/experimental/shlo/ops/cosine.cc | tensorflow/lite/experimental/shlo/ops/cosine_test.cc | #include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Cosine {
template <class T>
T operator()(T v) const {
return std::cos(v);
}
};
template <>
F16 Cosine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cosine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
CosineOp Create(CosineOp::Attributes) { return {}; }
absl::Status Prepare(CosineOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cosine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cosine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CosineOp& op, const Tensor& input, Tensor& output) {
Cosine cosine;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), cosine,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
cosine, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/cosine.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CosineOp> {
static std::string Get() { return "Cosine"; }
};
namespace {
struct Cosine {
template <class T>
T operator()(T v) const {
return std::cos(v);
}
} cosine_ref;
template <>
F16 Cosine::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cosine::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Cosine, UnaryElementwiseOpShapePropagationTest,
CosineOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Cosine, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<CosineOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<CosineOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Cosine, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CosineTest : ::testing::Test {};
TYPED_TEST_SUITE(CosineTest, FloatTestTypes, TestParamNames);
TYPED_TEST(CosineTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), cosine_ref);
auto op = Create(CosineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedCosineTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCosineTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCosineTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = cosine_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(CosineOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/cosine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/cosine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ea45c4e-ae1e-40a8-ad15-5bdd8477954e | cpp | tensorflow/tensorflow | popcnt | tensorflow/lite/experimental/shlo/ops/popcnt.cc | tensorflow/lite/experimental/shlo/ops/popcnt_test.cc | #include "tensorflow/lite/experimental/shlo/ops/popcnt.h"
#include <cstdint>
#include <type_traits>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Popcnt {
template <class T>
T operator()(T v) const {
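    // I4 is stored in a wider integer, so mask to the low nibble to count
    // only the 4 bits of its two's-complement representation.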
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::popcount(static_cast<uint8_t>(v & 0xf)));
} else {
return absl::popcount(static_cast<std::make_unsigned_t<T>>(v));
}
}
};
PopcntOp Create(PopcntOp::Attributes) { return {}; }
absl::Status Prepare(PopcntOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("popcnt"), input, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("popcnt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(PopcntOp& op, const Tensor& input, Tensor& output) {
Popcnt popcnt;
if (IsIntTensor(input)) {
DISPATCH_INT(detail::EvaluateNoQuantization, input.tensor_element_type(),
popcnt, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.popcnt: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/popcnt.h"
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/numeric/bits.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<PopcntOp> {
static std::string Get() { return "Popcnt"; }
};
template <>
struct SupportedOpDataType<PopcntOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct Popcnt {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::popcount(static_cast<uint8_t>(v & 0xf)));
} else {
return absl::popcount(static_cast<std::make_unsigned_t<T>>(v));
}
}
} popcnt_ref;
using PopcntTypes = ::testing::Types<int32_t, int16_t, int8_t, I4>;
template <class T>
struct PopcntFunctorTest : ::testing::Test {};
TYPED_TEST_SUITE(PopcntFunctorTest, PopcntTypes);
TYPED_TEST(PopcntFunctorTest, GivesCorrectResults) {
int64_t bit_count = 8 * sizeof(TypeParam);
if constexpr (std::is_same_v<I4, TypeParam>) {
bit_count = 4;
}
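  // In two's complement, lowest() is 100...0 (exactly one set bit) and max()
  // is 011...1 (bit_count - 1 set bits).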
EXPECT_EQ(popcnt_ref(std::numeric_limits<TypeParam>::lowest()), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(-1)), bit_count);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(0)), 0);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(1)), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(2)), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(3)), 2);
EXPECT_EQ(popcnt_ref(std::numeric_limits<TypeParam>::max()), bit_count - 1);
}
INSTANTIATE_TYPED_TEST_SUITE_P(Popcnt, UnaryElementwiseOpShapePropagationTest,
PopcntOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Popcnt, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<PopcntOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<PopcntOp, ConcatTypes<BoolTestType, FloatTestTypes,
PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Popcnt, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct PopcntTest : ::testing::Test {};
TYPED_TEST_SUITE(PopcntTest, IntTestTypes, TestParamNames);
TYPED_TEST(PopcntTest, IntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape, -12);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), popcnt_ref);
auto op = Create(PopcntOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/popcnt.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/popcnt_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6abba2e5-bea3-4193-8a00-66c00e7ee71b | cpp | tensorflow/tensorflow | subtract | tensorflow/lite/experimental/shlo/ops/subtract.cc | tensorflow/lite/experimental/shlo/ops/subtract_test.cc | #include "tensorflow/lite/experimental/shlo/ops/subtract.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Subtract : std::minus<void> {};
SubtractOp Create(SubtractOp::Attributes) { return {}; }
absl::Status Prepare(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("subtract"), lhs,
IsIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("subtract"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("subtract"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(SubtractOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Subtract subtract;
if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), subtract, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
subtract, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.subtract: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/subtract.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<SubtractOp> {
static std::string Get() { return "Subtract"; }
};
struct Subtract : std::minus<void> {};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Subtract,
BinaryElementwiseOpShapePropagationTest,
SubtractOp, TestParamNames);
using SubtractBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
SubtractOp,
ConcatTypes<BaselineConstraintIntTypes, BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Subtract, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    SubtractBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<SubtractOp,
ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Subtract, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<ArithmeticTestTypes>;
template <class T>
struct SubtractTest : ::testing::Test {};
TYPED_TEST_SUITE(SubtractTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(SubtractTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), Subtract());
auto op = Create(SubtractOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedSubtractTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedSubtractTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedSubtractTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Subtract()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(SubtractOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/subtract.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/subtract_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02993a93-84f7-455b-baaa-72db6f6534c1 | cpp | tensorflow/tensorflow | sqrt | tensorflow/lite/experimental/shlo/ops/sqrt.cc | tensorflow/lite/delegates/xnnpack/sqrt_test.cc | #include "tensorflow/lite/experimental/shlo/ops/sqrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sqrt {
template <class T>
T operator()(T v) const {
return std::sqrt(v);
}
};
template <>
F16 Sqrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sqrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
SqrtOp Create(SqrtOp::Attributes) { return {}; }
absl::Status Prepare(SqrtOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("sqrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sqrt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(SqrtOp& op, const Tensor& input, Tensor& output) {
Sqrt sqrt;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sqrt, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
sqrt, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.sqrt: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Sqrt, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_SQRT,
xnnpack_delegate.get());
}
TEST(Sqrt, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sqrt.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/sqrt_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
522e63ea-e056-4406-bce0-6eb4dfe25fe6 | cpp | tensorflow/tensorflow | tanh | tensorflow/lite/experimental/shlo/ops/tanh.cc | tensorflow/lite/delegates/xnnpack/tanh_test.cc | #include "tensorflow/lite/experimental/shlo/ops/tanh.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Tanh {
template <class T>
T operator()(T v) const {
return std::tanh(v);
}
};
template <>
F16 Tanh::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Tanh::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
TanhOp Create(TanhOp::Attributes) { return {}; }
absl::Status Prepare(TanhOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("tanh"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("tanh"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(TanhOp& op, const Tensor& input, Tensor& output) {
Tanh tanh;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), tanh, input,
output)
  } else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
tanh, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.tanh: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Tanh, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).RelativeTolerance(1.0e+4f).Test(
BuiltinOperator_TANH, xnnpack_delegate.get());
}
TEST(Tanh, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(1.0e+4f)
.Test(BuiltinOperator_TANH, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/tanh.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/tanh_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e36476b-428c-4721-92c0-7e256182bea3 | cpp | tensorflow/tensorflow | kvcache | tensorflow/lite/experimental/genai/kvcache.cc | tensorflow/lite/experimental/genai/kvcache_test.cc | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/resource/cache_buffer.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace llm {
static const int kPositionTensor = 0;
static const int kKeyTensor = 1;
static const int kValueTensor = 2;
static const int kFullKeyTensor = 0;
static const int kFullValueTensor = 1;
static const int kRequiredNumDimensions = 4;
static const int kDefaultMaxNumCacheEntries = 2048;
static const int kDefaultNumTransformerLayers = 32;
static const int kDefaultTransformerLayerId = 0;
static const int KVCACHE_KEY_RESOURCE = 42;
static const int KVCACHE_VALUE_RESOURCE = 43;
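// Arbitrary keys into the subgraph's resource map. Every KV cache op looks up
// the same two entries, so all transformer layers share one key buffer and
// one value buffer.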
struct OpData {
int num_layers;
int layer_index;
int max_num_entries;
int first_slot_index;
resource::CacheBuffer* key_cache_buffer;
resource::CacheBuffer* value_cache_buffer;
bool is_initialized;
uint8_t* key_cache_ptr;
uint8_t* value_cache_ptr;
};
void* KVCacheInit(TfLiteContext* context, const char* buffer, size_t length) {
OpData* op_data = new OpData();
op_data->max_num_entries = -1;
op_data->num_layers = -1;
op_data->layer_index = -1;
op_data->first_slot_index = -1;
op_data->key_cache_buffer = nullptr;
op_data->value_cache_buffer = nullptr;
op_data->is_initialized = false;
op_data->key_cache_ptr = nullptr;
op_data->value_cache_ptr = nullptr;
return op_data;
}
TfLiteStatus KVCachePrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
if (!op_data->is_initialized) {
const uint8_t* buffer =
reinterpret_cast<const uint8_t*>(node->custom_initial_data);
const size_t length = node->custom_initial_data_size;
auto flexbuffer_map = flexbuffers::GetRoot(buffer, length).AsMap();
int32_t max_num_entries = flexbuffer_map["kv_cache_max"].AsInt32();
int32_t num_layers = flexbuffer_map["num_layers"].AsInt32();
int32_t layer_index = flexbuffer_map["layer_index"].AsInt32();
op_data->max_num_entries =
max_num_entries > 0 ? max_num_entries : kDefaultMaxNumCacheEntries;
op_data->num_layers =
num_layers > 0 ? num_layers : kDefaultNumTransformerLayers;
op_data->layer_index =
layer_index > 0 ? layer_index : kDefaultTransformerLayerId;
op_data->first_slot_index = 0;
op_data->is_initialized = true;
}
const TfLiteTensor* position;
const TfLiteTensor* key;
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kPositionTensor, &position));
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kKeyTensor, &key));
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TF_LITE_ENSURE_EQ(context, position->type, kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, key->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, value->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, NumDimensions(position) == 1);
TF_LITE_ENSURE(
context, GetTensorShape(position).Dims(0) == GetTensorShape(key).Dims(1));
TF_LITE_ENSURE(context, NumDimensions(key) == kRequiredNumDimensions);
TF_LITE_ENSURE(context, GetTensorShape(key).Dims(0) == 1);
TF_LITE_ENSURE(context, HaveSameShapes(key, value));
TfLiteTensor* kfull;
TfLiteTensor* vfull;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullKeyTensor, &kfull));
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullValueTensor, &vfull));
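  // The outputs alias the persistent cache buffers directly; kTfLiteCustom
  // keeps the runtime from allocating separate storage for them.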
kfull->allocation_type = kTfLiteCustom;
vfull->allocation_type = kTfLiteCustom;
kfull->type = kTfLiteFloat32;
vfull->type = kTfLiteFloat32;
TfLiteIntArray* input_dims = key->dims;
TfLiteIntArray* kcache_dims = TfLiteIntArrayCopy(input_dims);
TfLiteIntArray* vcache_dims = TfLiteIntArrayCopy(input_dims);
kcache_dims->data[1] = op_data->max_num_entries;
vcache_dims->data[1] = op_data->max_num_entries;
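  // One 5-D backing buffer holds the cache for every layer:
  // {batch, num_layers, max_num_entries, input_dim2, input_dim3}.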
TfLiteIntArray* kcache_buffer_dims = TfLiteIntArrayCreate(5);
kcache_buffer_dims->data[0] = input_dims->data[0];
kcache_buffer_dims->data[1] = op_data->num_layers;
kcache_buffer_dims->data[2] = op_data->max_num_entries;
kcache_buffer_dims->data[3] = input_dims->data[2];
kcache_buffer_dims->data[4] = input_dims->data[3];
TfLiteIntArray* vcache_buffer_dims = TfLiteIntArrayCopy(kcache_buffer_dims);
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
if (resources.count(KVCACHE_KEY_RESOURCE) == 0) {
auto* cbuffer = new resource::CacheBuffer();
cbuffer->Initialize(*kcache_buffer_dims);
resources.emplace(KVCACHE_KEY_RESOURCE, cbuffer);
op_data->key_cache_buffer = cbuffer;
} else {
resource::ResourceBase* resourcePtr =
resources.at(KVCACHE_KEY_RESOURCE).get();
resource::CacheBuffer* cbuffer = (resource::CacheBuffer*)(resourcePtr);
op_data->key_cache_buffer = cbuffer;
}
if (resources.count(KVCACHE_VALUE_RESOURCE) == 0) {
auto* cbuffer = new resource::CacheBuffer();
cbuffer->Initialize(*vcache_buffer_dims);
resources.emplace(KVCACHE_VALUE_RESOURCE, cbuffer);
op_data->value_cache_buffer = cbuffer;
} else {
resource::ResourceBase* resourcePtr =
resources.at(KVCACHE_VALUE_RESOURCE).get();
resource::CacheBuffer* cbuffer = (resource::CacheBuffer*)(resourcePtr);
op_data->value_cache_buffer = cbuffer;
}
RuntimeShape shape(GetTensorShape(key));
const int elements_in_one_entry = shape.Dims(2) * shape.Dims(3);
const int elements_in_one_block =
op_data->max_num_entries * elements_in_one_entry;
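  // Offset into the shared buffer so the outputs point at the block owned by
  // this op's transformer layer.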
uint8_t* k_ptr =
reinterpret_cast<uint8_t*>(op_data->key_cache_buffer->GetBuffer());
uint8_t* v_ptr =
reinterpret_cast<uint8_t*>(op_data->value_cache_buffer->GetBuffer());
k_ptr = k_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
v_ptr = v_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
size_t kcache_dims_flatsize = kcache_dims->data[0] * kcache_dims->data[1] *
kcache_dims->data[2] * kcache_dims->data[3];
size_t vcache_dims_flatsize = vcache_dims->data[0] * vcache_dims->data[1] *
vcache_dims->data[2] * vcache_dims->data[3];
RuntimeShape kfull_shape(GetTensorShape(kfull));
RuntimeShape vfull_shape(GetTensorShape(vfull));
if (kfull_shape.FlatSize() > 1 && vfull_shape.FlatSize() > 1) {
TF_LITE_ENSURE_EQ(context, kfull_shape.FlatSize(), kcache_dims_flatsize);
TF_LITE_ENSURE_EQ(context, vfull_shape.FlatSize(), vcache_dims_flatsize);
}
TF_LITE_ENSURE_EQ(context, elements_in_one_block, kcache_dims_flatsize);
TF_LITE_ENSURE_EQ(context, elements_in_one_block, vcache_dims_flatsize);
kfull->data.data = k_ptr;
vfull->data.data = v_ptr;
op_data->key_cache_ptr = k_ptr;
op_data->value_cache_ptr = v_ptr;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, kfull, kcache_dims));
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, vfull, vcache_dims));
TfLiteIntArrayFree(kcache_buffer_dims);
TfLiteIntArrayFree(vcache_buffer_dims);
return kTfLiteOk;
}
void KVCacheFree(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
TfLiteStatus KVCacheEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* position;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kPositionTensor, &position));
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kKeyTensor, &key));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TfLiteTensor* kfull;
TfLiteTensor* vfull;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullKeyTensor, &kfull));
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullValueTensor, &vfull));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
float* key_cache_ptr = op_data->key_cache_buffer->GetBuffer();
float* value_cache_ptr = op_data->value_cache_buffer->GetBuffer();
const int layer_index = op_data->layer_index;
const int64_t max_num_entries = op_data->max_num_entries;
int current_num_entries =
op_data->key_cache_buffer->GetNumEntries(layer_index);
RuntimeShape shape(GetTensorShape(key));
const int64_t num_slots_needed = shape.Dims(1);
const int elements_in_one_entry = shape.Dims(2) * shape.Dims(3);
const int elements_in_one_block =
op_data->max_num_entries * elements_in_one_entry;
const int64_t num_bytes_per_tensor = sizeof(float) * elements_in_one_entry;
uint8_t* k_ptr = reinterpret_cast<uint8_t*>(key_cache_ptr);
uint8_t* v_ptr = reinterpret_cast<uint8_t*>(value_cache_ptr);
k_ptr = k_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
v_ptr = v_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
TF_LITE_ENSURE_EQ(context, k_ptr, op_data->key_cache_ptr);
TF_LITE_ENSURE_EQ(context, v_ptr, op_data->value_cache_ptr);
TF_LITE_ENSURE_EQ(context, k_ptr, kfull->data.data);
TF_LITE_ENSURE_EQ(context, v_ptr, vfull->data.data);
const int64_t input_first_idx = position->data.i64[0];
const int64_t input_last_idx = input_first_idx + num_slots_needed - 1;
const int64_t cache_first_slot_idx = op_data->first_slot_index;
const int64_t cache_last_slot_idx =
cache_first_slot_idx + op_data->max_num_entries - 1;
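  // Number of oldest entries that must be evicted so the incoming slots fit,
  // capped at the cache capacity.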
const int slots_to_shift = std::min(
std::max(static_cast<int64_t>(0), input_last_idx - cache_last_slot_idx),
max_num_entries);
int64_t first_slot = input_first_idx - op_data->first_slot_index;
if (first_slot < 0) {
TF_LITE_KERNEL_LOG(
context,
"Can not specify a position before this cache's first slot index of %d",
op_data->first_slot_index);
return kTfLiteError;
}
int64_t byte_offset_for_output = first_slot * num_bytes_per_tensor;
int64_t num_slots_for_output = num_slots_needed;
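  // Partial overflow: slide the surviving entries to the front of this
  // layer's block, then treat the whole block as updated output.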
if (slots_to_shift > 0 && slots_to_shift < max_num_entries) {
byte_offset_for_output = 0;
num_slots_for_output = max_num_entries;
const int bytes_offset =
sizeof(float) * elements_in_one_entry * slots_to_shift;
const int size_bytes_to_shift = sizeof(float) * elements_in_one_entry *
(max_num_entries - slots_to_shift);
memmove(k_ptr, k_ptr + bytes_offset, size_bytes_to_shift);
memmove(v_ptr, v_ptr + bytes_offset, size_bytes_to_shift);
}
op_data->first_slot_index = op_data->first_slot_index + slots_to_shift;
first_slot = input_first_idx - op_data->first_slot_index;
const int64_t bytes_offset_for_cache = first_slot * num_bytes_per_tensor;
memcpy(k_ptr + bytes_offset_for_cache, key->data.data, key->bytes);
memcpy(v_ptr + bytes_offset_for_cache, value->data.data, value->bytes);
current_num_entries =
std::min(first_slot + num_slots_needed, max_num_entries);
op_data->key_cache_buffer->SetNumEntries(layer_index, current_num_entries);
op_data->value_cache_buffer->SetNumEntries(layer_index, current_num_entries);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_KV_CACHE() {
static TfLiteRegistration r = {llm::KVCacheInit, llm::KVCacheFree,
llm::KVCachePrepare, llm::KVCacheEval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/experimental/genai/genai_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
static const int kDefaultMaxNumCacheEntries = 2048;
class SimpleCacheOpModel : public SingleOpModel {
public:
SimpleCacheOpModel(const TensorData& pos_tensor, const TensorData& k_tensor,
const TensorData& v_tensor) {
pos_ = AddInput(pos_tensor);
k_ = AddInput(k_tensor);
v_ = AddInput(v_tensor);
kfull_ = AddOutput(k_tensor.type);
vfull_ = AddOutput(v_tensor.type);
SetCustomOp("KV_Cache", {}, ops::custom::Register_KV_CACHE);
BuildInterpreter({GetShape(pos_), GetShape(k_), GetShape(v_)});
}
void SetPosition(const std::vector<int64_t>& data) {
PopulateTensor(pos_, data);
}
void SetKey(const std::vector<float>& data) { PopulateTensor(k_, data); }
void SetValue(const std::vector<float>& data) { PopulateTensor(v_, data); }
void ResizePosition(const std::vector<int>& dims) {
interpreter_->ResizeInputTensor(pos_, dims);
}
void ResizeKey(const std::vector<int>& dims) {
interpreter_->ResizeInputTensor(k_, dims);
}
void ResizeValue(const std::vector<int>& dims) {
interpreter_->ResizeInputTensor(v_, dims);
}
std::vector<float> GetFullK() {
const auto output = ExtractVector<float>(kfull_);
return output;
}
std::vector<float> GetFullV() {
const auto output = ExtractVector<float>(vfull_);
return output;
}
TfLiteStatus ReAllocate() { return interpreter_->AllocateTensors(); }
protected:
int pos_;
int k_;
int v_;
int kfull_;
int vfull_;
};
TEST(SimpleCacheOp1Test, BasicTest) {
SimpleCacheOpModel m({TensorType_INT64, {2}},
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 2, 2, 3}});
m.SetPosition({0, 1});
m.SetKey({{1, 0, -6, 2, 4, 3, 1, 0, -6, 2, 4, 3}});
m.SetValue({{4, 2, -4, 2, 4, 2, 4, 2, -4, 2, 4, 2}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> fullk = m.GetFullK();
std::vector<float> fullv = m.GetFullV();
ASSERT_EQ(fullk.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
ASSERT_EQ(fullv.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
}
TEST(SimpleCacheOp2Test, AddToCache) {
SimpleCacheOpModel m({TensorType_INT64, {2}},
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 2, 2, 3}});
m.SetPosition({0, 1});
std::vector<float> key = {1, 5, -6, 2, 4, 3, 8, 9, -8, 7, 2, 11};
m.SetKey(key);
std::vector<float> value = {2, 3, -4, 5, 6, 7, 1, 8, -12, 11, 14, 21};
m.SetValue(value);
const int key_size = 2 * 3;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> fullk = m.GetFullK();
std::vector<float> fullv = m.GetFullV();
for (int i = 0; i < key.size(); ++i) {
ASSERT_EQ(fullk[i], key[i]);
ASSERT_EQ(fullv[i], value[i]);
}
for (int i = key.size(); i < fullk.size(); ++i) {
ASSERT_EQ(fullk[i], 0.);
ASSERT_EQ(fullv[i], 0.);
}
ASSERT_EQ(fullk.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
ASSERT_EQ(fullv.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
for (int i = 0; i < 510; i++) {
int offset = 2 * i + 2;
m.SetPosition({offset, offset + 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
}
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int i = 0; i < 1022 * key_size; ++i) {
ASSERT_NE(fullv[i], 0);
}
for (int i = 1022 * key_size; i < fullk.size(); ++i) {
ASSERT_EQ(fullv[i], 0);
}
}
TEST(SimpleCacheOp2Test, ShiftSlotsInCache) {
SimpleCacheOpModel m({TensorType_INT64, {2}},
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 2, 2, 3}});
m.SetPosition({0, 1});
std::vector<float> key = {1, 5, -6, 2, 4, 3, 2, 6, -7, 3, 5, 4};
m.SetKey(key);
std::vector<float> value = {4, 2, -4, 2, 4, 2, 9, 8, -9, 8, 9, 1};
m.SetValue(value);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> fullk = m.GetFullK();
std::vector<float> fullv = m.GetFullV();
for (int i = 0; i < key.size(); ++i) {
ASSERT_EQ(fullk[i], key[i]);
ASSERT_EQ(fullv[i], value[i]);
}
for (int i = key.size(); i < fullk.size(); ++i) {
ASSERT_EQ(fullk[i], 0.);
ASSERT_EQ(fullv[i], 0.);
}
ASSERT_EQ(fullk.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
ASSERT_EQ(fullv.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
for (int i = 0; i < 1023; i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
int offset = 2 * i + 2;
m.SetPosition({offset, offset + 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
}
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int i = 0; i < fullk.size(); ++i) {
ASSERT_NE(fullk[i], 0);
ASSERT_NE(fullv[i], 0);
}
for (int j = 0; j < 6; ++j) {
int idxfull = fullk.size() - 6 + j;
int idx = 6 + j;
ASSERT_EQ(fullk[idxfull], key[idx]);
ASSERT_EQ(fullv[idxfull], value[idx]);
}
std::vector<float> key2 = {1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7};
m.SetKey(key2);
std::vector<float> value2 = {8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9};
m.SetValue(value2);
m.SetPosition({2048, 2049});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int j = 0; j < 12; ++j) {
int idxfull = fullk.size() - 12 + j;
ASSERT_EQ(fullk[idxfull], key2[j]);
ASSERT_EQ(fullv[idxfull], value2[j]);
}
m.ResizeKey({1, 1, 2, 3});
m.ResizeValue({1, 1, 2, 3});
m.ResizePosition({1});
m.ReAllocate();
std::vector<float> key3 = {4, 4, 4, 4, 4, 4};
m.SetKey(key3);
std::vector<float> value3 = {2, 2, 2, 2, 2, 2};
m.SetValue(value3);
m.SetPosition({2050});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int j = 0; j < 6; ++j) {
int idxfull = fullk.size() - 6 + j;
ASSERT_EQ(fullk[idxfull], key3[j]);
ASSERT_EQ(fullv[idxfull], value3[j]);
}
for (int j = 0; j < 6; ++j) {
int idxfull = fullk.size() - 12 + j;
ASSERT_EQ(fullk[idxfull], key2[6 + j]);
ASSERT_EQ(fullv[idxfull], value2[6 + j]);
}
  std::vector<float> key4 = {5, 5, 5, 5, 5, 5};
  m.SetKey(key4);
  std::vector<float> value4 = {3, 3, 3, 3, 3, 3};
  m.SetValue(value4);
m.SetPosition({0});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/genai/kvcache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/genai/kvcache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bffdc450-ccf8-4fbf-aa70-09c947ea0531 | cpp | tensorflow/tensorflow | external_kvcache | tensorflow/lite/experimental/genai/external_kvcache.cc | tensorflow/lite/experimental/genai/external_kvcache_test.cc | #include <cstdint>
#include <cstring>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace llm {
static const int kKeyTensor = 0;
static const int kValueTensor = 1;
static const int kPositionTensor = 2;
static const int kKeySliceTensor = 3;
static const int kValueSliceTensor = 4;
static const int kRequiredNumDimensions = 4;
TfLiteStatus ExternalKVCachePrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 5);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* k_cache;
const TfLiteTensor* v_cache;
const TfLiteTensor* position;
const TfLiteTensor* k_slice;
const TfLiteTensor* v_slice;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kKeyTensor, &k_cache));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueTensor, &v_cache));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kPositionTensor, &position));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kKeySliceTensor, &k_slice));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueSliceTensor, &v_slice));
TfLiteTensor* updated_k_cache;
TfLiteTensor* updated_v_cache;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kKeyTensor, &updated_k_cache));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kValueTensor, &updated_v_cache));
TF_LITE_ENSURE_EQ(context, k_cache->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, v_cache->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, position->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, k_slice->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, v_slice->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, updated_k_cache->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, updated_v_cache->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, HaveSameShapes(k_cache, v_cache));
TF_LITE_ENSURE(context, HaveSameShapes(k_slice, v_slice));
TF_LITE_ENSURE(context, HaveSameShapes(updated_k_cache, updated_v_cache));
TF_LITE_ENSURE(context, HaveSameShapes(k_cache, updated_k_cache));
TF_LITE_ENSURE(context, NumDimensions(k_slice) == kRequiredNumDimensions);
TF_LITE_ENSURE(context, NumDimensions(k_cache) == kRequiredNumDimensions);
TF_LITE_ENSURE(context, NumDimensions(position) == 1);
TF_LITE_ENSURE(context, GetTensorShape(position).Dims(0) ==
GetTensorShape(k_slice).Dims(1));
TF_LITE_ENSURE(context, GetTensorShape(k_slice).Dims(0) == 1);
return kTfLiteOk;
}
TfLiteStatus ExternalKVCacheEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* k_cache;
const TfLiteTensor* v_cache;
const TfLiteTensor* position;
const TfLiteTensor* k_slice;
const TfLiteTensor* v_slice;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kKeyTensor, &k_cache));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueTensor, &v_cache));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kPositionTensor, &position));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kKeySliceTensor, &k_slice));
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueSliceTensor, &v_slice));
TfLiteTensor* updated_k_cache;
TfLiteTensor* updated_v_cache;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kKeyTensor, &updated_k_cache));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kValueTensor, &updated_v_cache));
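  // The outputs may share buffers with the input caches; only copy the caches
  // through when they live in distinct allocations.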
if (k_cache->data.raw != updated_k_cache->data.raw) {
memcpy(updated_k_cache->data.data, k_cache->data.data, k_cache->bytes);
}
if (v_cache->data.raw != updated_v_cache->data.raw) {
memcpy(updated_v_cache->data.data, v_cache->data.data, v_cache->bytes);
}
const int32_t elements_in_one_entry =
GetTensorShape(k_cache).Dims(2) * GetTensorShape(k_cache).Dims(3);
const int32_t cache_size = GetTensorShape(k_cache).Dims(1);
int32_t last_update_position = -1;
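  // Write one slice entry per position. A position lower than its predecessor
  // marks the end of the valid updates, so the remainder is skipped.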
for (int i = 0; i < position->bytes / sizeof(int32_t); ++i) {
const int32_t update_position = position->data.i32[i];
if (update_position < last_update_position) {
break;
}
last_update_position = update_position;
TF_LITE_ENSURE(context, update_position < cache_size);
const int32_t cache_offset = update_position * elements_in_one_entry;
const int32_t update_offset = i * elements_in_one_entry;
TF_LITE_ENSURE(context,
(cache_offset + elements_in_one_entry) * sizeof(float) <=
k_cache->bytes);
memcpy(updated_k_cache->data.f + cache_offset,
k_slice->data.f + update_offset,
elements_in_one_entry * sizeof(float));
memcpy(updated_v_cache->data.f + cache_offset,
v_slice->data.f + update_offset,
elements_in_one_entry * sizeof(float));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_EXTERNAL_KV_CACHE() {
static TfLiteRegistration r = {nullptr, nullptr,
llm::ExternalKVCachePrepare,
llm::ExternalKVCacheEval};
return &r;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/genai/genai_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::TestWithParam;
enum class TestType {
kSharedKV = 0,
kPingPongKV = 1,
};
class ExternalKVSingleOpModel : public SingleOpModel {
public:
ExternalKVSingleOpModel(const TensorData& k_cache, const TensorData& v_cache,
const TensorData& pos_tensor,
const TensorData& k_slice, const TensorData& v_slice,
TestType test_type)
: test_type_(test_type) {
k_cache_in_ = AddInput(k_cache);
v_cache_in_ = AddInput(v_cache);
position_ = AddInput(pos_tensor);
k_slice_ = AddInput(k_slice);
v_slice_ = AddInput(v_slice);
k_cache_out_ = AddOutput(k_cache);
v_cache_out_ = AddOutput(v_cache);
auto get_padded_cache_size = [&](const TensorData& cache) {
size_t size = static_cast<size_t>(std::accumulate(
cache.shape.begin(), cache.shape.end(), 1.0, std::multiplies<>()));
return size + kDefaultTensorAlignment;
};
SetCustomOp("EKV_Cache", {}, ops::custom::Register_EXTERNAL_KV_CACHE);
BuildInterpreter({GetShape(k_cache_in_), GetShape(v_cache_in_),
GetShape(position_), GetShape(k_slice_),
GetShape(v_slice_)});
k_cache_1_.resize(get_padded_cache_size(k_cache), 0.0);
v_cache_1_.resize(get_padded_cache_size(v_cache), 0.0);
k_cache_2_.resize(get_padded_cache_size(k_cache), 0.0);
v_cache_2_.resize(get_padded_cache_size(v_cache), 0.0);
}
TfLiteStatus Run(absl::Span<const int32_t> position,
absl::Span<const float> k_slice,
absl::Span<const float> v_slice) {
if (test_type_ == TestType::kSharedKV) {
TF_LITE_ENSURE_STATUS(SharedBufferPrepare());
} else {
TF_LITE_ENSURE_STATUS(PingPongBufferPrepare());
}
PopulateTensor(position_, position);
PopulateTensor(k_slice_, k_slice);
PopulateTensor(v_slice_, v_slice);
return SingleOpModel::Invoke();
};
std::vector<float> GetKCache() { return ExtractVector<float>(k_cache_out_); }
std::vector<float> GetVCache() { return ExtractVector<float>(v_cache_out_); }
protected:
TfLiteStatus SharedBufferPrepare() {
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(k_cache_1_, k_cache_in_));
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(v_cache_1_, v_cache_in_));
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(k_cache_1_, k_cache_out_));
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(v_cache_1_, v_cache_out_));
return interpreter_->AllocateTensors();
}
TfLiteStatus PingPongBufferPrepare() {
std::vector<float>* input_k_caches;
std::vector<float>* input_v_caches;
std::vector<float>* output_k_caches;
std::vector<float>* output_v_caches;
if (kv_flop_) {
input_k_caches = &k_cache_1_;
input_v_caches = &v_cache_1_;
output_k_caches = &k_cache_2_;
output_v_caches = &v_cache_2_;
} else {
input_k_caches = &k_cache_2_;
input_v_caches = &v_cache_2_;
output_k_caches = &k_cache_1_;
output_v_caches = &v_cache_1_;
}
kv_flop_ = !kv_flop_;
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(*input_k_caches, k_cache_in_));
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(*input_v_caches, v_cache_in_));
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(*output_k_caches, k_cache_out_));
TF_LITE_ENSURE_STATUS(
SetCustomAllocationFromCache(*output_v_caches, v_cache_out_));
return interpreter_->AllocateTensors();
}
TfLiteStatus SetCustomAllocationFromCache(std::vector<float>& cache,
int tensor_index) {
size_t total_bytes = cache.size() * sizeof(float);
size_t required_number_of_bytes = total_bytes - kDefaultTensorAlignment;
void* original_buffer = static_cast<void*>(cache.data());
void* aligned_buffer =
std::align(kDefaultTensorAlignment, required_number_of_bytes,
original_buffer, total_bytes);
if (aligned_buffer == nullptr ||
reinterpret_cast<intptr_t>(aligned_buffer) % kDefaultTensorAlignment !=
0) {
return kTfLiteError;
}
TfLiteCustomAllocation allocation = {.data = aligned_buffer,
.bytes = required_number_of_bytes};
return interpreter_->SetCustomAllocationForTensor(tensor_index, allocation);
};
int position_;
int k_cache_in_;
int v_cache_in_;
int k_slice_;
int v_slice_;
int k_cache_out_;
int v_cache_out_;
TestType test_type_;
std::vector<float> k_cache_1_;
std::vector<float> v_cache_1_;
std::vector<float> k_cache_2_;
std::vector<float> v_cache_2_;
bool kv_flop_ = true;
};
class EKVCacheTest : public TestWithParam<TestType> {};
TEST_P(EKVCacheTest, SingleSliceUpdateTest) {
ExternalKVSingleOpModel m(
{TensorType_FLOAT32, {1, 3, 2, 2}}, {TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_INT32, {1}}, {TensorType_FLOAT32, {1, 1, 2, 2}},
{TensorType_FLOAT32, {1, 1, 2, 2}}, GetParam());
{
ASSERT_EQ(m.Run({0}, {10, 11, 12, 13},
{20, 21, 22, 23}),
kTfLiteOk);
std::vector<float> k = m.GetKCache();
ASSERT_THAT(k, ElementsAreArray({10, 11, 12, 13, 0, 0, 0, 0, 0, 0, 0, 0}));
std::vector<float> v = m.GetVCache();
ASSERT_THAT(v, ElementsAreArray({20, 21, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0}));
}
{
ASSERT_EQ(m.Run({2}, {50, 51, 52, 53},
{60, 61, 62, 63}),
kTfLiteOk);
std::vector<float> k = m.GetKCache();
ASSERT_THAT(k,
ElementsAreArray({10, 11, 12, 13, 0, 0, 0, 0, 50, 51, 52, 53}));
std::vector<float> v = m.GetVCache();
ASSERT_THAT(v,
ElementsAreArray({20, 21, 22, 23, 0, 0, 0, 0, 60, 61, 62, 63}));
}
{
ASSERT_EQ(m.Run({1}, {70, 71, 72, 73},
{80, 81, 82, 83}),
kTfLiteOk);
std::vector<float> k = m.GetKCache();
ASSERT_THAT(
k, ElementsAreArray({10, 11, 12, 13, 70, 71, 72, 73, 50, 51, 52, 53}));
std::vector<float> v = m.GetVCache();
ASSERT_THAT(
v, ElementsAreArray({20, 21, 22, 23, 80, 81, 82, 83, 60, 61, 62, 63}));
}
{
ASSERT_EQ(m.Run({1}, {1, 2, 3, 4},
{1, 2, 3, 4}),
kTfLiteOk);
std::vector<float> k = m.GetKCache();
ASSERT_THAT(k,
ElementsAreArray({10, 11, 12, 13, 1, 2, 3, 4, 50, 51, 52, 53}));
std::vector<float> v = m.GetVCache();
ASSERT_THAT(v,
ElementsAreArray({20, 21, 22, 23, 1, 2, 3, 4, 60, 61, 62, 63}));
}
}
TEST_P(EKVCacheTest, MultipleSliceUpdateTest) {
ExternalKVSingleOpModel m(
{TensorType_FLOAT32, {1, 3, 2, 2}}, {TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_INT32, {2}}, {TensorType_FLOAT32, {1, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 2}}, GetParam());
{
ASSERT_EQ(m.Run({0, 1}, {1, 1, 1, 1, 2, 2, 2, 2},
{5, 5, 5, 5, 6, 6, 6, 6}),
kTfLiteOk);
std::vector<float> k = m.GetKCache();
ASSERT_THAT(k, ElementsAreArray({1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0}));
std::vector<float> v = m.GetVCache();
ASSERT_THAT(v, ElementsAreArray({5, 5, 5, 5, 6, 6, 6, 6, 0, 0, 0, 0}));
}
{
ASSERT_EQ(
m.Run({1, 2}, {10, 10, 10, 10, 11, 11, 11, 11},
{20, 20, 20, 20, 21, 21, 21, 21}),
kTfLiteOk);
std::vector<float> k = m.GetKCache();
ASSERT_THAT(k,
ElementsAreArray({1, 1, 1, 1, 10, 10, 10, 10, 11, 11, 11, 11}));
std::vector<float> v = m.GetVCache();
ASSERT_THAT(v,
ElementsAreArray({5, 5, 5, 5, 20, 20, 20, 20, 21, 21, 21, 21}));
}
}
TEST_P(EKVCacheTest, FailsOnOutOfBoundPosition) {
ExternalKVSingleOpModel m(
{TensorType_FLOAT32, {1, 3, 2, 2}}, {TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_INT32, {1}}, {TensorType_FLOAT32, {1, 1, 2, 2}},
{TensorType_FLOAT32, {1, 1, 2, 2}}, GetParam());
ASSERT_EQ(m.Run({3}, {1, 2, 3, 4},
{1, 2, 3, 4}),
kTfLiteError);
}
INSTANTIATE_TEST_SUITE_P(EKVCacheTest, EKVCacheTest,
testing::Values(TestType::kSharedKV,
TestType::kPingPongKV));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/genai/external_kvcache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/genai/external_kvcache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02af6d36-38bc-425f-b15d-5894c9ee2f50 | cpp | tensorflow/tensorflow | resource_variable | tensorflow/lite/experimental/resource/resource_variable.cc | tensorflow/lite/experimental/resource/resource_variable_test.cc | #include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
namespace tflite {
namespace resource {
ResourceVariable::ResourceVariable() {
memset(&tensor_, 0, sizeof(TfLiteTensor));
}
ResourceVariable::ResourceVariable(ResourceVariable&& other) {
tensor_ = other.tensor_;
is_initialized_ = other.is_initialized_;
memset(&other.tensor_, 0, sizeof(TfLiteTensor));
other.is_initialized_ = false;
}
ResourceVariable::~ResourceVariable() {
if (is_initialized_) {
free(tensor_.data.raw);
if (tensor_.dims) {
TfLiteIntArrayFree(tensor_.dims);
}
}
}
TfLiteStatus ResourceVariable::AssignFrom(const TfLiteTensor* tensor) {
char* old_raw = tensor_.data.raw;
size_t old_bytes = tensor_.bytes;
TfLiteIntArray* old_dims = tensor_.dims;
memset(&tensor_, 0, sizeof(tensor_));
tensor_.name = "ResourceVariable";
tensor_.allocation_type = kTfLiteDynamic;
tensor_.type = tensor->type;
tensor_.params = tensor->params;
tensor_.quantization = tensor->quantization;
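  // Reuse the previous dims and data buffer where possible; reallocate only
  // when the byte size actually changed.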
if (TfLiteIntArrayEqual(old_dims, tensor->dims)) {
tensor_.dims = old_dims;
} else {
TfLiteIntArrayFree(old_dims);
tensor_.dims = TfLiteIntArrayCopy(tensor->dims);
}
tensor_.data.raw = old_raw;
if (old_bytes != tensor->bytes) {
TfLiteTensorRealloc(tensor->bytes, &tensor_);
} else {
tensor_.bytes = old_bytes;
}
memcpy(tensor_.data.raw, tensor->data.raw, tensor_.bytes);
is_initialized_ = true;
return kTfLiteOk;
}
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
int resource_id) {
if (resources->count(resource_id) != 0) {
return;
}
resources->emplace(resource_id, std::make_unique<ResourceVariable>());
}
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id) {
auto it = resources->find(resource_id);
if (it != resources->end()) {
return static_cast<ResourceVariable*>(it->second.get());
}
return nullptr;
}
bool IsBuiltinResource(const TfLiteTensor* tensor) {
return tensor && tensor->type == kTfLiteResource &&
tensor->delegate == nullptr;
}
}
} | #include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace resource {
void InitTensor(const std::vector<int>& shape, TfLiteAllocationType alloc_type,
float default_value, TfLiteTensor* tensor) {
memset(tensor, 0, sizeof(TfLiteTensor));
int num_elements = 1;
for (auto dim : shape) num_elements *= dim;
if (shape.empty()) num_elements = 0;
float* buf = static_cast<float*>(malloc(sizeof(float) * num_elements));
for (int i = 0; i < num_elements; ++i) buf[i] = default_value;
const int bytes = num_elements * sizeof(buf[0]);
auto* dims = ConvertArrayToTfLiteIntArray(shape.size(), shape.data());
TfLiteTensorReset(TfLiteType::kTfLiteFloat32, nullptr, dims, {},
reinterpret_cast<char*>(buf), bytes, alloc_type, nullptr,
false, tensor);
}
TEST(ResourceTest, NonDynamicTensorAssign) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {1};
InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
free(tensor.data.raw);
TfLiteTensorFree(&tensor);
}
TEST(ResourceTest, DynamicTensorAssign) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {1};
InitTensor(shape, kTfLiteDynamic, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
TfLiteTensorFree(&tensor);
}
TEST(ResourceTest, AssignSameSizeTensor) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor_a, tensor_b;
std::vector<int> shape_a = {1};
std::vector<int> shape_b = {1};
InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
EXPECT_TRUE(var.IsInitialized());
value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(4.0f, value->data.f[0]);
TfLiteTensorFree(&tensor_a);
TfLiteTensorFree(&tensor_b);
}
TEST(ResourceTest, AssignDifferentSizeTensor) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor_a, tensor_b;
std::vector<int> shape_a = {1};
std::vector<int> shape_b = {2};
InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
EXPECT_EQ(1, value->dims->size);
EXPECT_EQ(1, value->dims->data[0]);
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
EXPECT_TRUE(var.IsInitialized());
value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float) * 2, value->bytes);
ASSERT_THAT(value, DimsAre({2}));
EXPECT_EQ(4.0f, value->data.f[0]);
TfLiteTensorFree(&tensor_a);
TfLiteTensorFree(&tensor_b);
}
TEST(IsBuiltinResource, IsBuiltinResourceTest) {
TfLiteTensor tensor;
tensor.type = kTfLiteResource;
tensor.delegate = nullptr;
EXPECT_TRUE(IsBuiltinResource(&tensor));
EXPECT_FALSE(IsBuiltinResource(nullptr));
tensor.type = kTfLiteFloat32;
EXPECT_FALSE(IsBuiltinResource(&tensor));
tensor.type = kTfLiteResource;
TfLiteDelegate delegate;
tensor.delegate = &delegate;
EXPECT_FALSE(IsBuiltinResource(&tensor));
}
TEST(ResourceTest, GetMemoryUsage) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {100};
InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(100 * sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({100}));
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(100 * sizeof(float), var.GetMemoryUsage());
free(tensor.data.raw);
TfLiteTensorFree(&tensor);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/resource/resource_variable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/resource/resource_variable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98537c84-48af-439f-af8a-69b3af0a998f | cpp | tensorflow/tensorflow | cache_buffer | tensorflow/lite/experimental/resource/cache_buffer.cc | tensorflow/lite/experimental/resource/cache_buffer_test.cc | #include "tensorflow/lite/experimental/resource/cache_buffer.h"
#include <cstdlib>
#include <cstring>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace resource {
TfLiteStatus CacheBuffer::Initialize(const TfLiteIntArray& shape) {
dims_ = TfLiteIntArrayCopy(&shape);
const size_t buf_size = NumElements(&shape);
buffer_.reset(new float[buf_size]);
memset(buffer_.get(), 0, sizeof(float) * buf_size);
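  // One entry counter per layer; dimension 1 of the shape is the layer axis.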
num_entries_.reset(new size_t[shape.data[1]]);
memset(num_entries_.get(), 0, sizeof(size_t) * shape.data[1]);
is_initialized_ = true;
return kTfLiteOk;
}
size_t CacheBuffer::GetSize() { return sizeof(float) * NumElements(dims_); }
size_t CacheBuffer::GetNumEntries(int idx) const { return num_entries_[idx]; }
CacheBuffer::~CacheBuffer() { TfLiteIntArrayFree(dims_); }
float* CacheBuffer::GetBuffer() { return buffer_.get(); }
void CacheBuffer::SetNumEntries(int idx, size_t count) {
TFLITE_DCHECK(count <= dims_->data[2]);
num_entries_[idx] = count;
}
}
} | #include "tensorflow/lite/experimental/resource/cache_buffer.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace resource {
TEST(CacheBufferTest, Initialize) {
TfLiteIntArray* shape = TfLiteIntArrayCreate(4);
shape->data[0] = 1;
shape->data[1] = 3;
shape->data[2] = 5;
shape->data[3] = 7;
CacheBuffer cache_buffer;
cache_buffer.Initialize(*shape);
EXPECT_EQ(cache_buffer.GetSize(), 420);
ASSERT_NE(cache_buffer.GetBuffer(), nullptr);
EXPECT_EQ(cache_buffer.GetNumEntries(0), 0);
EXPECT_EQ(cache_buffer.GetNumEntries(1), 0);
EXPECT_EQ(cache_buffer.GetNumEntries(2), 0);
cache_buffer.SetNumEntries(0, 3);
EXPECT_EQ(cache_buffer.GetNumEntries(0), 3);
TfLiteIntArrayFree(shape);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/resource/cache_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/resource/cache_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
626a0766-a0f3-40f1-ae8f-10783946c2f8 | cpp | tensorflow/tensorflow | audio_microfrontend | tensorflow/lite/experimental/microfrontend/audio_microfrontend.cc | tensorflow/lite/experimental/microfrontend/audio_microfrontend_test.cc | #include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace audio_microfrontend {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
typedef struct {
int sample_rate;
FrontendState* state;
int left_context;
int right_context;
int frame_stride;
bool zero_padding;
int out_scale;
bool out_float;
} TfLiteAudioMicrofrontendParams;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new TfLiteAudioMicrofrontendParams;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
data->sample_rate = m["sample_rate"].AsInt32();
struct FrontendConfig config;
config.window.size_ms = m["window_size"].AsInt32();
config.window.step_size_ms = m["window_step"].AsInt32();
config.filterbank.num_channels = m["num_channels"].AsInt32();
config.filterbank.upper_band_limit = m["upper_band_limit"].AsFloat();
config.filterbank.lower_band_limit = m["lower_band_limit"].AsFloat();
config.noise_reduction.smoothing_bits = m["smoothing_bits"].AsInt32();
config.noise_reduction.even_smoothing = m["even_smoothing"].AsFloat();
config.noise_reduction.odd_smoothing = m["odd_smoothing"].AsFloat();
config.noise_reduction.min_signal_remaining =
m["min_signal_remaining"].AsFloat();
config.pcan_gain_control.enable_pcan = m["enable_pcan"].AsBool();
config.pcan_gain_control.strength = m["pcan_strength"].AsFloat();
config.pcan_gain_control.offset = m["pcan_offset"].AsFloat();
config.pcan_gain_control.gain_bits = m["gain_bits"].AsInt32();
config.log_scale.enable_log = m["enable_log"].AsBool();
config.log_scale.scale_shift = m["scale_shift"].AsInt32();
data->state = new FrontendState;
FrontendPopulateState(&config, data->state, data->sample_rate);
data->left_context = m["left_context"].AsInt32();
data->right_context = m["right_context"].AsInt32();
data->frame_stride = m["frame_stride"].AsInt32();
data->zero_padding = m["zero_padding"].AsBool();
data->out_scale = m["out_scale"].AsInt32();
data->out_float = m["out_float"].AsBool();
return data;
}
void Free(TfLiteContext* context, void* buffer) {
auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(buffer);
FrontendFreeStateContents(data->state);
delete data->state;
delete data;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);
output->type = kTfLiteInt32;
if (data->out_float) {
output->type = kTfLiteFloat32;
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
int num_frames = 0;
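  // Frames fit only if the input covers at least one full window; each extra
  // window step (times the stride) yields one more output frame.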
if (input->dims->data[0] >= data->state->window.size) {
num_frames = (input->dims->data[0] - data->state->window.size) /
data->state->window.step / data->frame_stride +
1;
}
output_size->data[0] = num_frames;
output_size->data[1] = data->state->filterbank.num_channels *
(1 + data->left_context + data->right_context);
return context->ResizeTensor(context, output, output_size);
}
template <typename T>
void GenerateFeatures(TfLiteAudioMicrofrontendParams* data,
const TfLiteTensor* input, TfLiteTensor* output) {
const int16_t* audio_data = GetTensorData<int16_t>(input);
int64_t audio_size = input->dims->data[0];
T* filterbanks_flat = GetTensorData<T>(output);
int num_frames = 0;
if (audio_size >= data->state->window.size) {
num_frames = (input->dims->data[0] - data->state->window.size) /
data->state->window.step +
1;
}
std::vector<std::vector<T>> frame_buffer(num_frames);
int frame_index = 0;
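  // Feed the audio through the frontend; each call consumes some samples and
  // emits at most one feature frame.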
while (audio_size > 0) {
size_t num_samples_read;
struct FrontendOutput output = FrontendProcessSamples(
data->state, audio_data, audio_size, &num_samples_read);
audio_data += num_samples_read;
audio_size -= num_samples_read;
if (output.values != nullptr) {
frame_buffer[frame_index].reserve(output.size);
int i;
for (i = 0; i < output.size; ++i) {
frame_buffer[frame_index].push_back(static_cast<T>(output.values[i]) /
data->out_scale);
}
++frame_index;
}
}
int index = 0;
std::vector<T> pad(data->state->filterbank.num_channels, 0);
int anchor;
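  // Emit anchors at frame_stride intervals, stacking left/right context
  // frames (zero-padded or clamped at the boundaries) around each anchor.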
for (anchor = 0; anchor < frame_buffer.size(); anchor += data->frame_stride) {
int frame;
for (frame = anchor - data->left_context;
frame <= anchor + data->right_context; ++frame) {
std::vector<T>* feature;
if (data->zero_padding && (frame < 0 || frame >= frame_buffer.size())) {
feature = &pad;
} else if (frame < 0) {
feature = &frame_buffer[0];
} else if (frame >= frame_buffer.size()) {
feature = &frame_buffer[frame_buffer.size() - 1];
} else {
feature = &frame_buffer[frame];
}
for (auto f : *feature) {
filterbanks_flat[index++] = f;
}
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
FrontendReset(data->state);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (data->out_float) {
GenerateFeatures<float>(data, input, output);
} else {
    GenerateFeatures<int32_t>(data, input, output);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_AUDIO_MICROFRONTEND() {
static TfLiteRegistration r = {
audio_microfrontend::Init, audio_microfrontend::Free,
audio_microfrontend::Prepare, audio_microfrontend::Eval};
return &r;
}
}
}
} | #include "tensorflow/lite/experimental/microfrontend/audio_microfrontend.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using ::testing::ElementsAreArray;
class MicroFrontendOpModel : public SingleOpModel {
public:
MicroFrontendOpModel(int n_input, int n_frame, int n_frequency_per_frame,
int n_left_context, int n_right_context,
int n_frame_stride,
const std::vector<std::vector<int>>& input_shapes)
: n_input_(n_input),
n_frame_(n_frame),
n_frequency_per_frame_(n_frequency_per_frame),
n_left_context_(n_left_context),
n_right_context_(n_right_context),
n_frame_stride_(n_frame_stride) {
input_ = AddInput(TensorType_INT16);
output_ = AddOutput(TensorType_INT32);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("sample_rate", 1000);
fbb.Int("window_size", 25);
fbb.Int("window_step", 10);
fbb.Int("num_channels", 2);
fbb.Float("upper_band_limit", 450.0);
fbb.Float("lower_band_limit", 8.0);
fbb.Int("smoothing_bits", 10);
fbb.Float("even_smoothing", 0.025);
fbb.Float("odd_smoothing", 0.06);
fbb.Float("min_signal_remaining", 0.05);
fbb.Bool("enable_pcan", true);
fbb.Float("pcan_strength", 0.95);
fbb.Float("pcan_offset", 80.0);
fbb.Int("gain_bits", 21);
fbb.Bool("enable_log", true);
fbb.Int("scale_shift", 6);
fbb.Int("left_context", n_left_context);
fbb.Int("right_context", n_right_context);
fbb.Int("frame_stride", n_frame_stride);
fbb.Bool("zero_padding", true);
fbb.Int("out_scale", 1);
fbb.Bool("out_float", false);
});
fbb.Finish();
SetCustomOp("MICRO_FRONTEND", fbb.GetBuffer(),
Register_AUDIO_MICROFRONTEND);
BuildInterpreter(input_shapes);
}
void SetInput(const std::vector<int16_t>& data) {
PopulateTensor(input_, data);
}
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
int num_inputs() { return n_input_; }
  int num_frames() { return n_frame_; }
int num_frequency_per_frame() { return n_frequency_per_frame_; }
int num_left_context() { return n_left_context_; }
int num_right_context() { return n_right_context_; }
int num_frame_stride() { return n_frame_stride_; }
protected:
int input_;
int output_;
int n_input_;
int n_frame_;
int n_frequency_per_frame_;
int n_left_context_;
int n_right_context_;
int n_frame_stride_;
};
class BaseMicroFrontendTest : public ::testing::Test {
protected:
std::vector<int16_t> micro_frontend_input_;
void VerifyGoldens(const std::vector<int16_t>& input,
const std::vector<std::vector<int>>& output,
MicroFrontendOpModel* micro_frontend,
float tolerance = 1e-5) {
const int num_inputs = micro_frontend->num_inputs();
EXPECT_GT(num_inputs, 0);
    const int num_frames = micro_frontend->num_frames();
EXPECT_GT(num_frames, 0);
EXPECT_EQ(num_frames, output.size());
const int num_frequency_per_frame =
micro_frontend->num_frequency_per_frame();
EXPECT_GT(num_frequency_per_frame, 0);
EXPECT_EQ(num_frequency_per_frame, output[0].size());
micro_frontend->SetInput(input);
ASSERT_EQ(micro_frontend->Invoke(), kTfLiteOk);
std::vector<int> output_flattened;
int anchor;
for (anchor = 0; anchor < output.size();
anchor += micro_frontend->num_frame_stride()) {
int frame;
for (frame = anchor - micro_frontend->num_left_context();
frame <= anchor + micro_frontend->num_right_context(); ++frame) {
if (frame < 0 || frame >= output.size()) {
int j;
for (j = 0; j < num_frequency_per_frame; ++j) {
output_flattened.push_back(0.0);
}
} else {
for (auto data_point : output[frame]) {
output_flattened.push_back(data_point);
}
}
}
}
EXPECT_THAT(micro_frontend->GetOutput(),
ElementsAreArray(output_flattened));
}
};
class TwoConsecutive36InputsMicroFrontendTest : public BaseMicroFrontendTest {
void SetUp() override {
micro_frontend_input_ = {
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768};
}
};
TEST_F(TwoConsecutive36InputsMicroFrontendTest, MicroFrontendBlackBoxTest) {
const int n_input = 36;
const int n_frame = 2;
const int n_frequency_per_frame = 2;
MicroFrontendOpModel micro_frontend(n_input, n_frame, n_frequency_per_frame,
1, 1, 1,
{
{n_input},
});
const std::vector<std::vector<int>> micro_frontend_golden_output = {
{479, 425}, {436, 378}};
VerifyGoldens(micro_frontend_input_, micro_frontend_golden_output,
µ_frontend);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/audio_microfrontend.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/audio_microfrontend_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a6e70aec-991c-4fe3-b7a0-d7fcca4b9887 | cpp | tensorflow/tensorflow | model_loader | tensorflow/lite/tools/model_loader.cc | tensorflow/lite/tools/model_loader_test.cc | #include "tensorflow/lite/tools/model_loader.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <cerrno>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace tools {
bool ModelLoader::Init() {
if (model_ && model_->initialized()) {
return true;
}
if (!InitInternal()) {
return false;
}
if (!model_ || !model_->initialized()) {
return false;
}
return true;
}
bool PathModelLoader::InitInternal() {
if (model_path_.empty()) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "model_path is empty.");
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromFile(model_path_.c_str());
return true;
}
bool BufferModelLoader::InitInternal() {
if (!caller_owned_buffer_ || model_size_ <= 0) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to create BufferModelLoader: caller_owned_buffer "
"is %s; model_size: %zu",
caller_owned_buffer_ ? "not null" : "null", model_size_);
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromBuffer(caller_owned_buffer_,
model_size_);
return true;
}
#ifndef _WIN32
bool MmapModelLoader::InitInternal() {
if (model_fd_ < 0 || model_offset_ < 0 || model_size_ < 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Invalid model file descriptor. file descriptor: %d model_offset: "
"%zu model_size: %zu",
model_fd_, model_offset_, model_size_);
return false;
}
if (!MMAPAllocation::IsSupported()) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "MMAPAllocation is not supported.");
return false;
}
auto allocation = std::make_unique<MMAPAllocation>(
model_fd_, model_offset_, model_size_, tflite::DefaultErrorReporter());
if (!allocation->valid()) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "MMAPAllocation is not valid.");
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromAllocation(std::move(allocation));
#if FLATBUFFERS_LITTLEENDIAN == 0
model_ = FlatBufferModel::ByteConvertModel(std::move(model_));
#endif
return true;
}
bool PipeModelLoader::InitInternal() {
if (pipe_fd_ < 0) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid pipe file descriptor %d",
pipe_fd_);
return false;
}
std::free(model_buffer_);
model_buffer_ = reinterpret_cast<uint8_t*>(std::malloc(model_size_));
int read_bytes = 0;
int remaining_bytes = model_size_;
uint8_t* buffer = model_buffer_;
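  // Drain the pipe until the whole model is read; read() may return short
  // counts, so loop on the remaining byte count.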
while (remaining_bytes > 0 &&
(read_bytes = read(pipe_fd_, buffer, remaining_bytes)) > 0) {
remaining_bytes -= read_bytes;
buffer += read_bytes;
}
close(pipe_fd_);
if (read_bytes < 0 || remaining_bytes != 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Read Model from pipe failed: %s. Expect to read %zu bytes, "
"%d bytes missing.",
std::strerror(errno), model_size_, remaining_bytes);
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromBuffer(
reinterpret_cast<const char*>(model_buffer_), model_size_);
return true;
}
#endif
std::unique_ptr<ModelLoader> CreateModelLoaderFromPath(
const std::string& path) {
std::vector<absl::string_view> parts = absl::StrSplit(path, ':');
if (parts.empty()) {
return nullptr;
}
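  // Recognized formats: "fd:<fd>:<offset>:<size>" and
  // "pipe:<read_fd>:<write_fd>:<size>" (both POSIX-only), plus
  // "buffer:<address>:<size>"; anything else is treated as a file path.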
#ifndef _WIN32
if (parts[0] == "fd") {
int model_fd;
size_t model_offset, model_size;
if (parts.size() != 4 || !absl::SimpleAtoi(parts[1], &model_fd) ||
!absl::SimpleAtoi(parts[2], &model_offset) ||
!absl::SimpleAtoi(parts[3], &model_size)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
path.c_str());
return nullptr;
}
return std::make_unique<MmapModelLoader>(model_fd, model_offset,
model_size);
}
if (parts[0] == "pipe") {
int read_fd, write_fd;
size_t model_size;
if (parts.size() != 4 || !absl::SimpleAtoi(parts[1], &read_fd) ||
!absl::SimpleAtoi(parts[2], &write_fd) ||
!absl::SimpleAtoi(parts[3], &model_size)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
path.c_str());
return nullptr;
}
if (write_fd >= 0) {
close(write_fd);
}
return std::make_unique<PipeModelLoader>(read_fd, model_size);
}
#endif
if (parts[0] == "buffer") {
int64_t buffer_handle;
size_t model_size;
if (parts.size() != 3 || !absl::SimpleAtoi(parts[1], &buffer_handle) ||
!absl::SimpleAtoi(parts[2], &model_size)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
path.c_str());
return nullptr;
}
return std::make_unique<BufferModelLoader>(
reinterpret_cast<const char*>(buffer_handle), model_size);
}
return std::make_unique<PathModelLoader>(path);
}
}
} | #include "tensorflow/lite/tools/model_loader.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace tools {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
using ::testing::IsNull;
using ::testing::Not;
using ::testing::WhenDynamicCastTo;
class ModelLoaderTest : public ::testing::Test {};
TEST_F(ModelLoaderTest, CreateFromModelPath) {
auto model_loader = std::make_unique<PathModelLoader>(kModelPath);
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateFromFdPath) {
int fd = open(kModelPath, O_RDONLY);
ASSERT_GE(fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(fd, &stat_buf), 0);
auto model_loader =
std::make_unique<MmapModelLoader>(fd, 0, stat_buf.st_size);
close(fd);
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateFromPipePath) {
auto model = FlatBufferModel::BuildFromFile(kModelPath);
flatbuffers::FlatBufferBuilder fbb;
ModelT model_obj;
model->GetModel()->UnPackTo(&model_obj);
std::string model_description = model_obj.description;
FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
int pipe_fds[2];
ASSERT_EQ(pipe(pipe_fds), 0);
pid_t r = fork();
if (r == 0) {
close(pipe_fds[0]);
int written_bytes = 0;
int remaining_bytes = fbb.GetSize();
uint8_t* buffer = fbb.GetBufferPointer();
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], buffer, remaining_bytes)) > 0) {
remaining_bytes -= written_bytes;
buffer += written_bytes;
}
close(pipe_fds[1]);
ASSERT_TRUE(written_bytes > 0 && remaining_bytes == 0);
_exit(0);
}
close(pipe_fds[1]);
auto model_loader =
std::make_unique<PipeModelLoader>(pipe_fds[0], fbb.GetSize());
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
model_description);
}
TEST_F(ModelLoaderTest, CreateBufferModelLoader) {
auto model = FlatBufferModel::BuildFromFile(kModelPath);
flatbuffers::FlatBufferBuilder fbb;
ModelT model_obj;
model->GetModel()->UnPackTo(&model_obj);
std::string model_description = model_obj.description;
FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
ASSERT_NE(model->allocation(), nullptr);
auto model_loader = std::make_unique<BufferModelLoader>(
reinterpret_cast<const char*>(fbb.GetBufferPointer()), fbb.GetSize());
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
model_description);
}
TEST_F(ModelLoaderTest, InvalidModelPath) {
auto model_loader = std::make_unique<PathModelLoader>("invalid/path");
ASSERT_NE(model_loader, nullptr);
EXPECT_FALSE(model_loader->Init());
}
TEST_F(ModelLoaderTest, InvalidFd) {
auto model_loader = std::make_unique<MmapModelLoader>(0, 5, 10);
ASSERT_NE(model_loader, nullptr);
EXPECT_FALSE(model_loader->Init());
}
TEST_F(ModelLoaderTest, InvalidPipe) {
auto model_loader = std::make_unique<PipeModelLoader>(-1, 10);
ASSERT_NE(model_loader, nullptr);
EXPECT_FALSE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateModelLoaderFromValidPath) {
EXPECT_THAT(CreateModelLoaderFromPath("a/b/c").get(),
WhenDynamicCastTo<PathModelLoader*>(Not(IsNull())));
EXPECT_THAT(CreateModelLoaderFromPath("fd:1:2:3").get(),
WhenDynamicCastTo<MmapModelLoader*>(Not(IsNull())));
EXPECT_THAT(CreateModelLoaderFromPath("pipe:1:2:3").get(),
WhenDynamicCastTo<PipeModelLoader*>(Not(IsNull())));
EXPECT_THAT(CreateModelLoaderFromPath("buffer:1:2").get(),
WhenDynamicCastTo<BufferModelLoader*>(Not(IsNull())));
}
TEST_F(ModelLoaderTest, CreateModelLoaderFromInvalidPath) {
EXPECT_EQ(CreateModelLoaderFromPath("fd:1"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("fd:1:2:3:4"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("pipe:1"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("pipe:1:2:3:4"), nullptr);
EXPECT_EQ(CreateModelLoaderFromPath("buffer:1:2:3"), nullptr);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/model_loader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/model_loader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4207ac5e-ac3c-4889-a0b0-7b2ecbf83816 | cpp | tensorflow/tensorflow | tool_params | tensorflow/lite/tools/tool_params.cc | tensorflow/lite/tools/tool_params_test.cc | #include "tensorflow/lite/tools/tool_params.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace tools {
void ToolParam::AssertHasSameType(ToolParam::ParamType a,
ToolParam::ParamType b) {
TFLITE_TOOLS_CHECK(a == b) << "Type mismatch while accessing parameter.";
}
template <>
ToolParam::ParamType ToolParam::GetValueType<int32_t>() {
return ToolParam::ParamType::TYPE_INT32;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<bool>() {
return ToolParam::ParamType::TYPE_BOOL;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<float>() {
return ToolParam::ParamType::TYPE_FLOAT;
}
template <>
ToolParam::ParamType ToolParam::GetValueType<std::string>() {
return ToolParam::ParamType::TYPE_STRING;
}
void ToolParams::AssertParamExists(const std::string& name) const {
TFLITE_TOOLS_CHECK(HasParam(name)) << name << " was not found.";
}
void ToolParams::Set(const ToolParams& other) {
for (const auto& param : params_) {
const ToolParam* other_param = other.GetParam(param.first);
if (other_param == nullptr) continue;
param.second->Set(*other_param);
}
}
void ToolParams::Merge(const ToolParams& other, bool overwrite) {
for (const auto& one : other.params_) {
auto it = params_.find(one.first);
if (it == params_.end()) {
AddParam(one.first, one.second->Clone());
} else if (overwrite) {
it->second->Set(*one.second);
}
}
}
}
} | #include "tensorflow/lite/tools/tool_params.h"
#include <gtest/gtest.h>
namespace tflite {
namespace tools {
namespace {
TEST(ToolParams, SetTest) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true, 1));
params.Set(others);
EXPECT_EQ(19, params.Get<int>("some-int1"));
EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
EXPECT_TRUE(params.HasValueSet<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_EQ(0, params.GetPosition<int>("some-int2"));
EXPECT_FALSE(params.HasValueSet<int>("some-int2"));
EXPECT_FALSE(params.HasParam("some-bool"));
}
TEST(ToolParams, MergeTestOverwriteTrue) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true ));
params.Merge(others, true );
EXPECT_EQ(19, params.Get<int>("some-int1"));
EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_TRUE(params.Get<bool>("some-bool"));
}
TEST(ToolParams, MergeTestOverwriteFalse) {
ToolParams params;
params.AddParam("some-int1", ToolParam::Create<int>(13 ));
params.AddParam("some-int2", ToolParam::Create<int>(17 ));
ToolParams others;
others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
others.AddParam("some-bool", ToolParam::Create<bool>(true ));
params.Merge(others);
EXPECT_EQ(13, params.Get<int>("some-int1"));
EXPECT_EQ(0, params.GetPosition<int>("some-int1"));
EXPECT_EQ(17, params.Get<int>("some-int2"));
EXPECT_TRUE(params.Get<bool>("some-bool"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/tool_params.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/tool_params_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c05f1a72-d7c8-40ca-9a21-d6f1b89f5fcc | cpp | tensorflow/tensorflow | gen_op_registration | tensorflow/lite/tools/gen_op_registration.cc | tensorflow/lite/tools/gen_op_registration_test.cc | #include "tensorflow/lite/tools/gen_op_registration.h"
#include <algorithm>
#include <string>
#include <vector>
#include "re2/re2.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
string NormalizeCustomOpName(const string& op) {
string method(op);
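  // Insert an underscore at every lower-to-upper camelCase boundary, then
  // uppercase the result, e.g. "CustomOp" -> "CUSTOM_OP".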
RE2::GlobalReplace(&method, "([a-z])([A-Z])", "\\1_\\2");
std::transform(method.begin(), method.end(), method.begin(), ::toupper);
return method;
}
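// Example (illustrative only): an underscore is inserted at each
// lower-to-upper case boundary before upper-casing, so
//   NormalizeCustomOpName("CustomOp");   // -> "CUSTOM_OP"
//   NormalizeCustomOpName("custom_op");  // -> "CUSTOM_OP"
//   NormalizeCustomOpName("customop");   // -> "CUSTOMOP"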
void ReadOpsFromModel(const ::tflite::Model* model,
tflite::RegisteredOpMap* builtin_ops,
tflite::RegisteredOpMap* custom_ops) {
if (!model) return;
auto opcodes = model->operator_codes();
if (!opcodes) return;
  for (const auto* opcode : *opcodes) {
    const int version = opcode->version();
    auto builtin_code = GetBuiltinCode(opcode);
    // Record the op under its name and widen the stored [min, max] version
    // range to cover every version encountered, including across repeated
    // calls with different models.
    if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
auto iter_and_bool = builtin_ops->insert(
std::make_pair(tflite::EnumNameBuiltinOperator(builtin_code),
std::make_pair(version, version)));
auto& versions = iter_and_bool.first->second;
versions.first = std::min(versions.first, version);
versions.second = std::max(versions.second, version);
} else {
auto iter_and_bool = custom_ops->insert(std::make_pair(
opcode->custom_code()->c_str(), std::make_pair(version, version)));
auto& versions = iter_and_bool.first->second;
versions.first = std::min(versions.first, version);
versions.second = std::max(versions.second, version);
}
}
}
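// Example (illustrative only): calling ReadOpsFromModel on a model with a
// version-1 CONV_2D and again on a model with a version-3 CONV_2D leaves
// builtin_ops with {"CONV_2D", {1, 3}}, i.e. the union of observed versions.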
} | #include "tensorflow/lite/tools/gen_op_registration.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
using ::testing::ElementsAreArray;
namespace tflite {
class GenOpRegistrationTest : public ::testing::Test {
protected:
GenOpRegistrationTest() {}
void ReadOps(const string& model_path) {
auto model = FlatBufferModel::BuildFromFile(model_path.data());
if (model) {
ReadOpsFromModel(model->GetModel(), &builtin_ops_, &custom_ops_);
}
}
std::map<string, std::pair<int, int>> builtin_ops_;
std::map<string, std::pair<int, int>> custom_ops_;
};
TEST_F(GenOpRegistrationTest, TestNonExistentFiles) {
ReadOps("/tmp/tflite_model_1234");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestModels) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {1, 1}}};
RegisteredOpMap custom_expected{{"testing_op", {1, 1}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestVersionedModels) {
ReadOps("tensorflow/lite/testdata/test_model_versioned_ops.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {3, 3}}};
RegisteredOpMap custom_expected{{"testing_op", {2, 2}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestBothModels) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
ReadOps("tensorflow/lite/testdata/test_model_versioned_ops.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {1, 3}}};
RegisteredOpMap custom_expected{{"testing_op", {1, 2}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestEmptyModels) {
ReadOps("tensorflow/lite/testdata/empty_model.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestZeroSubgraphs) {
ReadOps("tensorflow/lite/testdata/0_subgraphs.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestBrokenMmap) {
ReadOps("tensorflow/lite/testdata/test_model_broken.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestNormalizeCustomOpName) {
std::vector<std::pair<string, string>> testcase = {
{"CustomOp", "CUSTOM_OP"},
{"a", "A"},
{"custom_op", "CUSTOM_OP"},
{"customop", "CUSTOMOP"},
};
for (const auto& test : testcase) {
EXPECT_EQ(NormalizeCustomOpName(test.first), test.second);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/gen_op_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/gen_op_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
096bf5ca-4d46-4bb3-b4a0-0aa0eeb250b7 | cpp | tensorflow/tensorflow | list_flex_ops | tensorflow/lite/tools/list_flex_ops.cc | tensorflow/lite/tools/list_flex_ops_test.cc | #include "tensorflow/lite/tools/list_flex_ops.h"
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "json/json.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
std::string OpListToJSONString(const OpKernelSet& flex_ops) {
Json::Value result(Json::arrayValue);
for (const OpKernel& op : flex_ops) {
Json::Value op_kernel(Json::arrayValue);
op_kernel.append(Json::Value(op.op_name));
op_kernel.append(Json::Value(op.kernel_name));
result.append(op_kernel);
}
return Json::FastWriter().write(result);
}
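// Example (illustrative only): a set holding the pair ("AddV2",
// "BinaryOp<CPUDevice, functor::add<float>>") serializes to the single line
//   [["AddV2","BinaryOp<CPUDevice, functor::add<float>>"]]
// terminated by a newline, as produced by Json::FastWriter.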
string FindTensorflowKernelClass(tensorflow::NodeDef* node_def) {
if (!node_def || node_def->op().empty()) {
LOG(FATAL) << "Invalid NodeDef";
}
const tensorflow::OpRegistrationData* op_reg_data;
auto status =
tensorflow::OpRegistry::Global()->LookUp(node_def->op(), &op_reg_data);
if (!status.ok()) {
LOG(FATAL) << "Op " << node_def->op() << " not found: " << status;
}
  // Fill in any attrs for which the op definition declares defaults, so the
  // kernel lookup below sees a fully-specified node.
  AddDefaultsToNodeDef(op_reg_data->op_def, node_def);
tensorflow::DeviceNameUtils::ParsedName parsed_name;
if (!tensorflow::DeviceNameUtils::ParseFullName(node_def->device(),
&parsed_name)) {
LOG(FATAL) << "Failed to parse device from node_def: "
<< node_def->ShortDebugString();
}
string class_name;
if (!tensorflow::FindKernelDef(
tensorflow::DeviceType(parsed_name.type.c_str()), *node_def,
nullptr , &class_name)
.ok()) {
LOG(FATAL) << "Failed to find kernel class for op: " << node_def->op();
}
return class_name;
}
void AddFlexOpsFromModel(const tflite::Model* model, OpKernelSet* flex_ops) {
auto* subgraphs = model->subgraphs();
if (!subgraphs) return;
for (int subgraph_index = 0; subgraph_index < subgraphs->size();
++subgraph_index) {
const tflite::SubGraph* subgraph = subgraphs->Get(subgraph_index);
auto* operators = subgraph->operators();
auto* opcodes = model->operator_codes();
if (!operators || !opcodes) continue;
for (int i = 0; i < operators->size(); ++i) {
const tflite::Operator* op = operators->Get(i);
const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
if (tflite::GetBuiltinCode(opcode) != tflite::BuiltinOperator_CUSTOM ||
!tflite::IsFlexOp(opcode->custom_code()->c_str())) {
continue;
}
std::string flex_op_name(opcode->custom_code()->c_str());
std::string tf_op_name =
flex_op_name.substr(strlen(tflite::kFlexCustomCodePrefix));
if (op->custom_options_format() !=
tflite::CustomOptionsFormat_FLEXBUFFERS) {
LOG(FATAL) << "Invalid CustomOptionsFormat";
}
const flatbuffers::Vector<uint8_t>* custom_opt_bytes =
op->custom_options();
if (custom_opt_bytes && custom_opt_bytes->size()) {
const flexbuffers::Vector& v =
flexbuffers::GetRoot(custom_opt_bytes->data(),
custom_opt_bytes->size())
.AsVector();
std::string nodedef_str = v[1].AsString().str();
tensorflow::NodeDef nodedef;
if (nodedef_str.empty() || !nodedef.ParseFromString(nodedef_str)) {
LOG(FATAL) << "Failed to parse data into a valid NodeDef";
}
*nodedef.mutable_device() = "/CPU:0";
std::string kernel_class = FindTensorflowKernelClass(&nodedef);
flex_ops->insert({tf_op_name, kernel_class});
}
}
}
}
}
} | #include "tensorflow/lite/tools/list_flex_ops.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace flex {
class FlexOpsListTest : public ::testing::Test {
protected:
FlexOpsListTest() {}
void ReadOps(const string& path) {
std::string full_path = tensorflow::GetDataDependencyFilepath(path);
auto model = FlatBufferModel::BuildFromFile(full_path.data());
AddFlexOpsFromModel(model->GetModel(), &flex_ops_);
output_text_ = OpListToJSONString(flex_ops_);
}
void ReadOps(const tflite::Model* model) {
AddFlexOpsFromModel(model, &flex_ops_);
output_text_ = OpListToJSONString(flex_ops_);
}
std::string output_text_;
OpKernelSet flex_ops_;
};
TfLiteRegistration* Register_TEST() {
static TfLiteRegistration r = {nullptr, nullptr, nullptr, nullptr};
return &r;
}
std::vector<uint8_t> CreateFlexCustomOptions(std::string nodedef_raw_string) {
tensorflow::NodeDef node_def;
tensorflow::protobuf::TextFormat::ParseFromString(nodedef_raw_string,
&node_def);
std::string node_def_str = node_def.SerializeAsString();
auto flex_builder = std::make_unique<flexbuffers::Builder>();
flex_builder->Vector([&]() {
flex_builder->String(node_def.op());
flex_builder->String(node_def_str);
});
flex_builder->Finish();
return flex_builder->GetBuffer();
}
class FlexOpModel : public SingleOpModel {
public:
FlexOpModel(const std::string& op_name, const TensorData& input1,
const TensorData& input2, const TensorType& output,
const std::vector<uint8_t>& custom_options) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetCustomOp(op_name, custom_options, Register_TEST);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
TEST_F(FlexOpsListTest, TestModelsNoFlex) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
EXPECT_EQ(output_text_, "[]\n");
}
TEST_F(FlexOpsListTest, TestBrokenModel) {
EXPECT_DEATH_IF_SUPPORTED(
ReadOps("tensorflow/lite/testdata/test_model_broken.bin"), "");
}
TEST_F(FlexOpsListTest, TestZeroSubgraphs) {
ReadOps("tensorflow/lite/testdata/0_subgraphs.bin");
EXPECT_EQ(output_text_, "[]\n");
}
TEST_F(FlexOpsListTest, TestFlexAdd) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
TEST_F(FlexOpsListTest, TestTwoModel) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
ReadOps("tensorflow/lite/testdata/softplus_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, "
"functor::add<float>>\"],[\"Softplus\",\"SoftplusOp<CPUDevice, "
"float>\"]]\n");
}
TEST_F(FlexOpsListTest, TestDuplicatedOp) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
TEST_F(FlexOpsListTest, TestInvalidCustomOptions) {
std::vector<uint8_t> random_custom_options(20);
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
random_custom_options);
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Failed to parse data into a valid NodeDef");
}
TEST_F(FlexOpsListTest, TestOpNameEmpty) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
std::string random_fieldname = "random string";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())), "Invalid NodeDef");
}
TEST_F(FlexOpsListTest, TestOpNotFound) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"FlexInvalidOp\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Op FlexInvalidOp not found");
}
TEST_F(FlexOpsListTest, TestKernelNotFound) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"Add\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_BOOL } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Failed to find kernel class for op: Add");
}
TEST_F(FlexOpsListTest, TestFlexAddWithSingleOpModel) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"Add\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
ReadOps(tflite::GetModel(max_model.GetModelBuffer()));
EXPECT_EQ(output_text_,
"[[\"Add\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/list_flex_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/list_flex_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
307db5e8-a1cf-4674-a56e-967b9a33e171 | cpp | tensorflow/tensorflow | benchmark_utils | tensorflow/lite/tools/benchmark/benchmark_utils.cc | tensorflow/lite/tools/benchmark/benchmark_utils_test.cc | #include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace benchmark {
namespace util {
void SleepForSeconds(double sleep_seconds) {
if (sleep_seconds <= 0.0) {
return;
}
tflite::profiling::time::SleepForMicros(
static_cast<uint64_t>(sleep_seconds * 1e6));
}
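// Example (illustrative only):
//   util::SleepForSeconds(2.0);   // sleeps roughly 2'000'000 microseconds
//   util::SleepForSeconds(-5.0);  // returns immediately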
}
}
} | #include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace benchmark {
namespace {
TEST(BenchmarkHelpersTest, SleepForNegativeSeconds) {
const auto start_ts = tflite::profiling::time::NowMicros();
util::SleepForSeconds(-5.0);
const auto end_ts = tflite::profiling::time::NowMicros();
EXPECT_LT(end_ts - start_ts, 1000000);
}
TEST(BenchmarkHelpersTest, SleepForSomeSeconds) {
const auto start_ts = tflite::profiling::time::NowMicros();
util::SleepForSeconds(2.0);
const auto end_ts = tflite::profiling::time::NowMicros();
EXPECT_GT(end_ts - start_ts, 1900000);
}
TEST(BenchmarkHelpersTest, SplitAndParseFailed) {
std::vector<int> results;
const bool splitted = util::SplitAndParse("hello;world", ';', &results);
EXPECT_FALSE(splitted);
}
TEST(BenchmarkHelpersTest, SplitAndParseString) {
std::vector<std::string> results;
const bool splitted = util::SplitAndParse("hello,world", ',', &results);
EXPECT_TRUE(splitted);
EXPECT_EQ(2, results.size());
EXPECT_EQ("hello", results[0]);
EXPECT_EQ("world", results[1]);
}
TEST(BenchmarkHelpersTest, SplitAndParseInts) {
std::vector<int> results;
const bool splitted = util::SplitAndParse("1,2", ',', &results);
EXPECT_TRUE(splitted);
EXPECT_EQ(2, results.size());
EXPECT_EQ(1, results[0]);
EXPECT_EQ(2, results[1]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c628d76-0061-41ef-ac7a-3ab387fdc529 | cpp | tensorflow/tensorflow | benchmark_tflite_model | tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc | tensorflow/lite/tools/benchmark/benchmark_tflite_model_test.cc | #include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <string_view>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "ruy/profiler/profiler.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/signature_runner.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/model_runtime_info.h"
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/benchmark/profiling_listener.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/model_loader.h"
#include "tensorflow/lite/tools/utils.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {}
namespace tflite {
namespace benchmark {
namespace {
using utils::InputTensorData;
using utils::VoidUniquePtr;
#if defined(TFLITE_PROFILING_ENABLED)
constexpr bool kOpProfilingEnabledDefault = true;
#else
constexpr bool kOpProfilingEnabledDefault = false;
#endif
constexpr char kOpProfilingOutputModeStdout[] = "stdout";
constexpr char kOpProfilingOutputModeCsv[] = "csv";
constexpr char kOpProfilingOutputModeProto[] = "proto";
const char* kOpProfilingOutputModes[] = {kOpProfilingOutputModeStdout,
kOpProfilingOutputModeCsv,
kOpProfilingOutputModeProto};
TfLiteStatus MaybeSetFeatureValuesFromTensor(const TfLiteTensor& tensor,
tensorflow::Example& example) {
if (tensor.dims == nullptr) {
return kTfLiteError;
}
int total_elements = 1;
for (int i = 0; i < tensor.dims->size; i++) {
total_elements *= tensor.dims->data[i];
}
tensorflow::Feature& feature =
(*example.mutable_features()->mutable_feature())[tensor.name];
switch (tensor.type) {
case kTfLiteFloat32:
case kTfLiteFloat64:
feature.mutable_float_list()->mutable_value()->Resize(total_elements, 0);
return utils::TfLiteTensorToFloat32Array(
tensor,
absl::MakeSpan(
feature.mutable_float_list()->mutable_value()->mutable_data(),
feature.float_list().value_size()));
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteUInt16:
case kTfLiteInt16:
case kTfLiteInt32:
case kTfLiteUInt32:
case kTfLiteUInt64:
case kTfLiteInt64:
feature.mutable_int64_list()->mutable_value()->Resize(total_elements, 0);
return utils::TfLiteTensorToInt64Array(
tensor,
absl::MakeSpan(
feature.mutable_int64_list()->mutable_value()->mutable_data(),
feature.int64_list().value_size()));
default:
return kTfLiteError;
}
}
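// Sketch (illustrative only; `tensor` is an assumed, populated float
// TfLiteTensor):
//   tensorflow::Example example;
//   if (MaybeSetFeatureValuesFromTensor(tensor, example) == kTfLiteOk) {
//     // `example` now has a float_list feature keyed by tensor.name with one
//     // value per tensor element; unsupported types return kTfLiteError.
//   }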
class RuyProfileListener : public BenchmarkListener {
public:
void OnBenchmarkStart(const BenchmarkParams& params) override;
void OnBenchmarkEnd(const BenchmarkResults& results) override;
private:
std::unique_ptr<ruy::profiler::ScopeProfile> ruy_profile_;
};
void RuyProfileListener::OnBenchmarkStart(const BenchmarkParams& params) {
ruy_profile_ = std::make_unique<ruy::profiler::ScopeProfile>();
}
void RuyProfileListener::OnBenchmarkEnd(const BenchmarkResults& results) {
ruy_profile_ = nullptr;
}
class InterpreterStatePrinter : public BenchmarkListener {
public:
explicit InterpreterStatePrinter(Interpreter* interpreter)
: interpreter_(interpreter) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
params_ = ¶ms;
if (params_->Get<bool>("print_preinvoke_state")) {
TFLITE_LOG(INFO) << "\n====Printing out TfLite interpreter pre-invoke "
"state begins====";
tflite::PrintInterpreterState(
interpreter_, params_->Get<int32_t>("tensor_name_display_length"),
params_->Get<int32_t>("tensor_type_display_length"),
params_->Get<int32_t>("alloc_type_display_length"));
TFLITE_LOG(INFO) << "====Printing out TfLite interpreter pre-invoke "
"state ends====\n";
}
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
if (params_->Get<bool>("print_postinvoke_state")) {
TFLITE_LOG(INFO) << "\n====Printing out TfLite interpreter post-invoke "
"state begins====";
tflite::PrintInterpreterState(
interpreter_, params_->Get<int32_t>("tensor_name_display_length"),
params_->Get<int32_t>("tensor_type_display_length"),
params_->Get<int32_t>("alloc_type_display_length"));
TFLITE_LOG(INFO) << "====Printing out TfLite interpreter post-invoke "
"state ends====\n";
}
}
private:
Interpreter* const interpreter_ = nullptr;
const BenchmarkParams* params_ = nullptr;
};
class OutputSaver : public BenchmarkListener {
public:
explicit OutputSaver(BenchmarkInterpreterRunner* runner)
: interpreter_runner_(runner) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
params_ = ¶ms;
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
const std::string path = params_->Get<std::string>("output_filepath");
if (!path.empty()) {
std::ofstream ofs(path, std::ofstream::out);
if (ofs.good()) {
for (int i = 0; i < interpreter_runner_->outputs().size(); i++) {
int tensor_index = interpreter_runner_->outputs()[i];
ofs.write(interpreter_runner_->tensor(tensor_index)->data.raw,
interpreter_runner_->tensor(tensor_index)->bytes);
}
ofs.close();
}
}
const std::string output_proto_path =
params_->Get<std::string>("output_proto_filepath");
if (!output_proto_path.empty()) {
tensorflow::Example example;
for (int i = 0; i < interpreter_runner_->outputs().size(); i++) {
const int tensor_index = interpreter_runner_->outputs()[i];
const TfLiteTensor& tensor =
*(interpreter_runner_->tensor(tensor_index));
MaybeSetFeatureValuesFromTensor(tensor, example);
}
std::ofstream ofs(output_proto_path, std::ios::out | std::ios::binary);
if (ofs.good()) {
example.SerializeToOstream(&ofs);
ofs.close();
}
}
}
private:
BenchmarkInterpreterRunner* const interpreter_runner_;
const BenchmarkParams* params_ = nullptr;
};
class ModelRuntimeInfoListener : public BenchmarkListener {
public:
explicit ModelRuntimeInfoListener(Interpreter* interpreter)
: interpreter_(interpreter) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
const std::string output_file_path =
params.Get<std::string>("model_runtime_info_output_file");
const auto status =
profiling::GenerateModelRuntimeInfo(*interpreter_, output_file_path);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to generate model runtime info: " << status;
}
}
private:
Interpreter* const interpreter_ = nullptr;
};
std::vector<std::string> Split(const std::string& str, const char delim) {
if (str.empty()) {
return {};
}
return absl::StrSplit(str, delim);
}
int GetNumElements(const TfLiteIntArray* dim_array) {
  int num_elements = 1;
  // TfLiteIntArray::size is a signed int, so iterate with a matching type.
  for (int i = 0; i < dim_array->size; i++) {
    num_elements *= dim_array->data[i];
  }
  return num_elements;
}
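// Example (illustrative only): dims {2, 3, 4} yield 24 elements; a rank-0
// scalar (dims->size == 0) yields 1.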
void FillRandomString(tflite::DynamicBuffer* buffer,
const TfLiteIntArray* dim_array,
const std::function<std::string()>& random_func) {
int num_elements = GetNumElements(dim_array);
for (int i = 0; i < num_elements; ++i) {
auto str = random_func();
buffer->AddString(str.data(), str.length());
}
}
int FindLayerInfoIndex(std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info,
const std::string& input_name,
const string& names_string) {
for (int i = 0; i < info->size(); ++i) {
if (info->at(i).name == input_name) {
return i;
}
}
TFLITE_LOG(FATAL) << "Cannot find the corresponding input_layer name("
<< input_name << ") in --input_layer as " << names_string;
return -1;
}
TfLiteStatus PopulateInputValueRanges(
const std::string& names_string, const std::string& value_ranges_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
std::vector<std::string> value_ranges = Split(value_ranges_string, ':');
for (const auto& val : value_ranges) {
std::vector<std::string> name_range = Split(val, ',');
if (name_range.size() != 3) {
TFLITE_LOG(ERROR) << "Wrong input value range item specified: " << val;
return kTfLiteError;
}
int layer_info_idx = FindLayerInfoIndex(info, name_range[0], names_string);
int low, high;
bool has_low = absl::SimpleAtoi(name_range[1], &low);
bool has_high = absl::SimpleAtoi(name_range[2], &high);
if (!has_low || !has_high || low > high) {
TFLITE_LOG(ERROR)
<< "Wrong low and high value of the input value range specified: "
<< val;
return kTfLiteError;
}
info->at(layer_info_idx).has_value_range = true;
info->at(layer_info_idx).low = low;
info->at(layer_info_idx).high = high;
}
return kTfLiteOk;
}
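// Example (illustrative only): with names_string "input1,input2" and
// value_ranges_string "input1,1,2:input2,0,254", both layers get
// has_value_range = true with the given inclusive bounds; an item such as
// "input1,2,1" fails because low > high.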
TfLiteStatus PopulateInputValueFiles(
const std::string& names_string, const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
std::vector<std::string> value_files = Split(value_files_string, ',');
for (const auto& val : value_files) {
std::pair<std::string, std::string> name_file_pair;
TfLiteStatus status = SplitInputLayerNameAndValueFile(val, name_file_pair);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Wrong input value file item specified: " << val;
TFLITE_LOG(ERROR) << status;
return status;
}
int layer_info_idx =
FindLayerInfoIndex(info, name_file_pair.first, names_string);
if (info->at(layer_info_idx).has_value_range) {
TFLITE_LOG(WARN)
<< "The input_name:" << info->at(layer_info_idx).name
<< " appears both in input_layer_value_files and "
"input_layer_value_range. The input_layer_value_range of the "
"input_name will be ignored.";
}
info->at(layer_info_idx).input_file_path = name_file_pair.second;
}
return kTfLiteOk;
}
TfLiteStatus PopulateInputLayerInfo(
const std::string& names_string, const std::string& shapes_string,
const std::string& value_ranges_string,
const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
info->clear();
std::vector<std::string> names = Split(names_string, ',');
std::vector<std::string> shapes = Split(shapes_string, ':');
if (names.size() != shapes.size()) {
TFLITE_LOG(ERROR)
<< "The number of items in --input_layer_shape (" << shapes_string
<< ", with " << shapes.size()
<< " items) must match the number of items in --input_layer ("
<< names_string << ", with " << names.size()
<< " items). For example --input_layer=input1,input2 "
"--input_layer_shape=1,224,224,4:1,20";
return kTfLiteError;
}
for (int i = 0; i < names.size(); ++i) {
info->push_back(BenchmarkTfLiteModel::InputLayerInfo());
BenchmarkTfLiteModel::InputLayerInfo& input = info->back();
input.name = names[i];
TFLITE_TOOLS_CHECK(util::SplitAndParse(shapes[i], ',', &input.shape))
<< "Incorrect size string specified: " << shapes[i];
for (int dim : input.shape) {
if (dim == -1) {
TFLITE_LOG(ERROR)
<< "Any unknown sizes in the shapes (-1's) must be replaced"
<< " with the size you want to benchmark with.";
return kTfLiteError;
}
}
}
TF_LITE_ENSURE_STATUS(
PopulateInputValueRanges(names_string, value_ranges_string, info));
TF_LITE_ENSURE_STATUS(
PopulateInputValueFiles(names_string, value_files_string, info));
return kTfLiteOk;
}
std::shared_ptr<profiling::ProfileSummaryFormatter>
CreateProfileSummaryFormatter(const std::string& output_mode) {
if (output_mode == kOpProfilingOutputModeCsv) {
return std::make_shared<profiling::ProfileSummaryCSVFormatter>();
} else if (output_mode == kOpProfilingOutputModeProto) {
return std::make_shared<profiling::ProfileSummaryProtoFormatter>();
} else {
return std::make_shared<profiling::ProfileSummaryDefaultFormatter>();
}
}
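// Example (illustrative only):
//   CreateProfileSummaryFormatter("csv");     // CSV formatter
//   CreateProfileSummaryFormatter("proto");   // proto formatter
//   CreateProfileSummaryFormatter("stdout");  // default formatter, also the
//                                             // fallback for unknown modes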
}
TfLiteStatus SplitInputLayerNameAndValueFile(
const std::string& name_and_value_file,
std::pair<std::string, std::string>& name_file_pair) {
int delim_index = -1;
  // Scan for the single unescaped ':' delimiter; "::" escapes a literal ':'
  // inside the layer name. Cast to a signed type so an empty input does not
  // underflow the unsigned length() - 1 bound.
  for (int i = 0; i < static_cast<int>(name_and_value_file.length()) - 1;
       ++i) {
if (name_and_value_file[i] == ':') {
if (name_and_value_file[i + 1] == ':') {
++i;
} else {
if (delim_index == -1) {
delim_index = i;
} else {
TFLITE_LOG(ERROR)
<< name_and_value_file << " contains more than one delimiter.";
return kTfLiteError;
}
}
}
}
if (delim_index == -1) {
TFLITE_LOG(ERROR) << name_and_value_file
<< " doesn't contain any delimiter.";
return kTfLiteError;
}
name_file_pair.first = absl::StrReplaceAll(
name_and_value_file.substr(0, delim_index), {{"::", ":"}});
name_file_pair.second = absl::StrReplaceAll(
name_and_value_file.substr(delim_index + 1), {{"::", ":"}});
return kTfLiteOk;
}
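// Example (illustrative only): "::" escapes a literal ':' in the layer name:
//
//   std::pair<std::string, std::string> p;
//   SplitInputLayerNameAndValueFile("input::0:/data/in.bin", p);
//   // p.first == "input:0", p.second == "/data/in.bin"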
std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
BenchmarkInterpreterRunner::Create(tflite::Interpreter* const interpreter,
std::string signature_key) {
  // When a signature key is supplied, validate it against the model's
  // signature keys and bind the runner to that signature's subgraph;
  // otherwise fall back to the interpreter's primary subgraph.
  if (!signature_key.empty()) {
const std::vector<const std::string*>& keys = interpreter->signature_keys();
bool found = std::any_of(
keys.begin(), keys.end(),
[&signature_key](const auto& k) { return *k == signature_key; });
if (keys.size() > 1 && (signature_key.empty() || !found)) {
TFLITE_LOG(ERROR)
<< "Signature not specified or incorrect for graph with multiple "
"signatures. Pass one of the following to the flag "
"\"--signature_to_run_for\"";
for (const std::string* k : keys) {
TFLITE_LOG(ERROR) << " #> Signature key: " << *k;
}
return {kTfLiteError, nullptr};
} else if (keys.size() == 1 && signature_key.empty()) {
signature_key = *keys[0];
}
if (!signature_key.empty() && !keys.empty()) {
TFLITE_LOG(INFO) << "Using signature: " << signature_key;
auto signature_runner =
interpreter->GetSignatureRunner(signature_key.c_str());
if (signature_runner == nullptr) {
return {kTfLiteError, nullptr};
} else {
int subgraph_index =
interpreter->GetSubgraphIndexFromSignature(signature_key.c_str());
return {kTfLiteOk, std::make_unique<BenchmarkInterpreterRunner>(
interpreter, signature_runner,
interpreter->subgraph(subgraph_index))};
}
}
}
return {kTfLiteOk, std::make_unique<BenchmarkInterpreterRunner>(
interpreter, nullptr, nullptr)};
}
TfLiteStatus BenchmarkInterpreterRunner::AllocateTensors() {
if (signature_runner_ != nullptr) {
return signature_runner_->AllocateTensors();
} else {
return interpreter_->AllocateTensors();
}
}
TfLiteStatus BenchmarkInterpreterRunner::Invoke() {
if (signature_runner_ != nullptr) {
return signature_runner_->Invoke();
} else {
return interpreter_->Invoke();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::execution_plan() const {
if (signature_runner_ != nullptr) {
return subgraph_->execution_plan();
} else {
return interpreter_->execution_plan();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::inputs() const {
if (signature_runner_ != nullptr) {
return subgraph_->inputs();
} else {
return interpreter_->inputs();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::outputs() const {
if (signature_runner_ != nullptr) {
return subgraph_->outputs();
} else {
return interpreter_->outputs();
}
}
TfLiteTensor* BenchmarkInterpreterRunner::tensor(int tensor_index) {
if (signature_runner_ != nullptr) {
return subgraph_->tensor(tensor_index);
} else {
return interpreter_->tensor(tensor_index);
}
}
const std::pair<TfLiteNode, TfLiteRegistration>*
BenchmarkInterpreterRunner::node_and_registration(int node_index) const {
if (signature_runner_ != nullptr) {
return subgraph_->node_and_registration(node_index);
} else {
return interpreter_->node_and_registration(node_index);
}
}
TfLiteStatus BenchmarkInterpreterRunner::ResizeInputTensor(
int tensor_index, const std::vector<int>& new_size) {
if (signature_runner_ != nullptr) {
return subgraph_->ResizeInputTensor(tensor_index, new_size);
} else {
return interpreter_->ResizeInputTensor(tensor_index, new_size);
}
}
BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
BenchmarkParams default_params = BenchmarkModel::DefaultParams();
default_params.AddParam("graph", BenchmarkParam::Create<std::string>(""));
default_params.AddParam("signature_to_run_for",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("list_signatures",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("input_layer",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_shape",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_range",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_files",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("allow_fp16", BenchmarkParam::Create<bool>(false));
default_params.AddParam("require_full_delegation",
BenchmarkParam::Create<bool>(false));
default_params.AddParam(
"enable_op_profiling",
BenchmarkParam::Create<bool>(kOpProfilingEnabledDefault));
default_params.AddParam(
"op_profiling_output_mode",
BenchmarkParam::Create<std::string>(kOpProfilingOutputModeStdout));
default_params.AddParam("op_profiling_output_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("max_profiling_buffer_entries",
BenchmarkParam::Create<int32_t>(1024));
default_params.AddParam("allow_dynamic_profiling_buffer_increase",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("profiling_output_csv_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("export_model_runtime_info",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("model_runtime_info_output_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("print_preinvoke_state",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("print_postinvoke_state",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("release_dynamic_tensors",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("optimize_memory_for_large_tensors",
BenchmarkParam::Create<int32_t>(0));
default_params.AddParam("disable_delegate_clustering",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("enable_builtin_cast_constant_cache",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("output_filepath",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("output_proto_filepath",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("tensor_name_display_length",
BenchmarkParam::Create<int32_t>(25));
default_params.AddParam("tensor_type_display_length",
BenchmarkParam::Create<int32_t>(15));
default_params.AddParam("alloc_type_display_length",
BenchmarkParam::Create<int32_t>(18));
tools::ProvidedDelegateList delegate_providers(&default_params);
delegate_providers.AddAllDelegateParams();
return default_params;
}
BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
: BenchmarkModel(std::move(params)),
random_engine_(std::random_device()()) {
AddListener(&log_output_);
}
void BenchmarkTfLiteModel::CleanUp() {
inputs_data_.clear();
}
BenchmarkTfLiteModel::~BenchmarkTfLiteModel() {
CleanUp();
interpreter_runner_.reset();
interpreter_.reset();
}
std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
std::vector<Flag> flags = BenchmarkModel::GetFlags();
std::vector<Flag> specific_flags = {
CreateFlag<std::string>("graph", ¶ms_, "graph file name"),
CreateFlag<std::string>("input_layer", ¶ms_, "input layer names"),
CreateFlag<std::string>("input_layer_shape", ¶ms_,
"input layer shape"),
CreateFlag<std::string>(
"input_layer_value_range", ¶ms_,
"A map-like string representing value range for *integer* input "
"layers. Each item is separated by ':', and the item value consists "
"of input layer name and integer-only range values (both low and "
"high are inclusive) separated by ',', e.g. input1,1,2:input2,0,254"),
CreateFlag<std::string>(
"input_layer_value_files", ¶ms_,
"A map-like string representing value file. Each item is separated "
"by ',', and the item value consists "
"of input layer name and value file path separated by ':', e.g. "
"input1:file_path1,input2:file_path2. In case the input layer name "
"contains ':' e.g. \"input:0\", escape it with \"\\:\". If the "
"input_name appears both in input_layer_value_range and "
"input_layer_value_files, input_layer_value_range of the input_name "
"will be ignored. The file format is binary and it should be array "
"format or null separated strings format."),
CreateFlag<bool>("allow_fp16", ¶ms_, "allow fp16"),
CreateFlag<bool>("require_full_delegation", ¶ms_,
"require delegate to run the entire graph"),
CreateFlag<bool>("enable_op_profiling", ¶ms_, "enable op profiling"),
CreateFlag<std::string>(
"op_profiling_output_mode", ¶ms_,
"Output mode for op profiling results. Supported values are: "
"'stdout', 'csv' and 'proto'."),
CreateFlag<std::string>("op_profiling_output_file", ¶ms_,
"Output file for op profiling results."),
CreateFlag<int32_t>("max_profiling_buffer_entries", ¶ms_,
"max initial profiling buffer entries"),
CreateFlag<bool>("allow_dynamic_profiling_buffer_increase", ¶ms_,
"allow dynamic increase on profiling buffer entries"),
CreateFlag<std::string>("profiling_output_csv_file", ¶ms_,
"[DEPRECATED: Use op_profiling_output_file and "
"op_profiling_output_mode instead] File path to "
"export profile data as CSV, if not set "
"prints to stdout."),
CreateFlag<bool>("export_model_runtime_info", ¶ms_,
"Enable Model Runtime Info Export"),
CreateFlag<std::string>("model_runtime_info_output_file", ¶ms_,
"Proto File to export model runtime info to"),
CreateFlag<bool>(
"print_preinvoke_state", ¶ms_,
"print out the interpreter internals just before calling Invoke. The "
"internals will include allocated memory size of each tensor etc."),
CreateFlag<bool>(
"print_postinvoke_state", ¶ms_,
"print out the interpreter internals just before benchmark completes "
"(i.e. after all repeated Invoke calls complete). The internals will "
"include allocated memory size of each tensor etc."),
CreateFlag<bool>("release_dynamic_tensors", ¶ms_,
"Ensure dynamic tensor's memory is released when they "
"are not used."),
CreateFlag<int32_t>(
"optimize_memory_for_large_tensors", ¶ms_,
"Optimize memory usage for large tensors with sacrificing latency."),
CreateFlag<bool>("disable_delegate_clustering", ¶ms_,
"Disable delegate clustering."),
CreateFlag<bool>(
"enable_builtin_cast_constant_cache", ¶ms_,
"Cache the output of the builtin cast operation when its input "
"is a constant tensor."),
CreateFlag<std::string>(
"output_filepath", ¶ms_,
"File path to export outputs layer as binary data."),
CreateFlag<std::string>(
"output_proto_filepath", ¶ms_,
"File path to export outputs layer as tf example proto."),
CreateFlag<int32_t>(
"tensor_name_display_length", ¶ms_,
"The number of characters to show for the tensor's name when "
"printing the interpeter's state, defaults to 25."),
CreateFlag<int32_t>(
"tensor_type_display_length", ¶ms_,
"The number of characters to show for the tensor's type when "
"printing the interpeter's state, defaults to 15."),
CreateFlag<int32_t>(
"alloc_type_display_length", ¶ms_,
"The number of characters to show for the tensor's allocation type "
"when printing the interpeter's state, defaults to 18."),
CreateFlag<std::string>(
"signature_to_run_for", ¶ms_,
"If the model contains multiple signatures, use this flag to specify "
"the signature to benchmark. If multiple signatures are present and "
"this flag is not specified, the benchmark will throw an error. If "
"only one signature is present and this flag is not specified, the "
"default signature will be used."),
CreateFlag<bool>("list_signatures", ¶ms_,
"Displays all signatures present in the model and then "
"terminates the program.")};
flags.insert(flags.end(), specific_flags.begin(), specific_flags.end());
tools::ProvidedDelegateList delegate_providers(¶ms_);
delegate_providers.AppendCmdlineFlags(flags);
return flags;
}
void BenchmarkTfLiteModel::LogParams() {
BenchmarkModel::LogParams();
const bool verbose = params_.Get<bool>("verbose");
LOG_BENCHMARK_PARAM(std::string, "graph", "Graph", true);
LOG_BENCHMARK_PARAM(std::string, "signature_to_run_for", "Signature to run",
true);
LOG_BENCHMARK_PARAM(bool, "list_signatures",
"List signatures from the provided model", false);
LOG_BENCHMARK_PARAM(std::string, "input_layer", "Input layers", verbose);
LOG_BENCHMARK_PARAM(std::string, "input_layer_shape", "Input shapes",
verbose);
LOG_BENCHMARK_PARAM(std::string, "input_layer_value_range",
"Input value ranges", verbose);
LOG_BENCHMARK_PARAM(std::string, "input_layer_value_files",
"Input value files", verbose);
LOG_BENCHMARK_PARAM(bool, "allow_fp16", "Allow fp16", verbose);
LOG_BENCHMARK_PARAM(bool, "require_full_delegation",
"Require full delegation", verbose);
LOG_BENCHMARK_PARAM(bool, "enable_op_profiling", "Enable op profiling",
verbose);
LOG_BENCHMARK_PARAM(std::string, "op_profiling_output_mode",
"Op profiling output mode.", verbose);
LOG_BENCHMARK_PARAM(std::string, "op_profiling_output_file",
"Op profiling output file.", verbose);
LOG_BENCHMARK_PARAM(int32_t, "max_profiling_buffer_entries",
"Max initial profiling buffer entries", verbose);
LOG_BENCHMARK_PARAM(bool, "allow_dynamic_profiling_buffer_increase",
"Allow dynamic increase on profiling buffer entries",
verbose);
LOG_BENCHMARK_PARAM(std::string, "profiling_output_csv_file",
"CSV File to export profiling data to", verbose);
LOG_BENCHMARK_PARAM(bool, "export_model_runtime_info",
"Enable Model Runtime Info Export", verbose);
LOG_BENCHMARK_PARAM(std::string, "model_runtime_info_output_file",
"Proto File to export model runtime info to", verbose);
LOG_BENCHMARK_PARAM(bool, "print_preinvoke_state",
"Print pre-invoke interpreter state", verbose);
LOG_BENCHMARK_PARAM(bool, "print_postinvoke_state",
"Print post-invoke interpreter state", verbose);
LOG_BENCHMARK_PARAM(bool, "release_dynamic_tensors",
"Release dynamic tensor memory", verbose);
LOG_BENCHMARK_PARAM(int32_t, "optimize_memory_for_large_tensors",
"Optimize memory usage for large tensors", verbose);
LOG_BENCHMARK_PARAM(bool, "disable_delegate_clustering",
"Disable delegate clustering", verbose);
LOG_BENCHMARK_PARAM(bool, "enable_builtin_cast_constant_cache",
"Constant CAST output cache", verbose);
LOG_BENCHMARK_PARAM(std::string, "output_filepath",
"File path to export outputs layer to", verbose);
LOG_BENCHMARK_PARAM(std::string, "output_proto_filepath",
"File path to export outputs layer as tf example to",
verbose);
LOG_BENCHMARK_PARAM(int32_t, "tensor_name_display_length",
"Tensor name display length", verbose);
LOG_BENCHMARK_PARAM(int32_t, "tensor_type_display_length",
"Tensor type display length", verbose);
LOG_BENCHMARK_PARAM(int32_t, "alloc_type_display_length",
"Tensor allocation type display length", verbose);
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
delegate_provider->LogParams(params_, verbose);
}
}
TfLiteStatus BenchmarkTfLiteModel::ValidateParams() {
TF_LITE_ENSURE_STATUS(BenchmarkModel::ValidateParams());
if (params_.Get<std::string>("graph").empty()) {
TFLITE_LOG(ERROR)
<< "Please specify the name of your TF Lite input file with --graph";
return kTfLiteError;
}
if (params_.Get<bool>("enable_op_profiling")) {
bool found =
std::find(std::begin(kOpProfilingOutputModes),
std::end(kOpProfilingOutputModes),
params_.Get<std::string>("op_profiling_output_mode")) !=
std::end(kOpProfilingOutputModes);
if (!found) {
TFLITE_LOG(ERROR) << "Output mode"
<< params_.Get<std::string>("op_profiling_output_mode")
<< " is not supported. Supported values are: 'stdout', "
"'csv' and 'proto'.";
return kTfLiteError;
}
if (!params_.Get<std::string>("profiling_output_csv_file").empty()) {
params_.Set<std::string>("op_profiling_output_mode",
kOpProfilingOutputModeCsv);
params_.Set<std::string>(
"op_profiling_output_file",
params_.Get<std::string>("profiling_output_csv_file"));
}
}
return PopulateInputLayerInfo(
params_.Get<std::string>("input_layer"),
params_.Get<std::string>("input_layer_shape"),
params_.Get<std::string>("input_layer_value_range"),
params_.Get<std::string>("input_layer_value_files"), &inputs_);
}
uint64_t BenchmarkTfLiteModel::ComputeInputBytes() {
TFLITE_TOOLS_CHECK(interpreter_runner_);
uint64_t total_input_bytes = 0;
for (int input : interpreter_runner_->inputs()) {
auto* t = interpreter_runner_->tensor(input);
total_input_bytes += t->bytes;
}
return total_input_bytes;
}
int64_t BenchmarkTfLiteModel::MayGetModelFileSize() {
std::string fd_or_graph_path = params_.Get<std::string>("graph");
std::vector<absl::string_view> parts = absl::StrSplit(fd_or_graph_path, ':');
if (!parts.empty() && parts[0] == "fd") {
int64_t model_size = -1;
if (parts.size() != 4 || !absl::SimpleAtoi(parts[3], &model_size)) {
TFLITE_LOG(ERROR) << "Failed to parse model file size: "
<< fd_or_graph_path;
}
return model_size;
}
std::ifstream in_file(fd_or_graph_path, std::ios::binary | std::ios::ate);
return in_file.tellg();
}
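// Example (illustrative only): a file-descriptor graph path has the form
// "fd:<fd>:<offset>:<size>", so "fd:42:0:1048576" reports a model size of
// 1048576 bytes; a plain file path falls back to the on-disk file length.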
InputTensorData BenchmarkTfLiteModel::LoadInputTensorData(
const TfLiteTensor& t, const std::string& input_file_path) {
std::ifstream value_file(input_file_path, std::ios::binary);
if (!value_file.good()) {
TFLITE_LOG(FATAL) << "Failed to read the input_layer_value_file:"
<< input_file_path;
}
InputTensorData t_data;
  if (t.type == kTfLiteString) {
    // String tensors are staged in a DynamicBuffer. The file is either a
    // single serialized proto (*.pb) or a sequence of '\0'-separated strings,
    // one per tensor element.
    t_data.data = VoidUniquePtr(
        static_cast<void*>(new tflite::DynamicBuffer()),
        [](void* ptr) { delete static_cast<DynamicBuffer*>(ptr); });
if (input_file_path.size() > 3 &&
input_file_path.substr(input_file_path.size() - 3) == ".pb") {
std::stringstream buffer;
buffer << value_file.rdbuf();
static_cast<DynamicBuffer*>(t_data.data.get())
->AddString(buffer.str().data(), buffer.str().length());
TFLITE_LOG(INFO) << "Read " << buffer.str().length()
<< " bytes data from " << input_file_path << ".";
} else {
std::string line;
size_t num_line = 0;
while (std::getline(value_file, line, '\0')) {
num_line++;
static_cast<DynamicBuffer*>(t_data.data.get())
->AddString(line.data(), line.length());
}
int num_elements = GetNumElements(t.dims);
if (num_line != num_elements) {
TFLITE_LOG(FATAL)
<< "The number of string in the input_layer_value_file("
<< input_file_path << ") is " << num_line << ". It should be "
<< num_elements << ".";
}
}
} else {
value_file.seekg(0, std::ios_base::end);
if (value_file.tellg() != t.bytes) {
TFLITE_LOG(FATAL) << "The size of " << input_file_path << " is "
<< value_file.tellg() << " bytes. It should be "
<< t.bytes << " bytes.";
}
t_data.bytes = t.bytes;
t_data.data =
VoidUniquePtr(static_cast<void*>(new char[t.bytes]),
[](void* ptr) { delete[] static_cast<char*>(ptr); });
value_file.clear();
value_file.seekg(0, std::ios_base::beg);
value_file.read(static_cast<char*>(t_data.data.get()), t.bytes);
}
return t_data;
}
InputTensorData BenchmarkTfLiteModel::CreateRandomTensorData(
const TfLiteTensor& t, const InputLayerInfo* layer_info) {
float low_range = 0;
float high_range = 0;
if (layer_info && layer_info->has_value_range) {
low_range = layer_info->low;
high_range = layer_info->high;
} else {
utils::GetDataRangesForType(t.type, &low_range, &high_range);
}
return utils::CreateRandomTensorData(t, low_range, high_range);
}
TfLiteStatus BenchmarkTfLiteModel::PrepareInputData() {
CleanUp();
const std::vector<int>& runner_inputs = interpreter_runner_->inputs();
for (int i = 0; i < runner_inputs.size(); ++i) {
int tensor_index = runner_inputs[i];
const TfLiteTensor& t = *(interpreter_runner_->tensor(tensor_index));
const InputLayerInfo* input_layer_info = nullptr;
if (!inputs_.empty()) input_layer_info = &inputs_[i];
InputTensorData t_data;
if (input_layer_info && !input_layer_info->input_file_path.empty()) {
t_data = LoadInputTensorData(t, input_layer_info->input_file_path);
} else {
t_data = CreateRandomTensorData(t, input_layer_info);
}
inputs_data_.push_back(std::move(t_data));
}
return kTfLiteOk;
}
TfLiteStatus BenchmarkTfLiteModel::ResetInputsAndOutputs() {
const std::vector<int>& runner_inputs = interpreter_runner_->inputs();
for (int j = 0; j < runner_inputs.size(); ++j) {
int i = runner_inputs[j];
TfLiteTensor* t = interpreter_runner_->tensor(i);
if (t->type == kTfLiteString) {
if (inputs_data_[j].data) {
static_cast<DynamicBuffer*>(inputs_data_[j].data.get())
->WriteToTensor(t, nullptr);
} else {
tflite::DynamicBuffer buffer;
FillRandomString(&buffer, t->dims, []() {
return "we're have some friends over saturday to hang out in "
"the "
"yard";
});
buffer.WriteToTensor(t, nullptr);
}
} else {
std::memcpy(t->data.raw, inputs_data_[j].data.get(),
inputs_data_[j].bytes);
}
}
return kTfLiteOk;
}
TfLiteStatus BenchmarkTfLiteModel::InitInterpreter() {
auto resolver = GetOpResolver();
const int32_t num_threads = params_.Get<int32_t>("num_threads");
const bool use_caching = params_.Get<bool>("use_caching");
InterpreterOptions options;
options.SetEnsureDynamicTensorsAreReleased(
params_.Get<bool>("release_dynamic_tensors"));
options.OptimizeMemoryForLargeTensors(
params_.Get<int32_t>("optimize_memory_for_large_tensors"));
options.SetDisableDelegateClustering(
params_.Get<bool>("disable_delegate_clustering"));
options.SetCacheConstantCastOp(
params_.Get<bool>("enable_builtin_cast_constant_cache"));
tflite::InterpreterBuilder builder(*model_, *resolver, &options);
if (builder.SetNumThreads(num_threads) != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to set thread number";
return kTfLiteError;
}
builder(&interpreter_);
if (!interpreter_) {
TFLITE_LOG(ERROR) << "Failed to initialize the interpreter";
return kTfLiteError;
}
if (use_caching) {
external_context_ = std::make_unique<tflite::ExternalCpuBackendContext>();
std::unique_ptr<tflite::CpuBackendContext> cpu_backend_context(
new tflite::CpuBackendContext());
cpu_backend_context->SetUseCaching(true);
cpu_backend_context->SetMaxNumThreads(num_threads);
external_context_->set_internal_backend_context(
std::move(cpu_backend_context));
interpreter_->SetExternalContext(kTfLiteCpuBackendContext,
external_context_.get());
}
return kTfLiteOk;
}
TfLiteStatus BenchmarkTfLiteModel::Init() {
TF_LITE_ENSURE_STATUS(LoadModel());
TF_LITE_ENSURE_STATUS(InitInterpreter());
if (params_.Get<bool>("list_signatures")) {
const std::vector<const std::string*>& keys =
interpreter_->signature_keys();
TFLITE_LOG(INFO) << "The Model contains " << keys.size()
<< " signature key(s).";
if (!keys.empty()) {
TFLITE_LOG(INFO) << "They are listed below: ";
}
for (const std::string* key : keys) {
TFLITE_LOG(INFO) << "-> Signature Key: " << *key;
}
return kTfLiteError;
}
int total_nodes = 0;
for (int i = 0; i < interpreter_->subgraphs_size(); ++i) {
total_nodes += static_cast<int>(interpreter_->subgraph(i)->nodes_size());
}
if (total_nodes > params_.Get<int32_t>("max_profiling_buffer_entries")) {
constexpr int kProfilingBufferHeadrooms = 512;
params_.Set<int32_t>("max_profiling_buffer_entries",
total_nodes + kProfilingBufferHeadrooms);
}
AddOwnedListener(MayCreateProfilingListener());
AddOwnedListener(std::unique_ptr<BenchmarkListener>(
new InterpreterStatePrinter(interpreter_.get())));
if (params_.Get<bool>("export_model_runtime_info")) {
AddOwnedListener(std::unique_ptr<BenchmarkListener>(
new ModelRuntimeInfoListener(interpreter_.get())));
}
interpreter_->SetAllowFp16PrecisionForFp32(params_.Get<bool>("allow_fp16"));
std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
status_and_runner = BenchmarkInterpreterRunner::Create(
interpreter_.get(), params_.Get<std::string>("signature_to_run_for"));
TF_LITE_ENSURE_STATUS(status_and_runner.first);
interpreter_runner_ = std::move(status_and_runner.second);
const std::vector<int>& runner_inputs = interpreter_runner_->inputs();
if (!inputs_.empty()) {
TFLITE_TOOLS_CHECK_EQ(inputs_.size(), runner_inputs.size())
<< "Inputs mismatch: Model inputs #:" << inputs_.size()
<< " expected: " << runner_inputs.size();
}
for (int j = 0; j < inputs_.size(); ++j) {
const InputLayerInfo& input = inputs_[j];
int i = runner_inputs[j];
TfLiteTensor* t = interpreter_runner_->tensor(i);
if (input.name != t->name) {
TFLITE_LOG(WARN) << "Tensor # " << i << " is named " << t->name
<< " but flags call it " << input.name;
}
if (t->type != kTfLiteString && input.shape.size() != t->dims->size) {
TFLITE_LOG(ERROR) << "Input tensor #" << i << " should have "
<< t->dims->size << " dimensions!";
return kTfLiteError;
}
}
for (int j = 0; j < inputs_.size(); ++j) {
const InputLayerInfo& input = inputs_[j];
int i = runner_inputs[j];
TfLiteTensor* t = interpreter_runner_->tensor(i);
if (t->type != kTfLiteString) {
interpreter_runner_->ResizeInputTensor(i, input.shape);
}
}
owned_delegates_.clear();
std::unordered_set<int> checked_node_ids;
tools::ProvidedDelegateList delegate_providers(¶ms_);
auto created_delegates = delegate_providers.CreateAllRankedDelegates();
TFLITE_MAY_LOG(INFO, (created_delegates.size() >= 2))
<< "Going to apply " << created_delegates.size()
<< " delegates one after another.";
if (created_delegates.empty() &&
params_.Get<bool>("require_full_delegation")) {
TFLITE_LOG(ERROR) << "Disallowed CPU fallback detected.";
return kTfLiteError;
}
  // Apply each created delegate in ranked order, then count how many nodes of
  // the execution plan the delegate actually claimed.
  for (auto& created_delegate : created_delegates) {
    const auto* delegate_provider = created_delegate.provider;
    TfLiteDelegate* delegate = created_delegate.delegate.get();
TFLITE_TOOLS_CHECK(delegate != nullptr)
<< "The created delegate by the delegate provider should not be "
"nullptr!";
owned_delegates_.emplace_back(std::move(created_delegate.delegate));
if (interpreter_->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to apply " << delegate_provider->GetName()
<< " delegate.";
return kTfLiteError;
} else {
int num_delegated_kernels = 0;
for (int i = 0; i < interpreter_runner_->execution_plan().size(); ++i) {
int node_id = interpreter_runner_->execution_plan()[i];
if (checked_node_ids.find(node_id) != checked_node_ids.end()) {
continue;
}
const TfLiteNode& node =
interpreter_runner_->node_and_registration(node_id)->first;
if (node.delegate != nullptr) {
num_delegated_kernels++;
checked_node_ids.insert(node_id);
}
}
      // The graph is fully delegated when the execution plan has collapsed to
      // a single node and that node is the delegate's kernel.
      bool fully_delegated =
          (num_delegated_kernels == 1 &&
           interpreter_runner_->execution_plan().size() == 1);
if (params_.Get<bool>("require_full_delegation") && !fully_delegated) {
TFLITE_LOG(ERROR) << "Disallowed CPU fallback detected.";
return kTfLiteError;
}
if (fully_delegated) {
TFLITE_LOG(INFO) << "Explicitly applied "
<< delegate_provider->GetName()
<< " delegate, and the model graph will be completely"
<< " executed by the delegate.";
} else if (num_delegated_kernels > 0) {
TFLITE_LOG(INFO) << "Explicitly applied "
<< delegate_provider->GetName()
<< " delegate, and the model graph will be partially"
<< " executed by the delegate w/ "
<< num_delegated_kernels << " delegate kernels.";
} else {
TFLITE_LOG(INFO) << "Though " << delegate_provider->GetName()
<< " delegate is explicitly applied, the model "
"graph will not be"
<< " executed by the delegate.";
}
}
}
if (interpreter_runner_->AllocateTensors() != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to allocate tensors!";
return kTfLiteError;
}
AddOwnedListener(
std::unique_ptr<BenchmarkListener>(new RuyProfileListener()));
AddOwnedListener(std::unique_ptr<BenchmarkListener>(
new OutputSaver(interpreter_runner_.get())));
return kTfLiteOk;
}
TfLiteStatus BenchmarkTfLiteModel::LoadModel() {
std::string fd_or_graph_path = params_.Get<std::string>("graph");
model_loader_ = tools::CreateModelLoaderFromPath(fd_or_graph_path);
if (!model_loader_) {
TFLITE_LOG(ERROR) << "Failed to initialize model loader with path "
<< fd_or_graph_path;
return kTfLiteError;
}
if (!model_loader_->Init()) {
TFLITE_LOG(ERROR) << "Failed to load model " << fd_or_graph_path;
return kTfLiteError;
}
model_ = tflite::FlatBufferModel::BuildFromBuffer(
reinterpret_cast<const char*>(
model_loader_->GetModel()->allocation()->base()),
model_loader_->GetModel()->allocation()->bytes());
TFLITE_LOG(INFO) << "Loaded model " << fd_or_graph_path;
return kTfLiteOk;
}
std::unique_ptr<tflite::OpResolver> BenchmarkTfLiteModel::GetOpResolver()
const {
tflite::ops::builtin::BuiltinOpResolver* resolver = nullptr;
if (params_.HasParam("use_xnnpack") &&
params_.HasValueSet<bool>("use_xnnpack") &&
!params_.Get<bool>("use_xnnpack")) {
resolver =
new tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates();
} else {
resolver = new tflite::ops::builtin::BuiltinOpResolver();
}
RegisterSelectedOps(resolver);
return std::unique_ptr<tflite::OpResolver>(resolver);
}
std::unique_ptr<BenchmarkListener>
BenchmarkTfLiteModel::MayCreateProfilingListener() const {
if (!params_.Get<bool>("enable_op_profiling")) return nullptr;
return std::unique_ptr<BenchmarkListener>(new ProfilingListener(
interpreter_.get(), params_.Get<int32_t>("max_profiling_buffer_entries"),
params_.Get<bool>("allow_dynamic_profiling_buffer_increase"),
params_.Get<std::string>("op_profiling_output_file"),
CreateProfileSummaryFormatter(
params_.Get<std::string>("op_profiling_output_mode"))));
}
TfLiteStatus BenchmarkTfLiteModel::RunImpl() {
return interpreter_runner_->Invoke();
}
}
} | #include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace benchmark {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
class TestBenchmarkListener : public BenchmarkListener {
public:
void OnBenchmarkEnd(const BenchmarkResults& results) override {
results_ = results;
}
BenchmarkResults results_;
};
TEST(BenchmarkTfLiteModelTest, GetModelSizeFromPathSucceeded) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
params.Set<std::string>("graph", kModelPath);
params.Set<int>("num_runs", 1);
params.Set<int>("warmup_runs", 0);
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
TestBenchmarkListener listener;
benchmark.AddListener(&listener);
benchmark.Run();
EXPECT_GE(listener.results_.model_size_mb(), 0);
}
TEST(BenchmarkTfLiteModelTest, GetModelSizeFromFileDescriptorSucceeded) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
int fd = open(kModelPath, O_RDONLY);
ASSERT_GE(fd, 0);
int model_offset = 0;
struct stat stat_buf = {0};
ASSERT_EQ(fstat(fd, &stat_buf), 0);
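  // The benchmark tool accepts models passed by file descriptor through the
  // "fd:<fd>:<offset>:<size>" graph spec assembled below.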
params.Set<std::string>("graph", absl::StrCat("fd:", fd, ":", model_offset,
":", stat_buf.st_size));
params.Set<int>("num_runs", 1);
params.Set<int>("warmup_runs", 0);
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
TestBenchmarkListener listener;
benchmark.AddListener(&listener);
benchmark.Run();
EXPECT_EQ(listener.results_.model_size_mb(), stat_buf.st_size / 1e6);
}
TEST(BenchmarkTfLiteModelTest, ResizeInputWithDelegate) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
params.Set<std::string>("graph", kModelPath);
params.Set<bool>("use_xnnpack", true);
params.Set<std::string>("input_layer", "input_87");
params.Set<std::string>("input_layer_shape", "2,224,224,3");
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
EXPECT_EQ(benchmark.Run(), kTfLiteOk);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_tflite_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
440ad401-881b-4038-950a-c4db8e384c34 | cpp | tensorflow/tensorflow | latency_benchmark | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.cc | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/latency_benchmark_test.cc | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.h"
#include <errno.h>
#include <sys/stat.h>
#include <fstream>
#include <iterator>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
namespace {
static constexpr char kBenchmarkToolName[] = "(BenchmarkModelAndroid)";
class DelegatePerformanceReportingListener : public BenchmarkListener {
public:
void OnBenchmarkStart(const BenchmarkParams& unused) override {
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_START);
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
ReportResult(results);
}
void ReportFailure(TfLiteStatus status) {
std::string status_msg =
status == kTfLiteError
? "TFLite error"
: (status == kTfLiteDelegateError ? "TFLite delegate error"
: "unexpected TFLite status");
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Benchmark failed due to %s with status code %d.",
status_msg.c_str(), status);
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_ERROR);
results_proto_.mutable_error()->mutable_error_code()->set_tflite_error(
status);
results_proto_.mutable_error()->set_error_message(status_msg);
}
const proto::benchmark::LatencyResults& GetResults() {
return results_proto_;
}
private:
void ReportResult(const BenchmarkResults& results) {
tensorflow::Stat<int64_t> warmup_us = results.warmup_time_us();
tensorflow::Stat<int64_t> inference_us = results.inference_time_us();
profiling::memory::MemoryUsage init_mem_usage = results.init_mem_usage();
profiling::memory::MemoryUsage overall_mem_usage =
results.overall_mem_usage();
if (results.model_size_mb() > 0) {
AddMetric("model_size_megabyte",
results.model_size_mb());
}
AddMetric("initialization_latency_us",
results.startup_latency_us());
AddMetric("warmup_latency_average_us", warmup_us.avg());
AddMetric("warmup_latency_min_us", warmup_us.min());
AddMetric("warmup_latency_max_us", warmup_us.max());
AddMetric("warmup_latency_standard_deviation_us",
warmup_us.std_deviation());
AddMetric("inference_latency_average_us",
inference_us.avg());
AddMetric("inference_latency_min_us",
inference_us.min());
AddMetric("inference_latency_max_us",
inference_us.max());
AddMetric("inference_latency_standard_deviation_us",
inference_us.std_deviation());
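    // Memory figures arrive as kibibytes (max RSS footprint) and bytes (heap
    // counters); dividing by 1024 once or twice respectively yields the
    // mebibyte values the metric names promise.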
AddMetric("initialization_memory_max_rss_mebibyte",
init_mem_usage.mem_footprint_kb / 1024.0);
AddMetric("initialization_memory_total_non_mmapped_heap_mebibyte",
init_mem_usage.total_allocated_bytes / 1024.0 / 1024.0);
AddMetric(
"initialization_memory_in_use_heap_mebibyte",
init_mem_usage.in_use_allocated_bytes / 1024.0 / 1024.0);
AddMetric("overall_memory_max_rss_mebibyte",
overall_mem_usage.mem_footprint_kb / 1024.0);
AddMetric(
"overall_memory_total_non_mmapped_heap_mebibyte",
overall_mem_usage.total_allocated_bytes / 1024.0 / 1024.0);
AddMetric(
"overall_memory_in_use_heap_mebibyte",
overall_mem_usage.in_use_allocated_bytes / 1024.0 / 1024.0);
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_END);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Benchmark finished.");
}
  void AddMetric(const std::string& name, float value) {
proto::benchmark::BenchmarkMetric* metric = results_proto_.add_metrics();
metric->set_name(name);
metric->set_value(value);
}
proto::benchmark::LatencyResults results_proto_;
};
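// Translates the delegate choice in TFLiteSettings into benchmark-tool flags.
// Illustrative mapping (values are examples, not from the original source):
// delegate=XNNPACK with xnnpack_settings{num_threads: 4} yields
//   {"--use_xnnpack=true", "--num_threads=4"},
// which Benchmark() below appends to the tool's argv.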
std::vector<std::string> ParseArgumentsFromTfLiteSettings(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path) {
std::vector<std::string> args;
if (tflite_settings_path.empty()) {
return args;
}
if (tflite_settings.stable_delegate_loader_settings()) {
args.push_back(absl::StrFormat("--stable_delegate_settings_file=%s",
tflite_settings_path));
return args;
}
switch (tflite_settings.delegate()) {
case Delegate_XNNPACK: {
args.push_back("--use_xnnpack=true");
if (tflite_settings.xnnpack_settings()) {
if (tflite_settings.xnnpack_settings()->num_threads()) {
args.push_back(absl::StrFormat(
"--num_threads=%d",
tflite_settings.xnnpack_settings()->num_threads()));
}
if (tflite_settings.xnnpack_settings()->flags() ==
XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16) {
args.push_back("--xnnpack_force_fp16=true");
}
}
return args;
}
case Delegate_GPU: {
args.push_back("--use_gpu=true");
const tflite::GPUSettings* gpu_settings = tflite_settings.gpu_settings();
if (gpu_settings) {
if (gpu_settings->is_precision_loss_allowed()) {
args.push_back("--gpu_precision_loss_allowed=true");
}
if (gpu_settings->enable_quantized_inference()) {
args.push_back("--gpu_experimental_enable_quant=true");
}
if (gpu_settings->inference_preference() ==
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED) {
args.push_back("--gpu_inference_for_sustained_speed=true");
}
if (gpu_settings->force_backend() == GPUBackend_OPENCL) {
args.push_back("--gpu_backend=cl");
} else if (gpu_settings->force_backend() == GPUBackend_OPENGL) {
args.push_back("--gpu_backend=gl");
}
if (gpu_settings->cache_directory()) {
args.push_back(
absl::StrFormat("--delegate_serialize_dir=%s",
gpu_settings->cache_directory()->c_str()));
}
if (gpu_settings->model_token()) {
args.push_back(absl::StrFormat("--delegate_serialize_token=%s",
gpu_settings->model_token()->c_str()));
}
}
break;
}
case Delegate_EDGETPU: {
args.push_back("--use_edgetpu=true");
break;
}
default:
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Delegate type %s is not enabled by the latency module.",
EnumNameDelegate(tflite_settings.delegate()));
break;
}
if (tflite_settings.disable_default_delegates()) {
args.push_back("--use_xnnpack=false");
}
return args;
}
}
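// Runs the TFLite benchmark tool in-process against the model referenced by
// the given file descriptor and returns the collected latency metrics. The
// synthesized command line is, schematically:
//   (BenchmarkModelAndroid) --graph=fd:<model_fd>:<model_offset>:<model_size>
//   <flags derived from TFLiteSettings> <caller-provided args>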
proto::benchmark::LatencyResults Benchmark(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path, int model_fd, size_t model_offset,
size_t model_size, const std::vector<std::string>& args) {
std::vector<char*> argv;
argv.push_back(const_cast<char*>(kBenchmarkToolName));
std::string arg_graph =
absl::StrCat("--graph=fd:", model_fd, ":", model_offset, ":", model_size);
argv.push_back(const_cast<char*>(arg_graph.data()));
std::vector<std::string> args_from_tflite_settings =
ParseArgumentsFromTfLiteSettings(tflite_settings, tflite_settings_path);
for (const std::string& arg : args_from_tflite_settings) {
argv.push_back(const_cast<char*>(arg.data()));
}
for (const std::string& arg : args) {
argv.push_back(const_cast<char*>(arg.data()));
}
BenchmarkTfLiteModel benchmark;
DelegatePerformanceReportingListener delegatePerformanceReporting;
benchmark.AddListener(&delegatePerformanceReporting);
TfLiteStatus status = benchmark.Run(argv.size(), argv.data());
if (status != kTfLiteOk) {
delegatePerformanceReporting.ReportFailure(status);
}
return delegatePerformanceReporting.GetResults();
}
}
}
} | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.h"
#include <fcntl.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
static constexpr char kSettingsFilePath[] =
"tensorflow/lite/tools/delegates/experimental/stable_delegate/"
"test_sample_stable_delegate_settings.json";
class LatencyBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
model_fp_ = fopen(kModelPath, "rb");
ASSERT_TRUE(model_fp_ != nullptr);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_END), 0);
model_size_ = ftell(model_fp_);
ASSERT_NE(model_size_, -1);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_SET), 0);
settings_ = parser_.Parse(kSettingsFilePath);
}
delegates::utils::TfLiteSettingsJsonParser parser_;
const TFLiteSettings* settings_;
size_t model_size_;
FILE* model_fp_;
std::vector<std::string> args_;
};
TEST_F(LatencyBenchmarkTest, FailedWithNullFileDescriptor) {
  EXPECT_TRUE(Benchmark(*settings_, kSettingsFilePath,
                        /*model_fd=*/0, /*model_offset=*/0,
                        /*model_size=*/0, args_)
                  .has_error());
}
TEST_F(LatencyBenchmarkTest, FailedWithInvalidNumThreadsSettings) {
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings =
      CreateXNNPackSettings(fbb, /*num_threads=*/-3);
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_delegate(Delegate_XNNPACK);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
  EXPECT_TRUE(Benchmark(*settings,
                        /*tflite_settings_path=*/"example_path",
                        /*model_fd=*/fileno(model_fp_),
                        /*model_offset=*/0, model_size_, args_)
                  .has_error());
}
TEST_F(LatencyBenchmarkTest, SucceedWithEmptyTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
TEST_F(LatencyBenchmarkTest, SucceedWithCpuTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_disable_default_delegates(true);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
#ifdef __ANDROID__
TEST_F(LatencyBenchmarkTest, SucceedWithGpuTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_delegate(Delegate_GPU);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
#endif
TEST_F(LatencyBenchmarkTest, SucceedWithSampleStableDelegate) {
EXPECT_EQ(Benchmark(*settings_, kSettingsFilePath, fileno(model_fp_),
                      /*model_offset=*/0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
TEST_F(LatencyBenchmarkTest,
SucceedWithSampleStableDelegateAndBenchmarkToolArguments) {
std::vector<std::string> args = {"--warmup_runs=10"};
EXPECT_EQ(Benchmark(*settings_, kSettingsFilePath, fileno(model_fp_),
                      /*model_offset=*/0, model_size_, args)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/latency_benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a9b2913-0787-4486-8b90-3e84df9f9f90 | cpp | tensorflow/tensorflow | accuracy_benchmark | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.cc | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/accuracy_benchmark_test.cc | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.h"
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <cstddef>
#include <cstring>
#include <string>
#include <vector>
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/vector.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/c/c_api.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/status_codes.h"
namespace tflite {
namespace benchmark {
namespace accuracy {
namespace {
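// Splits a buffer holding a sequence of size-prefixed flatbuffers into the
// individual BenchmarkEvent roots: each entry is a 4-byte uoffset_t length
// prefix followed by that many payload bytes, hence the cursor advances by
// current_size + sizeof(uoffset_t).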
std::vector<const tflite::BenchmarkEvent*> ToBenchmarkEvents(uint8_t* data,
size_t size) {
std::vector<const tflite::BenchmarkEvent*> results;
uint8_t* current_root = data;
while (current_root < data + size) {
flatbuffers::uoffset_t current_size =
flatbuffers::GetPrefixedSize(current_root);
results.push_back(
flatbuffers::GetSizePrefixedRoot<tflite::BenchmarkEvent>(current_root));
current_root += current_size + sizeof(flatbuffers::uoffset_t);
}
TFLITE_CHECK_EQ(current_root, data + size);
return results;
}
}
flatbuffers::Offset<BenchmarkEvent> Benchmark(
flatbuffers::FlatBufferBuilder& fbb, const TFLiteSettings& tflite_settings,
int model_fd, size_t model_offset, size_t model_size,
const char* result_path_chars) {
std::string result_path(result_path_chars);
std::string storage_path = result_path + "/storage_path.fb";
int return_code = std::remove(storage_path.c_str());
if (return_code) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Failed to remove storage file (%s): %s.",
storage_path.c_str(), strerror(errno));
}
flatbuffers::FlatBufferBuilder mini_benchmark_fbb;
TFLiteSettingsT tflite_settings_t;
tflite_settings.UnPackTo(&tflite_settings_t);
flatbuffers::Offset<TFLiteSettings> tflite_settings_offset =
CreateTFLiteSettings(mini_benchmark_fbb, &tflite_settings_t);
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TFLiteSettings>>>
tflite_settings_vector_offset =
mini_benchmark_fbb.CreateVector({tflite_settings_offset});
ModelFileBuilder model_file_builder(mini_benchmark_fbb);
model_file_builder.add_fd(model_fd);
model_file_builder.add_offset(model_offset);
model_file_builder.add_length(model_size);
flatbuffers::Offset<ModelFile> model_file_offset =
model_file_builder.Finish();
flatbuffers::Offset<BenchmarkStoragePaths> storage_paths_offset =
CreateBenchmarkStoragePaths(mini_benchmark_fbb,
mini_benchmark_fbb.CreateString(storage_path),
mini_benchmark_fbb.CreateString(result_path));
flatbuffers::Offset<ValidationSettings> validation_settings_offset =
CreateValidationSettings(mini_benchmark_fbb,
                               /*per_test_timeout_ms=*/5000);
mini_benchmark_fbb.Finish(CreateMinibenchmarkSettings(
mini_benchmark_fbb, tflite_settings_vector_offset, model_file_offset,
storage_paths_offset, validation_settings_offset));
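  // Hand the serialized MinibenchmarkSettings to the mini-benchmark C API and
  // block until validation completes.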
TfLiteMiniBenchmarkSettings* settings = TfLiteMiniBenchmarkSettingsCreate();
TfLiteMiniBenchmarkSettingsSetFlatBufferData(
settings, mini_benchmark_fbb.GetBufferPointer(),
mini_benchmark_fbb.GetSize());
TfLiteMiniBenchmarkResult* result =
TfLiteBlockingValidatorRunnerTriggerValidation(settings);
std::vector<const BenchmarkEvent*> events =
ToBenchmarkEvents(TfLiteMiniBenchmarkResultFlatBufferData(result),
TfLiteMiniBenchmarkResultFlatBufferDataSize(result));
TfLiteMiniBenchmarkSettingsFree(settings);
if (events.size() != 1) {
TfLiteMiniBenchmarkResultFree(result);
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Number of result events (%zu) doesn't match the expectation (%zu).",
        events.size(), static_cast<size_t>(1));
flatbuffers::Offset<BenchmarkError> error =
CreateBenchmarkError(fbb, BenchmarkStage_INFERENCE,
kBenchmarkResultCountMismatch);
BenchmarkEventBuilder builder(fbb);
builder.add_event_type(BenchmarkEventType_ERROR);
builder.add_error(error);
return builder.Finish();
}
BenchmarkEventT benchmark_event;
events[0]->UnPackTo(&benchmark_event);
TfLiteMiniBenchmarkResultFree(result);
return CreateBenchmarkEvent(fbb, &benchmark_event);
}
}
}
} | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.h"
#include <fcntl.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/status_codes.h"
namespace tflite {
namespace benchmark {
namespace accuracy {
namespace {
class AccuracyBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
acceleration::MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
if (!should_perform_test_) {
return;
}
std::string embedded_model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_FALSE(embedded_model_path.empty());
model_fp_ = fopen(embedded_model_path.c_str(), "rb");
ASSERT_NE(model_fp_, nullptr);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_END), 0);
model_size_ = ftell(model_fp_);
ASSERT_NE(model_size_, -1);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_SET), 0);
result_path_ = ::testing::TempDir();
}
void TearDown() override { fclose(model_fp_); }
std::string result_path_;
size_t model_size_;
FILE* model_fp_;
bool should_perform_test_ = true;
};
TEST_F(AccuracyBenchmarkTest, FailedWithInvalidModelFileDescriptor) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
delegates::utils::TfLiteSettingsJsonParser parser;
flatbuffers::FlatBufferBuilder builder;
std::vector<std::string> args;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_sample_stable_delegate_settings.json");
flatbuffers::Offset<BenchmarkEvent> offset =
      Benchmark(builder, *tflite_settings, /*model_fd=*/0,
                /*model_offset=*/0, /*model_size=*/0, result_path_.c_str());
builder.Finish(offset);
const BenchmarkEvent* event =
flatbuffers::GetRoot<BenchmarkEvent>(builder.GetBufferPointer());
ASSERT_NE(event, nullptr);
EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR);
ASSERT_NE(event->error(), nullptr);
EXPECT_EQ(event->error()->stage(), BenchmarkStage_INFERENCE);
EXPECT_EQ(event->error()->exit_code(),
DelegatePerformanceBenchmarkStatus::kBenchmarkResultCountMismatch);
}
TEST_F(AccuracyBenchmarkTest, SucceedWithSampleStableDelegate) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
delegates::utils::TfLiteSettingsJsonParser parser;
flatbuffers::FlatBufferBuilder builder;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_sample_stable_delegate_settings.json");
flatbuffers::Offset<BenchmarkEvent> offset = Benchmark(
builder, *tflite_settings, fileno(model_fp_),
      /*model_offset=*/0, model_size_, result_path_.c_str());
builder.Finish(offset);
const BenchmarkEvent* event =
flatbuffers::GetRoot<BenchmarkEvent>(builder.GetBufferPointer());
ASSERT_NE(event, nullptr);
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_EQ(event->error(), nullptr);
}
TEST_F(AccuracyBenchmarkTest, SucceedWithEmbeddedValidationAndXNNPack) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
delegates::utils::TfLiteSettingsJsonParser parser;
flatbuffers::FlatBufferBuilder builder;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
flatbuffers::Offset<BenchmarkEvent> offset = Benchmark(
builder, *tflite_settings, fileno(model_fp_),
      /*model_offset=*/0, model_size_, result_path_.c_str());
builder.Finish(offset);
const BenchmarkEvent* event =
flatbuffers::GetRoot<BenchmarkEvent>(builder.GetBufferPointer());
ASSERT_NE(event, nullptr);
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_EQ(event->error(), nullptr);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/accuracy_benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/accuracy_benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c899a3c4-6fd5-4e1e-a310-16b62a1d4c34 | cpp | tensorflow/tensorflow | quantization_wrapper_utils | tensorflow/lite/tools/optimize/quantization_wrapper_utils.cc | tensorflow/lite/tools/optimize/quantization_wrapper_utils_test.cc | #include "tensorflow/lite/tools/optimize/quantization_wrapper_utils.h"
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/compiler/mlir/lite/tools/optimize/operator_property.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace impl {
class FlatBufferModel;
}
namespace optimize {
namespace {
#ifdef TFLITE_CUSTOM_LSTM
constexpr bool kUseCustomLSTM = true;
#else
constexpr bool kUseCustomLSTM = false;
#endif
void MakeTensor(const string& name, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = {0};
tensor_raw->type = TensorType_FLOAT32;
tensor->reset(tensor_raw);
}
string CreateTensorName(int op_index, int tensor_index) {
return "intermediate_" + std::to_string(op_index) + "_" +
std::to_string(tensor_index);
}
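// e.g. CreateTensorName(0, 3) yields "intermediate_0_3", the naming scheme the
// unit test asserts on.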
bool IntermediateTensorExists(ModelT* model) {
for (int subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
++subgraph_idx) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
OperatorT* op = subgraph->operators[op_idx].get();
if (!op->intermediates.empty()) {
return true;
}
}
}
return false;
}
}
TfLiteStatus LoadModel(const string& path, ModelT* model) {
auto input_model = impl::FlatBufferModel::BuildFromFile(path.c_str());
if (!input_model) {
return kTfLiteError;
}
auto readonly_model = input_model->GetModel();
if (!readonly_model) {
return kTfLiteError;
}
readonly_model->UnPackTo(model);
return kTfLiteOk;
}
TfLiteStatus AddIntermediateTensorsToFusedOp(
flatbuffers::FlatBufferBuilder* builder, ModelT* model) {
if (model->subgraphs.size() == 1 && model->subgraphs[0]->operators.empty()) {
return kTfLiteOk;
}
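  // Idempotency guard: if any op already carries intermediate tensors, a
  // previous pass must have added them, so leave the model untouched (the
  // unit test exercises this by invoking the function twice).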
if (IntermediateTensorExists(model)) {
return kTfLiteOk;
}
for (int subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
++subgraph_idx) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
OperatorT* op = subgraph->operators[op_idx].get();
operator_property::OperatorProperty property =
operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);
if (property.intermediates.empty()) {
continue;
}
const int next_tensor_index = subgraph->tensors.size();
int num_intermediates = property.intermediates.size();
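      // Assumption: the custom-LSTM build expects a fixed set of 12
      // intermediate tensors, overriding the operator property's count.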
if (kUseCustomLSTM) {
num_intermediates = 12;
}
for (int i = 0; i < num_intermediates; ++i) {
std::unique_ptr<TensorT> intermediate_tensor;
auto name = CreateTensorName(op_idx, i);
MakeTensor(name, &intermediate_tensor);
subgraph->tensors.push_back(std::move(intermediate_tensor));
op->intermediates.push_back(next_tensor_index + i);
}
}
}
flatbuffers::Offset<Model> output_model_location =
Model::Pack(*builder, model);
FinishModelBuffer(*builder, output_model_location);
return kTfLiteOk;
}
bool WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes) {
std::fstream stream(out_file, std::ios::binary | std::ios::out);
for (size_t i = 0; i < num_bytes; i++) {
stream << bytes[i];
}
return (!stream.bad() && !stream.fail());
}
}
} | #include "tensorflow/lite/tools/optimize/quantization_wrapper_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace optimize {
namespace {
using ::testing::ElementsAreArray;
TEST(LstmPreprocess, Add2Tensors) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto lstm_op_code = std::make_unique<OperatorCodeT>();
auto lstm_op = std::make_unique<OperatorT>();
lstm_op_code->builtin_code = BuiltinOperator_LSTM;
lstm_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_LSTM);
lstm_op_code->version = 2;
lstm_op->opcode_index = 0;
lstm_op->inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
lstm_op->outputs = {24};
model->subgraphs.push_back(std::move(subgraph));
for (int i = 0; i < lstm_op->inputs.size(); ++i) {
const int index = lstm_op->inputs[i];
if (index == -1) {
continue;
}
auto tensor = std::make_unique<TensorT>();
tensor->name = "lstm_tensor" + std::to_string(index);
tensor->shape = {2, 3, 4};
tensor->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor));
}
model->subgraphs[0]->operators.push_back(std::move(lstm_op));
model->operator_codes.push_back(std::move(lstm_op_code));
model->buffers.push_back(std::move(buffer));
flatbuffers::FlatBufferBuilder builder;
tflite::optimize::AddIntermediateTensorsToFusedOp(&builder, model.get());
EXPECT_EQ(model->operator_codes.size(), 1);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 26);
EXPECT_EQ(model->buffers.size(), 1);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_LSTM);
EXPECT_EQ(model->subgraphs[0]->tensors[0]->name, "lstm_tensor0");
EXPECT_EQ(model->subgraphs[0]->tensors[21]->name, "intermediate_0_0");
EXPECT_EQ(model->subgraphs[0]->tensors[22]->name, "intermediate_0_1");
EXPECT_EQ(model->subgraphs[0]->tensors[23]->name, "intermediate_0_2");
EXPECT_EQ(model->subgraphs[0]->tensors[24]->name, "intermediate_0_3");
EXPECT_EQ(model->subgraphs[0]->tensors[25]->name, "intermediate_0_4");
EXPECT_THAT(
model->subgraphs[0]->operators[0]->inputs,
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->outputs,
ElementsAreArray({24}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->intermediates,
ElementsAreArray({21, 22, 23, 24, 25}));
tflite::optimize::AddIntermediateTensorsToFusedOp(&builder, model.get());
EXPECT_EQ(model->operator_codes.size(), 1);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 26);
EXPECT_EQ(model->buffers.size(), 1);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_LSTM);
EXPECT_EQ(model->subgraphs[0]->tensors[0]->name, "lstm_tensor0");
EXPECT_EQ(model->subgraphs[0]->tensors[21]->name, "intermediate_0_0");
EXPECT_EQ(model->subgraphs[0]->tensors[22]->name, "intermediate_0_1");
EXPECT_EQ(model->subgraphs[0]->tensors[23]->name, "intermediate_0_2");
EXPECT_EQ(model->subgraphs[0]->tensors[24]->name, "intermediate_0_3");
EXPECT_EQ(model->subgraphs[0]->tensors[25]->name, "intermediate_0_4");
EXPECT_THAT(
model->subgraphs[0]->operators[0]->inputs,
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->outputs,
ElementsAreArray({24}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->intermediates,
ElementsAreArray({21, 22, 23, 24, 25}));
}
}
}
}
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, &argv);
  return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/quantization_wrapper_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/quantization_wrapper_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
21a73865-3364-4558-ae4b-b86f47c55303 | cpp | tensorflow/tensorflow | modify_model_interface | tensorflow/lite/tools/optimize/python/modify_model_interface.cc | tensorflow/lite/tools/optimize/modify_model_interface_test.cc | #include "tensorflow/lite/tools/optimize/modify_model_interface.h"
#include <string>
#include "pybind11/pybind11.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace pybind11 {
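// Python binding. Illustrative usage from Python (module location is an
// assumption; the extension is built as _pywrap_modify_model_interface):
//   _pywrap_modify_model_interface.modify_model_interface(
//       "input.tflite", "output.tflite", input_type, output_type)
// where input_type/output_type are integers matching tflite::TensorType.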
PYBIND11_MODULE(_pywrap_modify_model_interface, m) {
m.def("modify_model_interface",
[](const std::string& input_file, const std::string& output_file,
const int input_type, const int output_type) -> int {
return tflite::optimize::ModifyModelInterface(
input_file, output_file,
static_cast<tflite::TensorType>(input_type),
static_cast<tflite::TensorType>(output_type));
});
}
} | #include "tensorflow/lite/tools/optimize/modify_model_interface.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace optimize {
namespace {
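// Builds a float32 -> QUANTIZE -> FULLY_CONNECTED -> DEQUANTIZE -> float32
// graph whose interior tensors already carry quantization parameters, so the
// tests can retarget or strip the boundary ops via ModifyModelInterface.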
std::unique_ptr<ModelT> CreateQuantizedModelSingleInputOutput(
const TensorType& quantization_type) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto quant_op_code = std::make_unique<OperatorCodeT>();
auto quant_op = std::make_unique<OperatorT>();
auto fc_op_code = std::make_unique<OperatorCodeT>();
auto fc_op = std::make_unique<OperatorT>();
auto dequant_op_code = std::make_unique<OperatorCodeT>();
auto dequant_op = std::make_unique<OperatorT>();
model->subgraphs.push_back(std::move(subgraph));
quant_op_code->builtin_code = BuiltinOperator_QUANTIZE;
quant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_QUANTIZE);
quant_op_code->version = 2;
fc_op_code->builtin_code = BuiltinOperator_FULLY_CONNECTED;
fc_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_FULLY_CONNECTED);
fc_op_code->version = 2;
dequant_op_code->builtin_code = BuiltinOperator_DEQUANTIZE;
dequant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_DEQUANTIZE);
dequant_op_code->version = 2;
quant_op->opcode_index = 0;
quant_op->inputs = {0};
quant_op->outputs = {1};
fc_op->opcode_index = 1;
fc_op->inputs = {1};
fc_op->outputs = {2};
dequant_op->opcode_index = 2;
dequant_op->inputs = {2};
dequant_op->outputs = {3};
model->subgraphs[0]->operators.push_back(std::move(quant_op));
model->subgraphs[0]->operators.push_back(std::move(fc_op));
model->subgraphs[0]->operators.push_back(std::move(dequant_op));
model->operator_codes.push_back(std::move(quant_op_code));
model->operator_codes.push_back(std::move(fc_op_code));
model->operator_codes.push_back(std::move(dequant_op_code));
model->subgraphs[0]->inputs = {0};
model->subgraphs[0]->outputs = {3};
auto tensor_0 = std::make_unique<TensorT>();
tensor_0->name = "tensor_0";
tensor_0->shape = {};
tensor_0->type = TensorType_FLOAT32;
auto tensor_1 = std::make_unique<TensorT>();
tensor_1->quantization = std::make_unique<QuantizationParametersT>();
tensor_1->quantization->scale.push_back(0.35);
tensor_1->quantization->zero_point.push_back(28);
tensor_1->name = "tensor_1";
tensor_1->shape = {};
tensor_1->type = quantization_type;
auto tensor_2 = std::make_unique<TensorT>();
tensor_2->quantization = std::make_unique<QuantizationParametersT>();
tensor_2->quantization->scale.push_back(0.12);
tensor_2->quantization->zero_point.push_back(50);
tensor_2->name = "tensor_2";
tensor_2->shape = {};
tensor_2->type = quantization_type;
auto tensor_3 = std::make_unique<TensorT>();
tensor_3->name = "tensor_3";
tensor_3->shape = {};
tensor_3->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor_0));
model->subgraphs[0]->tensors.push_back(std::move(tensor_1));
model->subgraphs[0]->tensors.push_back(std::move(tensor_2));
model->subgraphs[0]->tensors.push_back(std::move(tensor_3));
model->buffers.push_back(std::move(buffer));
return model;
}
std::unique_ptr<ModelT> CreateQuantizedModelMultipleInputOutput(
const TensorType& quantization_type) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto quant_op_code = std::make_unique<OperatorCodeT>();
auto quant_op_1 = std::make_unique<OperatorT>();
auto quant_op_2 = std::make_unique<OperatorT>();
auto fc_op_code = std::make_unique<OperatorCodeT>();
auto fc_op = std::make_unique<OperatorT>();
auto dequant_op_code = std::make_unique<OperatorCodeT>();
auto dequant_op_1 = std::make_unique<OperatorT>();
auto dequant_op_2 = std::make_unique<OperatorT>();
model->subgraphs.push_back(std::move(subgraph));
quant_op_code->builtin_code = BuiltinOperator_QUANTIZE;
quant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_QUANTIZE);
quant_op_code->version = 2;
fc_op_code->builtin_code = BuiltinOperator_FULLY_CONNECTED;
fc_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_FULLY_CONNECTED);
fc_op_code->version = 2;
dequant_op_code->builtin_code = BuiltinOperator_DEQUANTIZE;
dequant_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_DEQUANTIZE);
dequant_op_code->version = 2;
quant_op_1->opcode_index = 0;
quant_op_1->inputs = {0};
quant_op_1->outputs = {2};
quant_op_2->opcode_index = 0;
quant_op_2->inputs = {1};
quant_op_2->outputs = {3};
fc_op->opcode_index = 1;
fc_op->inputs = {2, 3};
fc_op->outputs = {4, 5};
dequant_op_1->opcode_index = 2;
dequant_op_1->inputs = {4};
dequant_op_1->outputs = {6};
dequant_op_2->opcode_index = 2;
dequant_op_2->inputs = {5};
dequant_op_2->outputs = {7};
model->subgraphs[0]->operators.push_back(std::move(quant_op_1));
model->subgraphs[0]->operators.push_back(std::move(quant_op_2));
model->subgraphs[0]->operators.push_back(std::move(fc_op));
model->subgraphs[0]->operators.push_back(std::move(dequant_op_1));
model->subgraphs[0]->operators.push_back(std::move(dequant_op_2));
model->operator_codes.push_back(std::move(quant_op_code));
model->operator_codes.push_back(std::move(fc_op_code));
model->operator_codes.push_back(std::move(dequant_op_code));
model->subgraphs[0]->inputs = {0, 1};
model->subgraphs[0]->outputs = {6, 7};
auto tensor_0 = std::make_unique<TensorT>();
tensor_0->name = "tensor_0";
tensor_0->shape = {};
tensor_0->type = TensorType_FLOAT32;
auto tensor_1 = std::make_unique<TensorT>();
tensor_1->name = "tensor_1";
tensor_1->shape = {};
tensor_1->type = TensorType_FLOAT32;
auto tensor_2 = std::make_unique<TensorT>();
tensor_2->quantization = std::make_unique<QuantizationParametersT>();
tensor_2->quantization->scale.push_back(0.35);
tensor_2->quantization->zero_point.push_back(28);
tensor_2->name = "tensor_2";
tensor_2->shape = {};
tensor_2->type = quantization_type;
auto tensor_3 = std::make_unique<TensorT>();
tensor_3->quantization = std::make_unique<QuantizationParametersT>();
tensor_3->quantization->scale.push_back(0.12);
tensor_3->quantization->zero_point.push_back(50);
tensor_3->name = "tensor_3";
tensor_3->shape = {};
tensor_3->type = quantization_type;
auto tensor_4 = std::make_unique<TensorT>();
tensor_4->quantization = std::make_unique<QuantizationParametersT>();
tensor_4->quantization->scale.push_back(0.45);
tensor_4->quantization->zero_point.push_back(28);
tensor_4->name = "tensor_4";
tensor_4->shape = {};
tensor_4->type = quantization_type;
auto tensor_5 = std::make_unique<TensorT>();
tensor_5->quantization = std::make_unique<QuantizationParametersT>();
tensor_5->quantization->scale.push_back(0.22);
tensor_5->quantization->zero_point.push_back(50);
tensor_5->name = "tensor_5";
tensor_5->shape = {};
tensor_5->type = quantization_type;
auto tensor_6 = std::make_unique<TensorT>();
tensor_6->name = "tensor_6";
tensor_6->shape = {};
tensor_6->type = TensorType_FLOAT32;
auto tensor_7 = std::make_unique<TensorT>();
tensor_7->name = "tensor_7";
tensor_7->shape = {};
tensor_7->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor_0));
model->subgraphs[0]->tensors.push_back(std::move(tensor_1));
model->subgraphs[0]->tensors.push_back(std::move(tensor_2));
model->subgraphs[0]->tensors.push_back(std::move(tensor_3));
model->subgraphs[0]->tensors.push_back(std::move(tensor_4));
model->subgraphs[0]->tensors.push_back(std::move(tensor_5));
model->subgraphs[0]->tensors.push_back(std::move(tensor_6));
model->subgraphs[0]->tensors.push_back(std::move(tensor_7));
model->buffers.push_back(std::move(buffer));
return model;
}
std::unique_ptr<ModelT> CreateFloatModel() {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto fc_op_code = std::make_unique<OperatorCodeT>();
auto fc_op = std::make_unique<OperatorT>();
model->subgraphs.push_back(std::move(subgraph));
fc_op_code->builtin_code = BuiltinOperator_FULLY_CONNECTED;
fc_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_FULLY_CONNECTED);
fc_op_code->version = 2;
fc_op->opcode_index = 0;
fc_op->inputs = {0};
fc_op->outputs = {1};
model->subgraphs[0]->operators.push_back(std::move(fc_op));
model->operator_codes.push_back(std::move(fc_op_code));
model->subgraphs[0]->inputs = {0};
model->subgraphs[0]->outputs = {1};
auto tensor_0 = std::make_unique<TensorT>();
tensor_0->name = "tensor_0";
tensor_0->shape = {};
tensor_0->type = TensorType_FLOAT32;
auto tensor_1 = std::make_unique<TensorT>();
tensor_1->name = "tensor_1";
tensor_1->shape = {};
tensor_1->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor_0));
model->subgraphs[0]->tensors.push_back(std::move(tensor_1));
model->buffers.push_back(std::move(buffer));
return model;
}
struct ModelInterface : ::testing::TestWithParam<tflite::TensorType> {};
TEST_P(ModelInterface, SingleInputOutput) {
TensorType quantization_type = GetParam();
auto model = CreateQuantizedModelSingleInputOutput(quantization_type);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), quantization_type,
quantization_type),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 3);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 1);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 2);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 1);
auto fc_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[fc_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_1");
EXPECT_EQ(input->type, quantization_type);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.35);
EXPECT_EQ(input->quantization->zero_point[0], 28);
auto output = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_2");
EXPECT_EQ(output->type, quantization_type);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.12);
EXPECT_EQ(output->quantization->zero_point[0], 50);
}
TEST_P(ModelInterface, MultipleInputOutput) {
TensorType quantization_type = GetParam();
auto model = CreateQuantizedModelMultipleInputOutput(quantization_type);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), quantization_type,
quantization_type),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 6);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 2);
EXPECT_EQ(model->subgraphs[0]->inputs[1], 3);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 4);
EXPECT_EQ(model->subgraphs[0]->outputs[1], 5);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 1);
auto fc_op = model->subgraphs[0]->operators[0].get();
auto input_1 = model->subgraphs[0]->tensors[fc_op->inputs[0]].get();
EXPECT_EQ(input_1->name, "tensor_2");
EXPECT_EQ(input_1->type, quantization_type);
EXPECT_FLOAT_EQ(input_1->quantization->scale[0], 0.35);
EXPECT_EQ(input_1->quantization->zero_point[0], 28);
auto input_2 = model->subgraphs[0]->tensors[fc_op->inputs[1]].get();
EXPECT_EQ(input_2->name, "tensor_3");
EXPECT_EQ(input_2->type, quantization_type);
EXPECT_FLOAT_EQ(input_2->quantization->scale[0], 0.12);
EXPECT_EQ(input_2->quantization->zero_point[0], 50);
auto output_1 = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output_1->name, "tensor_4");
EXPECT_EQ(output_1->type, quantization_type);
EXPECT_FLOAT_EQ(output_1->quantization->scale[0], 0.45);
EXPECT_EQ(output_1->quantization->zero_point[0], 28);
auto output_2 = model->subgraphs[0]->tensors[fc_op->outputs[1]].get();
EXPECT_EQ(output_2->name, "tensor_5");
EXPECT_EQ(output_2->type, quantization_type);
EXPECT_FLOAT_EQ(output_2->quantization->scale[0], 0.22);
EXPECT_EQ(output_2->quantization->zero_point[0], 50);
}
INSTANTIATE_TEST_SUITE_P(MultipleInputOutputTests, ModelInterface,
::testing::Values(TensorType_INT8, TensorType_INT16));
TEST(ModelInterface, MixedTypeSingleInputOutput) {
auto model = CreateQuantizedModelSingleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_UINT8,
TensorType_INT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 3);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 2);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 2);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[1]->opcode_index, 1);
auto quant_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[quant_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_0");
EXPECT_EQ(input->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.35);
EXPECT_EQ(input->quantization->zero_point[0], 156);
auto fc_op = model->subgraphs[0]->operators[1].get();
auto output = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_2");
EXPECT_EQ(output->type, TensorType_INT8);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.12);
EXPECT_EQ(output->quantization->zero_point[0], 50);
}
TEST(ModelInterface, Uint8SingleInputOutput) {
auto model = CreateQuantizedModelSingleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_UINT8,
TensorType_UINT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 4);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 3);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[1]->opcode_index, 1);
EXPECT_EQ(model->subgraphs[0]->operators[2]->opcode_index, 0);
auto input_quant_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[input_quant_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_0");
EXPECT_EQ(input->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.35);
EXPECT_EQ(input->quantization->zero_point[0], 156);
auto output_quant_op = model->subgraphs[0]->operators[2].get();
auto output = model->subgraphs[0]->tensors[output_quant_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_3");
EXPECT_EQ(output->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.12);
EXPECT_EQ(output->quantization->zero_point[0], 178);
}
TEST(ModelInterface, Uint8MultipleInputOutput) {
auto model = CreateQuantizedModelMultipleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_UINT8,
TensorType_UINT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 8);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->inputs[1], 1);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 6);
EXPECT_EQ(model->subgraphs[0]->outputs[1], 7);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 5);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[1]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[2]->opcode_index, 1);
EXPECT_EQ(model->subgraphs[0]->operators[3]->opcode_index, 0);
EXPECT_EQ(model->subgraphs[0]->operators[4]->opcode_index, 0);
auto input_quant_1 = model->subgraphs[0]->operators[0].get();
auto input_1 = model->subgraphs[0]->tensors[input_quant_1->inputs[0]].get();
EXPECT_EQ(input_1->name, "tensor_0");
EXPECT_EQ(input_1->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input_1->quantization->scale[0], 0.35);
EXPECT_EQ(input_1->quantization->zero_point[0], 156);
auto input_quant_2 = model->subgraphs[0]->operators[1].get();
auto input_2 = model->subgraphs[0]->tensors[input_quant_2->inputs[0]].get();
EXPECT_EQ(input_2->name, "tensor_1");
EXPECT_EQ(input_2->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input_2->quantization->scale[0], 0.12);
EXPECT_EQ(input_2->quantization->zero_point[0], 178);
auto output_quant_1 = model->subgraphs[0]->operators[3].get();
auto output_1 =
model->subgraphs[0]->tensors[output_quant_1->outputs[0]].get();
EXPECT_EQ(output_1->name, "tensor_6");
EXPECT_EQ(output_1->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output_1->quantization->scale[0], 0.45);
EXPECT_EQ(output_1->quantization->zero_point[0], 156);
auto output_quant_2 = model->subgraphs[0]->operators[4].get();
auto output_2 =
model->subgraphs[0]->tensors[output_quant_2->outputs[0]].get();
EXPECT_EQ(output_2->name, "tensor_7");
EXPECT_EQ(output_2->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output_2->quantization->scale[0], 0.22);
EXPECT_EQ(output_2->quantization->zero_point[0], 178);
}
TEST(ModelInterface, Int8MultipleInputOutput) {
auto model = CreateQuantizedModelMultipleInputOutput(TensorType_INT8);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(ModifyModelInterface(&builder, model.get(), TensorType_INT8,
TensorType_INT8),
kTfLiteOk);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 6);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 2);
EXPECT_EQ(model->subgraphs[0]->inputs[1], 3);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 2);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 4);
EXPECT_EQ(model->subgraphs[0]->outputs[1], 5);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators[0]->opcode_index, 1);
auto fc_op = model->subgraphs[0]->operators[0].get();
auto input_1 = model->subgraphs[0]->tensors[fc_op->inputs[0]].get();
EXPECT_EQ(input_1->name, "tensor_2");
EXPECT_EQ(input_1->type, TensorType_INT8);
EXPECT_FLOAT_EQ(input_1->quantization->scale[0], 0.35);
EXPECT_EQ(input_1->quantization->zero_point[0], 28);
auto input_2 = model->subgraphs[0]->tensors[fc_op->inputs[1]].get();
EXPECT_EQ(input_2->name, "tensor_3");
EXPECT_EQ(input_2->type, TensorType_INT8);
EXPECT_FLOAT_EQ(input_2->quantization->scale[0], 0.12);
EXPECT_EQ(input_2->quantization->zero_point[0], 50);
auto output_1 = model->subgraphs[0]->tensors[fc_op->outputs[0]].get();
EXPECT_EQ(output_1->name, "tensor_4");
EXPECT_EQ(output_1->type, TensorType_INT8);
EXPECT_FLOAT_EQ(output_1->quantization->scale[0], 0.45);
EXPECT_EQ(output_1->quantization->zero_point[0], 28);
auto output_2 = model->subgraphs[0]->tensors[fc_op->outputs[1]].get();
EXPECT_EQ(output_2->name, "tensor_5");
EXPECT_EQ(output_2->type, TensorType_INT8);
EXPECT_FLOAT_EQ(output_2->quantization->scale[0], 0.22);
EXPECT_EQ(output_2->quantization->zero_point[0], 50);
}
TEST(ModelInterface, Float) {
std::unique_ptr<ModelT> input_model_t = CreateFloatModel();
flatbuffers::FlatBufferBuilder builder_temp;
flatbuffers::Offset<Model> output_model_location =
Model::Pack(builder_temp, input_model_t.get());
FinishModelBuffer(builder_temp, output_model_location);
const uint8_t* buffer_temp = builder_temp.GetBufferPointer();
const Model* input_model = GetModel(buffer_temp);
flatbuffers::FlatBufferBuilder builder;
EXPECT_EQ(Uint8QuantizeModelInputsOutputs(&builder, input_model,
{{"tensor_0", {0.4, 2}}},
{{"tensor_1", {0.5, -5}}}),
kTfLiteOk);
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
std::unique_ptr<ModelT> model;
model.reset(output_model->UnPack());
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 4);
EXPECT_EQ(model->subgraphs[0]->inputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->inputs[0], 0);
EXPECT_EQ(model->subgraphs[0]->outputs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->outputs[0], 1);
EXPECT_EQ(model->operator_codes.size(), 3);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_FULLY_CONNECTED);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[1].get()),
BuiltinOperator_DEQUANTIZE);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[2].get()),
BuiltinOperator_QUANTIZE);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 3);
auto dequantize_op = model->subgraphs[0]->operators[0].get();
auto input = model->subgraphs[0]->tensors[dequantize_op->inputs[0]].get();
EXPECT_EQ(input->name, "tensor_0_uint8");
EXPECT_EQ(input->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(input->quantization->scale[0], 0.4);
EXPECT_EQ(input->quantization->zero_point[0], 2);
auto quantize_op = model->subgraphs[0]->operators[2].get();
auto output = model->subgraphs[0]->tensors[quantize_op->outputs[0]].get();
EXPECT_EQ(output->name, "tensor_1_uint8");
EXPECT_EQ(output->type, TensorType_UINT8);
EXPECT_FLOAT_EQ(output->quantization->scale[0], 0.5);
EXPECT_EQ(output->quantization->zero_point[0], -5);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/python/modify_model_interface.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/modify_model_interface_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
80f9ff27-8ea6-4612-a1fe-cbb58bc0ed83 | cpp | tensorflow/tensorflow | quantize_model | tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc | tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.h"
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/statistics.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/context.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/debugger.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/post_calibration.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::mlir::quant::stablehlo::ConvertMlirModuleToExportedModel;
using ::mlir::quant::stablehlo::CreateMlirContextForQuantization;
using ::mlir::quant::stablehlo::ExportOptions;
using ::mlir::quant::stablehlo::FunctionAlias;
using ::mlir::quant::stablehlo::FunctionName;
using ::mlir::quant::stablehlo::GetFunctionAliases;
using ::mlir::quant::stablehlo::kExportStepSuffix;
using ::mlir::quant::stablehlo::PostCalibrationComponent;
using ::mlir::quant::stablehlo::PreCalibrationComponent;
using ::mlir::quant::stablehlo::RunCalibrationPasses;
using ::mlir::quant::stablehlo::UpdateFunctionAliases;
using ::mlir::quant::stablehlo::WeightOnlyPtqComponent;
using ::stablehlo::quantization::AddCalibrationStatistics;
using ::stablehlo::quantization::ChangeToQuantizedFilename;
using ::stablehlo::quantization::DebuggerConfig;
using ::stablehlo::quantization::ExpandPresets;
using ::stablehlo::quantization::IsCalibrationRequired;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::CreateTmpDir;
using ::stablehlo::quantization::io::GetLocalTmpFileName;
using ::tensorflow::quantization::PyFunctionLibrary;
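// Imports the SavedModel at `saved_model_path` into an MLIR module, updates
// `function_aliases` for the imported functions, and runs the preprocessing
// and freezing pipeline (optionally inlining and lowering TF to StableHLO).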
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ImportAndPreprocessSavedModel(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags, mlir::MLIRContext *context,
const bool is_inliner_run, const bool run_tf_to_stablehlo,
const bool deserialize_xla_call_module,
absl::flat_hash_map<std::string, std::string> &function_aliases) {
MLIRImportOptions import_options;
import_options.upgrade_legacy = true;
import_options.lift_variables = false;
import_options.include_variables_in_initializers = true;
auto bundle = std::make_unique<SavedModelBundle>();
std::vector<std::string> exported_names = signature_keys;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
SavedModelSignatureDefsToMlirImport(saved_model_path, tags,
absl::MakeSpan(exported_names),
context, import_options, &bundle);
if (!module.status().ok()) {
return absl::InternalError(absl::StrCat("Failed to import SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
UpdateFunctionAliases(function_aliases, *module_ref);
absl::flat_hash_set<std::string> aliased_function_names;
absl::c_for_each(function_aliases, [&](const auto &aliases) {
return aliased_function_names.insert(aliases.first);
});
TF_RETURN_IF_ERROR(PreprocessAndFreezeGraph(
kDefaultTfQuantMlirDumpFilePrefix,
is_inliner_run,
aliased_function_names, module_ref.get(), context,
bundle ? bundle->GetSession() : nullptr, run_tf_to_stablehlo,
deserialize_xla_call_module));
return module_ref;
}
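// Runs the export passes on `module_op` and converts it into an ExportedModel
// proto. A temporary file name is used as the checkpoint directory.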
absl::StatusOr<ExportedModel> ModuleOpToExportedModel(
mlir::ModuleOp module_op, mlir::MLIRContext *ctx,
absl::string_view step_name, const bool unfreeze_constants,
const absl::flat_hash_map<std::string, std::string> &function_aliases) {
TF_ASSIGN_OR_RETURN(const std::string checkpoint_dir, GetLocalTmpFileName());
const auto export_opts =
ExportOptions{true,
unfreeze_constants, checkpoint_dir,
absl::StrCat(step_name, kExportStepSuffix)};
TF_ASSIGN_OR_RETURN(const llvm::SmallVector<AssetFileDef> asset_file_defs,
RunExportPasses(export_opts, *ctx, module_op));
return ConvertMlirModuleToExportedModel(
module_op, checkpoint_dir, function_aliases,
{asset_file_defs.begin(), asset_file_defs.end()});
}
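// Clones `module_op`, runs the calibration passes on the clone, and exports
// the result as the pre-calibration model. Returns an empty ExportedModel if
// no further calibration is required.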
absl::StatusOr<ExportedModel> ExportCalibrationModel(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases,
absl::string_view calibration_data_dir) {
mlir::OwningOpRef<mlir::ModuleOp> cloned_module_ref(module_op.clone());
TF_RETURN_IF_ERROR(
RunCalibrationPasses(*cloned_module_ref, *context, calibration_data_dir,
quantization_options.calibration_options()
.force_regenerate_calibration_data()));
if (!IsCalibrationRequired(*cloned_module_ref)) return ExportedModel();
absl::StatusOr<ExportedModel> exported_model = ModuleOpToExportedModel(
*cloned_module_ref, context, kTfQuantPtqPreCalibrationStepName,
      /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
function_aliases);
if (!exported_model.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to export calibration model: ",
exported_model.status().message()));
}
return *exported_model;
}
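// Clones `module_op` and exports the clone so the caller can save it as the
// unquantized model consumed by the whole-model debugger.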
absl::StatusOr<ExportedModel> ExportDebuggingModel(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases) {
mlir::OwningOpRef<mlir::ModuleOp> cloned_module_ref(module_op.clone());
absl::StatusOr<ExportedModel> exported_model = ModuleOpToExportedModel(
*cloned_module_ref, context, kTfQuantPtqPreCalibrationStepName,
      /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
function_aliases);
if (!exported_model.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to export debugging model: ",
exported_model.status().message()));
}
return *exported_model;
}
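// Translates the TF `QuantizationOptions` into a StableHLO QuantizationConfig
// for static-range PTQ, then expands presets and populates defaults.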
QuantizationConfig GetQuantizationConfigForStaticRangePtq(
const QuantizationOptions &quantization_options) {
QuantizationConfig quantization_config{};
quantization_config.mutable_static_range_ptq_preset()
->set_enable_per_channel_quantized_weight(
quantization_options.enable_per_channel_quantization());
quantization_config.mutable_pipeline_config()->set_unpack_quantized_types(
true);
*quantization_config.mutable_debugger_config() =
quantization_options.debugger_config();
quantization_config.mutable_static_range_ptq_preset();
*quantization_config.mutable_calibration_options() =
quantization_options.calibration_options();
return ExpandPresets(PopulateDefaults(quantization_config));
}
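// Translates the TF `QuantizationOptions` into a StableHLO QuantizationConfig
// for weight-only PTQ, then expands presets and populates defaults.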
QuantizationConfig GetQuantizationConfigForWeightOnlyPtq(
const QuantizationOptions &quantization_options) {
QuantizationConfig quantization_config{};
quantization_config.mutable_weight_only_ptq_preset();
quantization_config.mutable_pipeline_config()->set_unpack_quantized_types(
true);
*quantization_config.mutable_debugger_config() =
quantization_options.debugger_config();
return ExpandPresets(PopulateDefaults(quantization_config));
}
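// Pre-calibration step: runs the StableHLO PreCalibrationComponent when the
// op set is STABLEHLO, otherwise the TF pre-calibration passes, and exports
// the instrumented model for calibration.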
absl::StatusOr<ExportedModel> QuantizePtqModelPreCalibrationImpl(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases,
absl::string_view calibration_data_dir) {
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
if (is_stablehlo) {
const QuantizationConfig quantization_config =
GetQuantizationConfigForStaticRangePtq(quantization_options);
PreCalibrationComponent pre_calibration_component(context);
TF_ASSIGN_OR_RETURN(module_op, pre_calibration_component.Run(
module_op, quantization_config));
} else {
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantPtqPreCalibrationStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizePtqPreCalibrationPasses(pm, quantization_options);
},
*context, module_op));
}
return ExportCalibrationModel(module_op, context, quantization_options,
function_aliases, calibration_data_dir);
}
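// Post-calibration step: quantizes the calibrated module via the StableHLO
// PostCalibrationComponent or the TF post-calibration passes, then exports
// the quantized model.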
absl::StatusOr<ExportedModel> QuantizePtqModelPostCalibrationImpl(
mlir::ModuleOp module_op, mlir::MLIRContext *context,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, std::string> &function_aliases) {
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
if (is_stablehlo) {
const QuantizationConfig quantization_config =
GetQuantizationConfigForStaticRangePtq(quantization_options);
PostCalibrationComponent post_calibration_component(context);
TF_ASSIGN_OR_RETURN(module_op, post_calibration_component.Run(
module_op, quantization_config));
} else {
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantPtqPostCalibrationStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizePtqPostCalibrationPasses(
pm, quantization_options, kTfQuantPtqPostCalibrationStepName);
},
*context, module_op));
}
return ModuleOpToExportedModel(
module_op, context, kTfQuantPtqPostCalibrationStepName,
      /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
function_aliases);
}
}  // namespace
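// Quantizes a quantization-aware-trained (QAT) SavedModel: import and
// preprocess, run the QAT passes, and export.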
absl::StatusOr<ExportedModel> QuantizeQatModel(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags,
const QuantizationOptions &quantization_options) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(saved_model_path, tags);
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path, signature_keys, tags, context.get(),
          /*is_inliner_run=*/true,
          /*run_tf_to_stablehlo=*/false,
          /*deserialize_xla_call_module=*/false, *function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantQatStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizeQatPasses(pm, quantization_options, kTfQuantQatStepName);
},
*context, *module_ref));
return ModuleOpToExportedModel(
*module_ref, context.get(), kTfQuantQatStepName,
      /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
*function_aliases);
}
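// Dynamic-range PTQ: weights are quantized without a calibration step, so a
// single pass pipeline is run before export.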
absl::StatusOr<ExportedModel> QuantizeDynamicRangePtq(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags,
const QuantizationOptions &quantization_options) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(saved_model_path, tags);
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path, signature_keys, tags, context.get(),
          /*is_inliner_run=*/true,
          /*run_tf_to_stablehlo=*/false, /*deserialize_xla_call_module=*/false,
*function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantPtqDynamicRangeStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizePtqDynamicRangePasses(pm, quantization_options,
kTfQuantPtqDynamicRangeStepName);
},
*context, *module_ref));
return ModuleOpToExportedModel(
*module_ref, context.get(), kTfQuantPtqDynamicRangeStepName,
      /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
*function_aliases);
}
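// Weight-only quantization. Signature keys and tags are read from
// `quantization_options`; dispatches to the StableHLO WeightOnlyPtqComponent
// or the TF weight-only passes depending on the op set.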
absl::StatusOr<ExportedModel> QuantizeWeightOnly(
absl::string_view saved_model_path,
const QuantizationOptions &quantization_options) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(
saved_model_path, {quantization_options.tags().begin(),
quantization_options.tags().end()});
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path,
{quantization_options.signature_keys().begin(),
quantization_options.signature_keys().end()},
{quantization_options.tags().begin(),
quantization_options.tags().end()},
          context.get(), /*is_inliner_run=*/true,
          /*run_tf_to_stablehlo=*/is_stablehlo,
          /*deserialize_xla_call_module=*/false, *function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
if (is_stablehlo) {
const QuantizationConfig quantization_config =
GetQuantizationConfigForWeightOnlyPtq(quantization_options);
WeightOnlyPtqComponent weight_only_ptq_component(context.get());
TF_ASSIGN_OR_RETURN(*module_ref, weight_only_ptq_component.Run(
*module_ref, quantization_config));
} else {
TF_RETURN_IF_ERROR(RunPasses(
kTfQuantWeightOnlyStepName,
[&quantization_options](mlir::PassManager &pm) {
AddQuantizeWeightOnlyPasses(pm, quantization_options,
kTfQuantWeightOnlyStepName);
},
*context, *module_ref));
}
return ModuleOpToExportedModel(
*module_ref, context.get(), kTfQuantWeightOnlyStepName,
      /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
*function_aliases);
}
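// Static-range PTQ driver: import and preprocess, export and run a
// calibration model over the representative datasets via
// `py_function_library`, fold the collected statistics back into the module,
// optionally dump a debugging model, and finally run the post-calibration
// quantization step.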
absl::StatusOr<ExportedModel> QuantizeStaticRangePtq(
absl::string_view saved_model_path,
const std::vector<std::string> &signature_keys,
const std::unordered_set<std::string> &tags,
const QuantizationOptions &quantization_options,
const absl::flat_hash_map<std::string, SignatureDef> &signature_def_map,
const PyFunctionLibrary &py_function_library,
const absl::flat_hash_map<std::string, RepresentativeDatasetFile>
&representative_dataset_file_map_serialized) {
std::unique_ptr<mlir::MLIRContext> context =
CreateMlirContextForQuantization();
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
function_aliases = GetFunctionAliases(saved_model_path, tags);
if (!function_aliases.ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get function alias: ", function_aliases.status().message()));
}
const bool is_stablehlo = quantization_options.op_set() == OpSet::STABLEHLO;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
ImportAndPreprocessSavedModel(
saved_model_path, signature_keys, tags, context.get(),
          /*is_inliner_run=*/true,
          /*run_tf_to_stablehlo=*/is_stablehlo,
          /*deserialize_xla_call_module=*/false, *function_aliases);
if (!module.status().ok()) {
return absl::InternalError(
absl::StrCat("Failed to import and preprocess SavedModel: ",
module.status().message()));
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(module).value();
std::string calibration_data_dir =
quantization_options.calibration_options().calibration_data_dir();
if (calibration_data_dir.empty()) {
TF_ASSIGN_OR_RETURN(calibration_data_dir, CreateTmpDir());
}
TF_ASSIGN_OR_RETURN(ExportedModel calibration_exported_model,
QuantizePtqModelPreCalibrationImpl(
*module_ref, context.get(), quantization_options,
*function_aliases, calibration_data_dir));
if (calibration_exported_model.has_graph_def()) {
TF_ASSIGN_OR_RETURN(std::string calibration_saved_model_dir,
CreateTmpDir());
py_function_library.SaveExportedModel(
calibration_saved_model_dir, calibration_exported_model,
saved_model_path, tags, signature_def_map);
py_function_library.RunCalibration(
calibration_saved_model_dir, signature_keys, tags,
quantization_options.force_graph_mode_calibration(),
representative_dataset_file_map_serialized);
}
if (absl::Status status = AddCalibrationStatistics(
*module_ref, calibration_data_dir,
quantization_options.calibration_options(), py_function_library);
!status.ok()) {
LOG(WARNING) << "Some CustomAggregator ops do not have min or max "
"values. Parts of the graph are not quantized. "
<< status;
}
if (quantization_options.has_debugger_config() &&
quantization_options.debugger_config().debugger_type() ==
DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL) {
TF_ASSIGN_OR_RETURN(
ExportedModel debugging_exported_model,
ExportDebuggingModel(*module_ref, context.get(), quantization_options,
*function_aliases));
ChangeToQuantizedFilename(*module_ref);
absl::string_view unquantized_dump_model_path =
quantization_options.debugger_config().unquantized_dump_model_path();
py_function_library.SaveExportedModel(
unquantized_dump_model_path, debugging_exported_model, saved_model_path,
tags, signature_def_map);
}
return QuantizePtqModelPostCalibrationImpl(
*module_ref, context.get(), quantization_options, *function_aliases);
}
}  // namespace quantization
} | #include "tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/test_util.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/command_line_flags.h"
namespace {
tensorflow::string* g_test_model_dir = nullptr;
}  // namespace
namespace tflite {
namespace optimize {
namespace {
using mlir::TFL::FlatBufferModelAbslError;
using testing::Eq;
using testing::FloatEq;
using testing::FloatNear;
using testing::IsEmpty;
using testing::NotNull;
using testing::SizeIs;
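// Unpacks a flatbuffer-backed Model into its mutable object-API form.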
ModelT UnPackFlatBufferModel(const Model& flatbuffer_model) {
ModelT model;
flatbuffer_model.UnPackTo(&model);
return model;
}
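// Test helper: packs `model` back into a flatbuffer, quantizes it with
// mlir::lite::QuantizeModel, and unpacks the quantized result into `model` in
// place. Note that `operator_names` is accepted for signature compatibility
// but the underlying call receives an empty set.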
absl::Status QuantizeModel(
ModelT* model, const TensorType& input_type, const TensorType& output_type,
const bool allow_float,
const std::unordered_set<std::string>& operator_names,
const TensorType& activations_type, std::string& output_buffer,
const bool disable_per_channel = false,
const absl::flat_hash_set<std::string>& blocked_ops = {},
const absl::flat_hash_set<std::string>& blocked_nodes = {},
const bool disable_per_channel_for_dense_layers = false) {
TensorType inference_tensor_type = activations_type;
const bool fully_quantize = !allow_float;
flatbuffers::FlatBufferBuilder input_builder;
tflite::FinishModelBuffer(input_builder,
tflite::Model::Pack(input_builder, model));
const std::string input_buffer(
reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
input_builder.GetSize());
auto status = mlir::lite::QuantizeModel(
input_buffer, input_type, output_type, inference_tensor_type,
{}, disable_per_channel, fully_quantize, output_buffer,
false, false,
true, blocked_ops, blocked_nodes,
false,
disable_per_channel_for_dense_layers);
if (!status.ok()) {
return status;
}
auto flatbuffer_model = FlatBufferModelAbslError::BuildFromBuffer(
output_buffer.data(), output_buffer.size());
*model = UnPackFlatBufferModel(*flatbuffer_model->GetModel());
return absl::OkStatus();
}
absl::Status QuantizeModel(ModelT* model, const TensorType& input_type,
const TensorType& output_type, bool allow_float,
std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type, allow_float,
                       /*operator_names=*/{},
                       /*activations_type=*/TensorType_INT8, output_buffer);
}
absl::Status QuantizeModel(ModelT* model, const TensorType& input_type,
const TensorType& output_type,
std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type,
                       /*allow_float=*/false, output_buffer);
}
absl::Status QuantizeModel(ModelT* model, std::string& output_buffer) {
return QuantizeModel(model, TensorType_FLOAT32, TensorType_FLOAT32,
                       /*allow_float=*/true, output_buffer);
}
absl::Status QuantizeModelAllOperators(
ModelT* model, const TensorType& input_type, const TensorType& output_type,
bool allow_float, const TensorType& activations_type,
bool disable_per_channel, std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type, allow_float,
                       /*operator_names=*/{}, activations_type, output_buffer,
disable_per_channel);
}
absl::Status QuantizeModelAllOperators(ModelT* model,
const TensorType& input_type,
const TensorType& output_type,
bool allow_float,
const TensorType& activations_type,
std::string& output_buffer) {
return QuantizeModel(model, input_type, output_type, allow_float,
                       /*operator_names=*/{}, activations_type, output_buffer);
}
absl::Status QuantizeModelAllOperators(
ModelT* model, const TensorType& input_type, const TensorType& output_type,
bool allow_float, const TensorType& activations_type,
std::string& output_buffer, bool disable_per_channel_for_dense_layers) {
return QuantizeModel(model, input_type, output_type, allow_float,
                       /*operator_names=*/{}, activations_type, output_buffer,
                       /*disable_per_channel=*/false,
                       /*blocked_ops=*/{},
                       /*blocked_nodes=*/{},
disable_per_channel_for_dense_layers);
}
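// Reads a test model from the directory pointed to by `g_test_model_dir`.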
std::unique_ptr<FlatBufferModelAbslError> ReadModel(
const std::string& model_name) {
auto model_path = tensorflow::io::JoinPath(*g_test_model_dir, model_name);
return FlatBufferModelAbslError::BuildFromFile(model_path.c_str());
}
template <typename T>
std::vector<T> GetAsVector(const flatbuffers::Vector<T>* vec) {
return std::vector<T>(vec->begin(), vec->end());
}
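// Checks that the quantized scale matches the scale recomputed from the float
// tensor's min/max, symmetrizing the range first when `symmetric` is true.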
void VerifyQuantizationScale(
const QuantizationParameters& float_quant_params,
const QuantizationParametersT& quantized_quant_params, const int bit_num,
const bool symmetric) {
const float eps = 1e-7;
ASSERT_THAT(*float_quant_params.min(), SizeIs(1));
ASSERT_THAT(*float_quant_params.max(), SizeIs(1));
float float_min = std::min(0.f, float_quant_params.min()->Get(0));
float float_max = std::max(0.f, float_quant_params.max()->Get(0));
if (symmetric) {
float_max = std::max(std::abs(float_min), std::abs(float_max));
float_min = -float_max;
}
ASSERT_THAT(quantized_quant_params.scale, SizeIs(1));
ASSERT_THAT(quantized_quant_params.zero_point, SizeIs(1));
float scale = (float_max - float_min) / ((1 << bit_num) - 1);
EXPECT_THAT(scale, FloatNear(quantized_quant_params.scale[0], eps));
}
class QuantizeModelTest : public testing::Test {
protected:
QuantizeModelTest() {
input_model_ =
ReadModel(::mlir::lite::internal::kConvModelWith0Plus10Weights);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
std::unique_ptr<FlatBufferModelAbslError> input_model_;
const Model* readonly_model_;
tflite::ModelT model_;
std::string output_buffer_;
};
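// Compares two tensors field by field. Quantization scales are only compared
// when large enough (> 3e-5) to be numerically meaningful.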
void ExpectEqualTensor(TensorT* tensor, TensorT* expected_tensor) {
const float eps = 1e-7;
EXPECT_THAT(expected_tensor, NotNull());
EXPECT_THAT(tensor->is_variable, Eq(expected_tensor->is_variable));
EXPECT_THAT(tensor->shape, Eq(expected_tensor->shape));
EXPECT_THAT(tensor->type, Eq(expected_tensor->type));
const auto quantization_params = tensor->quantization.get();
const auto expected_quantization_params = expected_tensor->quantization.get();
if (quantization_params != nullptr &&
expected_quantization_params != nullptr) {
for (int i = 0; i < quantization_params->scale.size(); ++i) {
if (quantization_params->scale[i] > 3e-5) {
EXPECT_THAT(quantization_params->scale[i],
FloatNear(expected_quantization_params->scale[i], eps));
}
}
EXPECT_THAT(quantization_params->zero_point,
Eq(expected_quantization_params->zero_point));
}
}
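// Returns the input tensor at position `idx` of the first operator in
// `expected_graph` whose builtin code matches that of `quant_op`, or nullptr
// if no such operator exists.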
TensorT* FindMatchingExpectedTensor(const SubGraphT& expected_graph,
const ModelT& expected_model,
const ModelT& quant_model,
const OperatorT& quant_op, int idx) {
const auto& builtin_code =
GetBuiltinCode(quant_model.operator_codes[quant_op.opcode_index].get());
for (const auto& expected_op : expected_graph.operators) {
const auto& op_code =
expected_model.operator_codes[expected_op->opcode_index].get();
const auto& expected_code = GetBuiltinCode(op_code);
if (expected_code == builtin_code) {
return expected_graph.tensors[expected_op->inputs[idx]].get();
}
}
return nullptr;
}
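// Verifies that `model` and `expected_model` agree subgraph by subgraph: each
// operator input tensor is compared against its counterpart, including buffer
// contents for constant tensors.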
void ExpectSameModels(const ModelT& model, const ModelT& expected_model) {
ASSERT_THAT(model.subgraphs, SizeIs(expected_model.subgraphs.size()));
for (size_t subgraph_idx = 0; subgraph_idx < model.subgraphs.size();
subgraph_idx++) {
const auto graph = model.subgraphs[subgraph_idx].get();
const auto expected_graph = expected_model.subgraphs[subgraph_idx].get();
for (auto& op : graph->operators) {
for (int idx = 0; idx < op->inputs.size(); idx++) {
if (op->inputs[idx] < 0) {
continue;
}
const auto& tensor = graph->tensors[op->inputs[idx]];
auto* expected_tensor = FindMatchingExpectedTensor(
*expected_graph, expected_model, model, *op, idx);
if (!expected_tensor) {
continue;
}
ExpectEqualTensor(tensor.get(), expected_tensor);
if (expected_tensor->buffer > 0) {
const int buffer_idx = tensor->buffer;
const int expected_buffer_idx = expected_tensor->buffer;
const auto buffer = model.buffers[buffer_idx].get()->data;
const auto expected_buffer =
expected_model.buffers[expected_buffer_idx].get()->data;
EXPECT_THAT(buffer, Eq(expected_buffer));
}
}
}
}
}
class QuantizeConvModelTest : public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeConvModelTest() {
tensor_type_ = GetParam();
input_model_ =
ReadModel(::mlir::lite::internal::kConvModelWith0Plus10Weights);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
auto& subgraph = model_.subgraphs[0];
auto* input = subgraph->tensors[subgraph->inputs[0]].get();
auto* output = subgraph->tensors[subgraph->outputs[0]].get();
input->quantization = std::make_unique<QuantizationParametersT>();
output->quantization = std::make_unique<QuantizationParametersT>();
input->quantization->min.push_back(0.0);
output->quantization->min.push_back(0.0);
input->quantization->max.push_back(6.0);
output->quantization->max.push_back(6.0);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeConvModelTestInst, QuantizeConvModelTest,
testing::ValuesIn({TensorType_INT8}));
TEST_P(QuantizeConvModelTest, QuantizationSucceeds) {
TF_EXPECT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
                                         /*allow_float=*/false, tensor_type_,
output_buffer_));
const Model* output_model = GetModel(output_buffer_.data());
ASSERT_TRUE(output_model);
}
TEST_P(QuantizeConvModelTest, SkipUnspecifiedLayer) {
TF_EXPECT_OK(QuantizeModel(&model_, TensorType_FLOAT32, TensorType_FLOAT32,
                             /*allow_float=*/true, /*operator_names=*/{},
                             TensorType_FLOAT32, output_buffer_,
                             /*disable_per_channel=*/false,
                             /*blocked_ops=*/{"CONV_2D"}));
ModelT expected_model;
readonly_model_->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
TEST_P(QuantizeConvModelTest, SkipUnspecifiedLayerByName) {
TF_EXPECT_OK(QuantizeModel(&model_, TensorType_FLOAT32, TensorType_FLOAT32,
                             /*allow_float=*/true, /*operator_names=*/{},
                             TensorType_FLOAT32, output_buffer_,
                             /*disable_per_channel=*/false,
                             /*blocked_ops=*/{}, /*blocked_nodes=*/{"output"}));
ModelT expected_model;
readonly_model_->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
TEST_P(QuantizeConvModelTest, GraphIsFullyQuantized) {
TF_EXPECT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
                                         /*allow_float=*/false, tensor_type_,
output_buffer_));
for (const auto& subgraph : model_.subgraphs) {
for (const auto& tensor : subgraph->tensors) {
EXPECT_TRUE(tensor->type == TensorType_INT32 ||
tensor->type == TensorType_INT8);
}
}
}
class QuantizeConvNoBiasModelTest : public QuantizeModelTest {
protected:
QuantizeConvNoBiasModelTest() {
input_model_ = ReadModel(::mlir::lite::internal::kConvModelWithNoBias);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
class QuantizeSplitModelTest : public QuantizeModelTest {
protected:
QuantizeSplitModelTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelSplit);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeSplitModelTest, QuantizeSplit) {
TF_EXPECT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const int32_t subgraph_idx = 0;
const auto& subgraph = model_.subgraphs[subgraph_idx];
const auto& readonly_subgraph =
readonly_model_->subgraphs()->Get(subgraph_idx);
EXPECT_THAT(*readonly_subgraph->operators(), SizeIs(2));
EXPECT_THAT(subgraph->operators, SizeIs(2));
const auto& split = subgraph->operators[0];
const auto& add = subgraph->operators[1];
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[split->opcode_index].get()),
Eq(BuiltinOperator_SPLIT));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[add->opcode_index].get()),
Eq(BuiltinOperator_ADD));
EXPECT_THAT(subgraph->tensors, SizeIs(5));
const int input_idx = 0;
EXPECT_THAT(subgraph->tensors[input_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[input_idx]->name, Eq("input"));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[input_idx]->quantization->zero_point[0],
Eq(-128));
const int output_idx = 4;
EXPECT_THAT(subgraph->tensors[output_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[output_idx]->name, Eq("output"));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[output_idx]->quantization->zero_point[0],
Eq(-128));
const int split0_idx = 2;
EXPECT_THAT(subgraph->tensors[split0_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[split0_idx]->name, Eq("split;split:1"));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[split0_idx]->quantization->zero_point[0],
Eq(-128));
const int split1_idx = 3;
EXPECT_THAT(subgraph->tensors[split1_idx]->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[split1_idx]->name, Eq("split;split:11"));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->zero_point,
SizeIs(1));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->scale[0],
FloatEq(1.0));
EXPECT_THAT(subgraph->tensors[split1_idx]->quantization->zero_point[0],
Eq(-128));
EXPECT_THAT(model_.operator_codes, SizeIs(2));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_SPLIT));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeConvModel2Test : public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeConvModel2Test() {
tensor_type_ = GetParam();
input_model_ =
ReadModel(::mlir::lite::internal::kConvModelWith0Plus10Weights);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
auto& subgraph = model_.subgraphs[0];
auto* input = subgraph->tensors[subgraph->inputs[0]].get();
auto* output = subgraph->tensors[subgraph->outputs[0]].get();
input->quantization = std::make_unique<QuantizationParametersT>();
output->quantization = std::make_unique<QuantizationParametersT>();
input->quantization->min.push_back(0.0);
output->quantization->min.push_back(0.0);
input->quantization->max.push_back(6.0);
output->quantization->max.push_back(6.0);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeConvModel2TestInst, QuantizeConvModel2Test,
testing::ValuesIn({TensorType_INT8}));
TEST_P(QuantizeConvModel2Test, VerifyConvQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
                                         /*allow_float=*/false, tensor_type_,
output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto conv_op = subgraph->operators[0].get();
const int input_tensor_idx = 0;
const int weights_tensor_idx = 1;
const int bias_tensor_index = 2;
const int output_tensor_idx = 0;
const auto bias_tensor =
subgraph->tensors[conv_op->inputs[bias_tensor_index]].get();
const auto input_tensor =
subgraph->tensors[conv_op->inputs[input_tensor_idx]].get();
const auto weights_tensor =
subgraph->tensors[conv_op->inputs[weights_tensor_idx]].get();
const auto output_tensor =
subgraph->tensors[conv_op->outputs[output_tensor_idx]].get();
EXPECT_THAT(bias_tensor->type,
Eq(tensor_type_ == TensorType_INT8 ? TensorType_INT32
: TensorType_INT64));
EXPECT_THAT(input_tensor->type, Eq(tensor_type_));
EXPECT_THAT(weights_tensor->type, Eq(TensorType_INT8));
ASSERT_TRUE(weights_tensor->quantization);
ASSERT_TRUE(bias_tensor->quantization);
const std::vector<float>& bias_scales = bias_tensor->quantization->scale;
const std::vector<float>& weights_scales =
weights_tensor->quantization->scale;
const std::vector<int64_t>& weights_zero_points =
weights_tensor->quantization->zero_point;
const int out_channel_size = weights_tensor->shape[0];
ASSERT_THAT(bias_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_zero_points, SizeIs(out_channel_size));
ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1));
ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1));
const float eps = 1e-7;
for (size_t i = 0; i < out_channel_size; i++) {
EXPECT_THAT(bias_scales[i], FloatNear(input_tensor->quantization->scale[0] *
weights_scales[i],
eps));
}
const auto bias_buffer = model_.buffers[bias_tensor->buffer].get();
auto control_size = tensor_type_ == TensorType_INT8
? sizeof(int32_t) * bias_tensor->shape[0]
: sizeof(int64_t) * bias_tensor->shape[0];
const auto float_op =
readonly_model_->subgraphs()->Get(0)->operators()->Get(0);
const auto original_bias_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(2));
ASSERT_THAT(bias_buffer->data, SizeIs(control_size));
const auto original_bias_buffer =
readonly_model_->buffers()->Get(original_bias_tensor->buffer());
const float* bias_float_buffer =
reinterpret_cast<const float*>(original_bias_buffer->data()->data());
if (tensor_type_ == TensorType_INT8) {
int32_t* bias_values = reinterpret_cast<int32_t*>(bias_buffer->data.data());
for (size_t i = 0; i < out_channel_size; i++) {
auto dequantized_value = bias_values[i] * bias_scales[i];
EXPECT_THAT(dequantized_value,
FloatNear(bias_float_buffer[i], bias_scales[i] / 2));
}
}
const auto weights_buffer = model_.buffers[weights_tensor->buffer].get();
const auto original_weights_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(1));
const auto original_weights_buffer =
readonly_model_->buffers()->Get(original_weights_tensor->buffer());
const int8_t* weight_values =
reinterpret_cast<int8_t*>(weights_buffer->data.data());
const float* weights_float_buffer =
reinterpret_cast<const float*>(original_weights_buffer->data()->data());
ASSERT_THAT(sizeof(float) * weights_buffer->data.size(),
Eq(original_weights_buffer->data()->size()));
int num_values_in_channel = weights_buffer->data.size() / out_channel_size;
for (size_t channel_idx = 0; channel_idx < out_channel_size; channel_idx++) {
for (size_t j = 0; j < num_values_in_channel; j++) {
      // The weights buffer is row-major with out_channel as the leading
      // dimension, so the per-channel stride is num_values_in_channel.
      size_t element_idx = channel_idx * num_values_in_channel + j;
auto scale = weights_scales[channel_idx];
auto zero_point = weights_zero_points[channel_idx];
auto dequantized_value = weight_values[element_idx] * scale;
EXPECT_THAT(dequantized_value,
FloatNear(weights_float_buffer[element_idx], scale / 2));
EXPECT_THAT(zero_point, Eq(0));
}
}
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_CONV_2D));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(3));
}
TEST_P(QuantizeConvModel2Test, VerifyConvDisablePerChannelQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
                                         /*allow_float=*/false, tensor_type_,
                                         /*disable_per_channel=*/true,
output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto conv_op = subgraph->operators[0].get();
const int input_tensor_idx = 0;
const int weights_tensor_idx = 1;
const int bias_tensor_index = 2;
const int output_tensor_idx = 0;
const auto bias_tensor =
subgraph->tensors[conv_op->inputs[bias_tensor_index]].get();
const auto input_tensor =
subgraph->tensors[conv_op->inputs[input_tensor_idx]].get();
const auto weights_tensor =
subgraph->tensors[conv_op->inputs[weights_tensor_idx]].get();
const auto output_tensor =
subgraph->tensors[conv_op->outputs[output_tensor_idx]].get();
EXPECT_THAT(bias_tensor->type,
Eq(tensor_type_ == TensorType_INT8 ? TensorType_INT32
: TensorType_INT64));
EXPECT_THAT(input_tensor->type, Eq(tensor_type_));
EXPECT_THAT(weights_tensor->type, Eq(TensorType_INT8));
ASSERT_TRUE(weights_tensor->quantization);
ASSERT_TRUE(bias_tensor->quantization);
const std::vector<float>& bias_scales = bias_tensor->quantization->scale;
const std::vector<float>& weights_scales =
weights_tensor->quantization->scale;
const std::vector<int64_t>& weights_zero_points =
weights_tensor->quantization->zero_point;
const int out_channel_size = 1;
ASSERT_THAT(bias_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_scales, SizeIs(out_channel_size));
ASSERT_THAT(weights_zero_points, SizeIs(out_channel_size));
ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1));
ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1));
const float eps = 1e-7;
for (size_t i = 0; i < out_channel_size; i++) {
EXPECT_THAT(bias_scales[i], FloatNear(input_tensor->quantization->scale[0] *
weights_scales[i],
eps));
}
const auto bias_buffer = model_.buffers[bias_tensor->buffer].get();
auto control_size = tensor_type_ == TensorType_INT8
? sizeof(int32_t) * bias_tensor->shape[0]
: sizeof(int64_t) * bias_tensor->shape[0];
ASSERT_THAT(bias_buffer->data, SizeIs(control_size));
const auto float_op =
readonly_model_->subgraphs()->Get(0)->operators()->Get(0);
const auto original_bias_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(2));
const auto original_bias_buffer =
readonly_model_->buffers()->Get(original_bias_tensor->buffer());
const float* bias_float_buffer =
reinterpret_cast<const float*>(original_bias_buffer->data()->data());
if (tensor_type_ == TensorType_INT8) {
int32_t* bias_values = reinterpret_cast<int32_t*>(bias_buffer->data.data());
for (size_t i = 0; i < out_channel_size; i++) {
auto dequantized_value = bias_values[i] * bias_scales[i];
EXPECT_THAT(dequantized_value,
FloatNear(bias_float_buffer[i], bias_scales[i] / 2));
}
}
const auto weights_buffer = model_.buffers[weights_tensor->buffer].get();
const auto original_weights_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(1));
const auto original_weights_buffer =
readonly_model_->buffers()->Get(original_weights_tensor->buffer());
const int8_t* weight_values =
reinterpret_cast<int8_t*>(weights_buffer->data.data());
const float* weights_float_buffer =
reinterpret_cast<const float*>(original_weights_buffer->data()->data());
ASSERT_THAT(sizeof(float) * weights_buffer->data.size(),
Eq(original_weights_buffer->data()->size()));
int num_values_in_channel = weights_buffer->data.size() / out_channel_size;
for (size_t channel_idx = 0; channel_idx < out_channel_size; channel_idx++) {
for (size_t j = 0; j < num_values_in_channel; j++) {
      // As above, the per-channel stride within the row-major weights buffer
      // is num_values_in_channel.
      size_t element_idx = channel_idx * num_values_in_channel + j;
auto scale = weights_scales[channel_idx];
auto zero_point = weights_zero_points[channel_idx];
auto dequantized_value = weight_values[element_idx] * scale;
EXPECT_THAT(dequantized_value,
FloatNear(weights_float_buffer[element_idx], scale / 2));
EXPECT_THAT(zero_point, Eq(0));
}
}
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_CONV_2D));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(3));
}
class QuantizeSoftmaxTest : public QuantizeModelTest {
protected:
QuantizeSoftmaxTest() {
input_model_ =
ReadModel(::mlir::lite::internal::kSingleSoftmaxModelMinMinus5MaxPlus5);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeSoftmaxTest, VerifySoftmaxQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(op->opcode_index, Eq(0));
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_SOFTMAX));
ASSERT_THAT(op->inputs, SizeIs(1));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(op->inputs[0])->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(op->outputs[0])->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT8));
auto float_input_quant_params =
float_graph->tensors()->Get(op->inputs[0])->quantization();
auto input_quant_params =
subgraph->tensors[op->inputs[0]]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
                          /*bit_num=*/8, /*symmetric=*/false);
auto float_output_quant_params =
float_graph->tensors()->Get(op->outputs[0])->quantization();
auto output_quant_params =
subgraph->tensors[op->outputs[0]]->quantization.get();
ASSERT_THAT(*float_output_quant_params->min(), SizeIs(1));
ASSERT_THAT(*float_output_quant_params->max(), SizeIs(1));
ASSERT_THAT(output_quant_params->scale, SizeIs(1));
ASSERT_THAT(output_quant_params->zero_point, SizeIs(1));
ASSERT_THAT(1.0f / 256.0f, Eq(output_quant_params->scale[0]));
ASSERT_THAT(-128, Eq(output_quant_params->zero_point[0]));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_SOFTMAX));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeAvgPoolTest : public QuantizeModelTest {
protected:
QuantizeAvgPoolTest() {
input_model_ =
ReadModel(::mlir::lite::internal::kSingleAvgPoolModelMinMinus5MaxPlus5);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeAvgPoolTest, VerifyAvgPoolQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(op->opcode_index, Eq(0));
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_AVERAGE_POOL_2D));
ASSERT_THAT(op->inputs, SizeIs(1));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(op->inputs[0])->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(op->outputs[0])->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT8));
auto float_input_quant_params =
float_graph->tensors()->Get(op->inputs[0])->quantization();
auto input_quant_params =
subgraph->tensors[op->inputs[0]]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
                          /*bit_num=*/8, /*symmetric=*/false);
auto float_output_quant_params =
float_graph->tensors()->Get(op->outputs[0])->quantization();
auto output_quant_params =
subgraph->tensors[op->outputs[0]]->quantization.get();
ASSERT_THAT(*float_output_quant_params->min(), SizeIs(1));
ASSERT_THAT(*float_output_quant_params->max(), SizeIs(1));
ASSERT_THAT(output_quant_params->scale, SizeIs(1));
EXPECT_THAT(input_quant_params->scale[0], Eq(output_quant_params->scale[0]));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_AVERAGE_POOL_2D));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeMultiInputAddWithReshapeTest : public QuantizeModelTest {
protected:
QuantizeMultiInputAddWithReshapeTest() {
input_model_ = ReadModel(::mlir::lite::internal::kMultiInputAddWithReshape);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeMultiInputAddWithReshapeTest, VerifyReshapeQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[1].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_RESHAPE));
ASSERT_THAT(op->inputs, SizeIs(2));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
auto float_op = float_graph->operators()->Get(1);
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT8));
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT8));
auto float_input_quant_params =
float_graph->tensors()->Get(op->inputs[0])->quantization();
auto input_quant_params =
subgraph->tensors[op->inputs[0]]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
                          /*bit_num=*/8, /*symmetric=*/false);
auto float_output_quant_params =
float_graph->tensors()->Get(float_op->outputs()->Get(0))->quantization();
auto output_quant_params =
subgraph->tensors[op->outputs[0]]->quantization.get();
ASSERT_THAT(*float_output_quant_params->min(), SizeIs(1));
ASSERT_THAT(*float_output_quant_params->max(), SizeIs(1));
ASSERT_THAT(output_quant_params->scale, SizeIs(1));
EXPECT_THAT(model_.operator_codes, SizeIs(2));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_ADD));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[1].get()),
Eq(BuiltinOperator_RESHAPE));
ASSERT_THAT(model_.operator_codes[1]->version, Eq(1));
}
TEST_F(QuantizeMultiInputAddWithReshapeTest, VerifyAddQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_ADD));
ASSERT_THAT(op->inputs, SizeIs(2));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
auto float_op = float_graph->operators()->Get(0);
const int float_input0_idx = float_op->inputs()->Get(0);
const int float_input1_idx = float_op->inputs()->Get(1);
const int float_output_idx = float_op->outputs()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(float_input0_idx)->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(float_input1_idx)->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(float_output_idx)->type(),
Eq(TensorType_FLOAT32));
for (size_t input_idx = 0; input_idx < 2; ++input_idx) {
EXPECT_THAT(subgraph->tensors[op->inputs[input_idx]].get()->type,
Eq(TensorType_INT8));
auto float_input_quant_params =
float_graph->tensors()
->Get(float_op->inputs()->Get(input_idx))
->quantization();
auto input_quant_params =
subgraph->tensors[op->inputs[input_idx]]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
                            /*bit_num=*/8, /*symmetric=*/false);
}
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT8));
auto float_output_quant_params =
float_graph->tensors()->Get(op->outputs[0])->quantization();
auto output_quant_params =
subgraph->tensors[op->outputs[0]]->quantization.get();
ASSERT_THAT(*float_output_quant_params->min(), SizeIs(1));
ASSERT_THAT(*float_output_quant_params->max(), SizeIs(1));
ASSERT_THAT(output_quant_params->scale, SizeIs(1));
EXPECT_THAT(model_.operator_codes, SizeIs(2));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_ADD));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[1].get()),
Eq(BuiltinOperator_RESHAPE));
ASSERT_THAT(model_.operator_codes[1]->version, Eq(1));
}
class QuantizeConstInputTest : public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeConstInputTest() {
tensor_type_ = GetParam();
input_model_ = ReadModel(::mlir::lite::internal::kConstInputAddModel);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeConstInputTestInst, QuantizeConstInputTest,
testing::ValuesIn({TensorType_INT8}));
TEST_P(QuantizeConstInputTest, VerifyConstOpInput) {
TF_ASSERT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
                                         /*allow_float=*/false, tensor_type_,
output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_ADD));
ASSERT_THAT(op->inputs, SizeIs(2));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(op->inputs[0])->type(),
Eq(TensorType_FLOAT32));
ASSERT_THAT(float_graph->tensors()->Get(op->outputs[0])->type(),
Eq(TensorType_FLOAT32));
for (size_t input_idx = 0; input_idx < 2; ++input_idx) {
EXPECT_THAT(subgraph->tensors[op->inputs[input_idx]].get()->type,
Eq(tensor_type_));
}
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type, Eq(tensor_type_));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_ADD));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeArgMaxTest : public QuantizeModelTest {
protected:
QuantizeArgMaxTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelWithArgMaxOp);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeArgMaxTest, VerifyArgMax) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_ARG_MAX));
ASSERT_THAT(op->inputs, SizeIs(2));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
auto float_op = float_graph->operators()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT8));
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(1))->type(),
Eq(subgraph->tensors[op->inputs[1]].get()->type));
ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
Eq(subgraph->tensors[op->outputs[0]].get()->type));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_ARG_MAX));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(2));
}
class QuantizeLSTMTest : public QuantizeModelTest {
protected:
QuantizeLSTMTest() {
input_model_ = ReadModel(::mlir::lite::internal::kLstmCalibrated);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeLSTMTest, VerifyLSTM) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_FLOAT32, TensorType_FLOAT32,
      /*allow_float=*/true, TensorType_INT8, output_buffer_));
auto expected_fb_model = ReadModel(::mlir::lite::internal::kLstmQuantized);
auto expected_read_only_model = expected_fb_model->GetModel();
ModelT expected_model;
expected_read_only_model->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
class QuantizeLSTM2Test : public QuantizeModelTest {
protected:
QuantizeLSTM2Test() {
input_model_ = ReadModel(::mlir::lite::internal::kLstmCalibrated2);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeLSTM2Test, VerifyLSTM) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_FLOAT32, TensorType_FLOAT32,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
auto expected_fb_model = ReadModel(::mlir::lite::internal::kLstmQuantized2);
auto expected_read_only_model = expected_fb_model->GetModel();
ModelT expected_model;
expected_read_only_model->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
class QuantizeUnidirectionalSequenceLSTMTest : public QuantizeModelTest {
protected:
QuantizeUnidirectionalSequenceLSTMTest() {
input_model_ = ReadModel(
::mlir::lite::internal::kUnidirectionalSequenceLstmCalibrated);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeUnidirectionalSequenceLSTMTest,
VerifyUnidirectionalSequenceLSTM) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_FLOAT32, TensorType_FLOAT32,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
auto expected_fb_model =
ReadModel(::mlir::lite::internal::kUnidirectionalSequenceLstmQuantized);
auto expected_read_only_model = expected_fb_model->GetModel();
ModelT expected_model;
expected_read_only_model->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
class QuantizeSVDFTest : public QuantizeModelTest {
protected:
QuantizeSVDFTest() {
input_model_ = ReadModel(::mlir::lite::internal::kSvdfCalibrated);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeSVDFTest, VerifySVDF) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
auto expected_fb_model = ReadModel(::mlir::lite::internal::kSvdfQuantized);
auto expected_read_only_model = expected_fb_model->GetModel();
ModelT expected_model;
expected_read_only_model->UnPackTo(&expected_model);
ExpectSameModels(model_, expected_model);
}
class QuantizeFCTest : public QuantizeModelTest,
public testing::WithParamInterface<bool> {
protected:
QuantizeFCTest() {
disable_per_channel_quantization_for_dense_ = GetParam();
input_model_ = ReadModel(::mlir::lite::internal::kModelWithFCOp);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
bool disable_per_channel_quantization_for_dense_;
};
TEST_P(QuantizeFCTest, VerifyFC8x8) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
      /*allow_float=*/false, TensorType_INT8, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto op = subgraph->operators[0].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_FULLY_CONNECTED));
ASSERT_THAT(op->inputs, SizeIs(3));
ASSERT_THAT(op->outputs, SizeIs(1));
auto float_graph = readonly_model_->subgraphs()->Get(0);
auto float_op = float_graph->operators()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT8));
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(1))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[1]].get()->type,
Eq(TensorType_INT8));
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type,
Eq(TensorType_INT32));
ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT8));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_FULLY_CONNECTED));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(5));
auto float_input_quant_params =
float_graph->tensors()->Get(op->inputs[0])->quantization();
auto input_quant_params =
subgraph->tensors[op->inputs[0]]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
8, false);
}
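// 16x8 mode: activations are quantized to int16 while weights stay int8,
// which bumps the FULLY_CONNECTED op version to 11.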
TEST_P(QuantizeFCTest, VerifyFCFor16x8) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
false, TensorType_INT16, output_buffer_));
const std::unique_ptr<tflite::SubGraphT>& subgraph = model_.subgraphs[0];
const tflite::OperatorT* op = subgraph->operators[0].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_FULLY_CONNECTED));
ASSERT_THAT(op->inputs, SizeIs(3));
ASSERT_THAT(op->outputs, SizeIs(1));
const SubGraph* float_graph = readonly_model_->subgraphs()->Get(0);
const Operator* float_op = float_graph->operators()->Get(0);
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[0]].get()->type,
Eq(TensorType_INT16));
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(1))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[1]].get()->type,
Eq(TensorType_INT8));
ASSERT_THAT(float_graph->tensors()->Get(float_op->inputs()->Get(2))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->inputs[2]].get()->type,
Eq(TensorType_INT32));
ASSERT_THAT(float_graph->tensors()->Get(float_op->outputs()->Get(0))->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(subgraph->tensors[op->outputs[0]].get()->type,
Eq(TensorType_INT16));
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_FULLY_CONNECTED));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(11));
const int32_t target_input_index = op->inputs[0];
const QuantizationParameters* float_input_quant_params =
float_graph->tensors()->Get(target_input_index)->quantization();
const QuantizationParametersT* input_quant_params =
subgraph->tensors[target_input_index]->quantization.get();
VerifyQuantizationScale(*float_input_quant_params, *input_quant_params,
16, true);
}
TEST_P(QuantizeFCTest, VerifyDisablePerChannelQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(
&model_, TensorType_INT8, TensorType_INT8,
false, TensorType_INT8, output_buffer_,
disable_per_channel_quantization_for_dense_));
const auto& subgraph = model_.subgraphs[0];
auto fc_op = subgraph->operators[0].get();
ASSERT_THAT(fc_op->inputs, SizeIs(3));
ASSERT_THAT(fc_op->outputs, SizeIs(1));
const int input_tensor_idx = 0;
const int weights_tensor_idx = 1;
const int bias_tensor_index = 2;
const int output_tensor_idx = 0;
const auto bias_tensor =
subgraph->tensors[fc_op->inputs[bias_tensor_index]].get();
const auto input_tensor =
subgraph->tensors[fc_op->inputs[input_tensor_idx]].get();
const auto weights_tensor =
subgraph->tensors[fc_op->inputs[weights_tensor_idx]].get();
const auto output_tensor =
subgraph->tensors[fc_op->outputs[output_tensor_idx]].get();
EXPECT_THAT(bias_tensor->type, Eq(TensorType_INT32));
EXPECT_THAT(input_tensor->type, Eq(TensorType_INT8));
EXPECT_THAT(weights_tensor->type, Eq(TensorType_INT8));
EXPECT_THAT(output_tensor->type, Eq(TensorType_INT8));
ASSERT_TRUE(weights_tensor->quantization);
ASSERT_TRUE(bias_tensor->quantization);
const std::vector<float>& bias_scales = bias_tensor->quantization->scale;
const std::vector<float>& weights_scales =
weights_tensor->quantization->scale;
const std::vector<int64_t>& weights_zero_points =
weights_tensor->quantization->zero_point;
const int out_channel_size = 2;
ASSERT_THAT(bias_scales, SizeIs(disable_per_channel_quantization_for_dense_
? 1
: out_channel_size));
ASSERT_THAT(weights_scales, SizeIs(disable_per_channel_quantization_for_dense_
? 1
: out_channel_size));
ASSERT_THAT(
weights_zero_points,
SizeIs(disable_per_channel_quantization_for_dense_ ? 1
: out_channel_size));
ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1));
ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1));
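  // Each per-channel bias scale must satisfy
  // bias_scale = input_scale * weights_scale; with per-channel quantization
  // disabled, all channels collapse to a single scale.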
const float eps = 1e-7;
for (size_t i = 0; i < out_channel_size; i++) {
EXPECT_THAT((disable_per_channel_quantization_for_dense_ ? bias_scales[0]
: bias_scales[i]),
FloatNear(input_tensor->quantization->scale[0] *
(disable_per_channel_quantization_for_dense_
? weights_scales[0]
: weights_scales[i]),
eps));
}
const auto bias_buffer = model_.buffers[bias_tensor->buffer].get();
auto control_size = sizeof(int32_t) * bias_tensor->shape[0];
ASSERT_THAT(bias_buffer->data, SizeIs(control_size));
const auto float_op =
readonly_model_->subgraphs()->Get(0)->operators()->Get(0);
const auto original_bias_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(2));
const auto original_bias_buffer =
readonly_model_->buffers()->Get(original_bias_tensor->buffer());
const float* bias_float_buffer =
reinterpret_cast<const float*>(original_bias_buffer->data()->data());
int32_t* bias_values = reinterpret_cast<int32_t*>(bias_buffer->data.data());
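  // Quantization rounds to the nearest step, so each dequantized bias value
  // must land within half a scale step of the original float value.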
for (size_t i = 0; i < out_channel_size; i++) {
const float bias_scale = disable_per_channel_quantization_for_dense_
? bias_scales[0]
: bias_scales[i];
auto dequantized_value = bias_values[i] * bias_scale;
EXPECT_THAT(dequantized_value,
FloatNear(bias_float_buffer[i], bias_scale / 2));
}
const auto weights_buffer = model_.buffers[weights_tensor->buffer].get();
const auto original_weights_tensor =
readonly_model_->subgraphs()->Get(0)->tensors()->Get(
float_op->inputs()->Get(1));
const auto original_weights_buffer =
readonly_model_->buffers()->Get(original_weights_tensor->buffer());
const int8_t* weight_values =
reinterpret_cast<int8_t*>(weights_buffer->data.data());
const float* weights_float_buffer =
reinterpret_cast<const float*>(original_weights_buffer->data()->data());
ASSERT_THAT(sizeof(float) * weights_buffer->data.size(),
Eq(original_weights_buffer->data()->size()));
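  // Verify every weight dequantizes to within half a scale step of the float
  // original, and that zero points are 0 (weights are symmetrically
  // quantized).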
int num_values_in_channel = weights_buffer->data.size() / out_channel_size;
for (size_t channel_idx = 0; channel_idx < out_channel_size; channel_idx++) {
for (size_t j = 0; j < num_values_in_channel; j++) {
size_t element_idx = channel_idx * num_values_in_channel + j;
auto scale = disable_per_channel_quantization_for_dense_
? weights_scales[0]
: weights_scales[channel_idx];
auto zero_point = disable_per_channel_quantization_for_dense_
? weights_zero_points[0]
: weights_zero_points[channel_idx];
auto dequantized_value = weight_values[element_idx] * scale;
EXPECT_THAT(dequantized_value,
FloatNear(weights_float_buffer[element_idx], scale / 2));
EXPECT_THAT(zero_point, Eq(0));
}
}
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[0].get()),
Eq(BuiltinOperator_FULLY_CONNECTED));
  ASSERT_THAT(model_.operator_codes[0]->version, Eq(5));
}
INSTANTIATE_TEST_SUITE_P(QuantizeFCTestInst, QuantizeFCTest, testing::Bool());
class QuantizeCustomOpTest
: public QuantizeModelTest,
public ::testing::WithParamInterface<tflite::TensorType> {
protected:
QuantizeCustomOpTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelMixed);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_P(QuantizeCustomOpTest, VerifyMixedQuantization) {
TF_ASSERT_OK(QuantizeModelAllOperators(&model_, GetParam(), GetParam(),
true, GetParam(),
output_buffer_));
const auto& subgraph = model_.subgraphs[0];
auto float_graph = readonly_model_->subgraphs()->Get(0);
ASSERT_THAT(*float_graph->operators(), SizeIs(4));
ASSERT_THAT(subgraph->operators, SizeIs(6));
const std::vector<BuiltinOperator> op_codes = {
BuiltinOperator_RESHAPE, BuiltinOperator_DEQUANTIZE,
BuiltinOperator_CUSTOM, BuiltinOperator_CUSTOM,
BuiltinOperator_QUANTIZE, BuiltinOperator_SQUEEZE};
const std::vector<TensorType> op_input_types = {
GetParam(), GetParam(), TensorType_FLOAT32,
TensorType_FLOAT32, TensorType_FLOAT32, GetParam()};
for (int i = 0; i < subgraph->operators.size(); ++i) {
OperatorT* op = subgraph->operators[i].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(op_codes[i]));
ASSERT_THAT(subgraph->tensors[op->inputs[0]]->type, Eq(op_input_types[i]));
}
}
INSTANTIATE_TEST_SUITE_P(QuantizeCustomOpTest, QuantizeCustomOpTest,
::testing::Values(TensorType_INT8));
class QuantizePackTest : public QuantizeModelTest {
protected:
QuantizePackTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelPack);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizePackTest, VerifyPack) {
TF_ASSERT_OK(QuantizeModel(&model_, output_buffer_));
const auto subgraph = model_.subgraphs[0].get();
EXPECT_THAT(subgraph->inputs, SizeIs(3));
EXPECT_THAT(subgraph->outputs, SizeIs(1));
const auto& op1 = subgraph->operators[1].get();
const auto& op2 = subgraph->operators[2].get();
const auto& op3 = subgraph->operators[3].get();
const auto& op4 = subgraph->operators[4].get();
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op1->opcode_index].get()),
Eq(BuiltinOperator_QUANTIZE));
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op2->opcode_index].get()),
Eq(BuiltinOperator_QUANTIZE));
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op3->opcode_index].get()),
Eq(BuiltinOperator_PACK));
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op4->opcode_index].get()),
Eq(BuiltinOperator_DEQUANTIZE));
const auto& pack_input0 = subgraph->tensors[op3->inputs[0]].get();
const auto& pack_input1 = subgraph->tensors[op3->inputs[1]].get();
const auto& pack_input2 = subgraph->tensors[op3->inputs[2]].get();
const auto& pack_output = subgraph->tensors[op3->outputs[0]].get();
EXPECT_THAT(pack_input0->quantization->scale[0],
FloatEq(pack_input1->quantization->scale[0]));
EXPECT_THAT(pack_input1->quantization->scale[0],
FloatEq(pack_input2->quantization->scale[0]));
EXPECT_THAT(pack_input0->quantization->zero_point[0],
Eq(pack_input1->quantization->zero_point[0]));
EXPECT_THAT(pack_input1->quantization->zero_point[0],
Eq(pack_input2->quantization->zero_point[0]));
EXPECT_THAT(pack_input1->quantization->scale[0],
FloatEq(pack_output->quantization->scale[0]));
EXPECT_THAT(pack_input1->quantization->zero_point[0],
Eq(pack_output->quantization->zero_point[0]));
EXPECT_THAT(pack_output->type, Eq(TensorType_INT8));
EXPECT_THAT(pack_input0->type, Eq(TensorType_INT8));
EXPECT_THAT(pack_input1->type, Eq(TensorType_INT8));
EXPECT_THAT(pack_input2->type, Eq(TensorType_INT8));
}
class QuantizeMinimumMaximumTest
: public QuantizeModelTest,
public testing::WithParamInterface<const char*> {
protected:
QuantizeMinimumMaximumTest() {
input_model_ = ReadModel(GetParam());
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_P(QuantizeMinimumMaximumTest, VerifyMinimumMaximum) {
TF_ASSERT_OK(QuantizeModel(&model_, output_buffer_));
const auto& subgraph = model_.subgraphs[0];
const auto& quant_op = subgraph->operators[0];
const auto& dequant_op = subgraph->operators[subgraph->operators.size() - 1];
const int32_t quant_idx = quant_op->opcode_index;
const int32_t dequant_idx = dequant_op->opcode_index;
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[quant_idx].get()),
Eq(BuiltinOperator_QUANTIZE));
EXPECT_THAT(GetBuiltinCode(model_.operator_codes[dequant_idx].get()),
Eq(BuiltinOperator_DEQUANTIZE));
const auto& op = subgraph->operators[1].get();
auto op_builtin_code =
GetBuiltinCode(model_.operator_codes[op->opcode_index].get());
ASSERT_TRUE(op_builtin_code == tflite::BuiltinOperator_MINIMUM ||
op_builtin_code == tflite::BuiltinOperator_MAXIMUM);
ASSERT_THAT(op->inputs, SizeIs(2));
ASSERT_THAT(op->outputs, SizeIs(1));
auto output = subgraph->tensors[op->outputs[0]].get();
auto input1 = subgraph->tensors[op->inputs[0]].get();
auto input2 = subgraph->tensors[op->inputs[1]].get();
EXPECT_THAT(output->type, Eq(TensorType_INT8));
EXPECT_THAT(input1->type, Eq(TensorType_INT8));
EXPECT_THAT(input2->type, Eq(TensorType_INT8));
EXPECT_THAT(input1->quantization->scale, Eq(input2->quantization->scale));
EXPECT_THAT(input1->quantization->zero_point,
Eq(input2->quantization->zero_point));
EXPECT_THAT(output->quantization->scale, Eq(input1->quantization->scale));
EXPECT_THAT(output->quantization->zero_point,
Eq(input1->quantization->zero_point));
EXPECT_THAT(output->quantization->scale, Eq(input2->quantization->scale));
EXPECT_THAT(output->quantization->zero_point,
Eq(input2->quantization->zero_point));
}
INSTANTIATE_TEST_SUITE_P(
MinimumMaximumTestInst, QuantizeMinimumMaximumTest,
testing::ValuesIn({::mlir::lite::internal::kModelWithMinimumOp,
::mlir::lite::internal::kModelWithMaximumOp}));
class QuantizeUnpackTest : public QuantizeModelTest {
protected:
QuantizeUnpackTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelWithUnpack);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeUnpackTest, VerifyUnpack) {
TF_ASSERT_OK(QuantizeModel(&model_, output_buffer_));
const auto subgraph = model_.subgraphs[0].get();
auto op = subgraph->operators[1].get();
auto float_graph = readonly_model_->subgraphs()->Get(0);
ASSERT_THAT(GetBuiltinCode(model_.operator_codes[op->opcode_index].get()),
Eq(BuiltinOperator_UNPACK));
auto unpack_input = subgraph->tensors[op->inputs[0]].get();
auto unpack_output_0 = subgraph->tensors[op->outputs[0]].get();
auto unpack_output_1 = subgraph->tensors[op->outputs[1]].get();
ASSERT_THAT(float_graph->tensors()->Get(op->inputs[0])->type(),
Eq(TensorType_FLOAT32));
EXPECT_THAT(unpack_input->type, Eq(TensorType_INT8));
EXPECT_THAT(subgraph->inputs, SizeIs(1));
EXPECT_THAT(subgraph->outputs, SizeIs(2));
EXPECT_THAT(unpack_input->quantization->scale[0],
FloatEq(unpack_output_0->quantization->scale[0]));
EXPECT_THAT(unpack_input->quantization->scale[0],
FloatEq(unpack_output_1->quantization->scale[0]));
EXPECT_THAT(unpack_input->quantization->zero_point[0],
Eq(unpack_output_0->quantization->zero_point[0]));
EXPECT_THAT(unpack_input->quantization->zero_point[0],
Eq(unpack_output_1->quantization->zero_point[0]));
}
class QuantizeBroadcastToModelTest
: public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeBroadcastToModelTest() {
tensor_type_ = GetParam();
input_model_ = ReadModel(::mlir::lite::internal::kModelWithBroadcastToOp);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeBroadcastToModelTestInst,
QuantizeBroadcastToModelTest,
testing::ValuesIn({TensorType_INT8}));
TEST_P(QuantizeBroadcastToModelTest, VerifyBroadcastToQuantization) {
TF_EXPECT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
false, tensor_type_,
output_buffer_));
const int32_t subgraph_idx = 0;
const auto& subgraph = model_.subgraphs[subgraph_idx];
const auto& readonly_subgraph =
readonly_model_->subgraphs()->Get(subgraph_idx);
EXPECT_THAT(*readonly_subgraph->operators(), SizeIs(1));
EXPECT_THAT(subgraph->operators, SizeIs(1));
const auto& broadcast_to = subgraph->operators[0];
EXPECT_THAT(model_.operator_codes[broadcast_to->opcode_index]->builtin_code,
Eq(BuiltinOperator_BROADCAST_TO));
EXPECT_THAT(subgraph->tensors, SizeIs(3));
EXPECT_THAT(subgraph->tensors[0]->type, Eq(tensor_type_));
EXPECT_THAT(subgraph->tensors[0]->name, Eq("input_1"));
EXPECT_THAT(subgraph->tensors[0]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[0]->quantization->zero_point, SizeIs(1));
EXPECT_THAT(subgraph->tensors[2]->type, Eq(tensor_type_));
EXPECT_THAT(subgraph->tensors[2]->name, Eq("Identity"));
EXPECT_THAT(subgraph->tensors[2]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[2]->quantization->zero_point, SizeIs(1));
EXPECT_THAT(subgraph->tensors[1]->type, Eq(TensorType_INT32));
EXPECT_THAT(subgraph->tensors[1]->quantization->scale, IsEmpty());
EXPECT_THAT(subgraph->tensors[1]->quantization->zero_point, IsEmpty());
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(model_.operator_codes[0]->builtin_code,
Eq(BuiltinOperator_BROADCAST_TO));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(3));
}
class QuantizeGatherNDModelTest
: public QuantizeModelTest,
public testing::WithParamInterface<TensorType> {
protected:
QuantizeGatherNDModelTest() {
tensor_type_ = GetParam();
input_model_ = ReadModel(::mlir::lite::internal::kModelWithGatherNDOp);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
TensorType tensor_type_;
};
INSTANTIATE_TEST_SUITE_P(QuantizeGatherNDModelTestInst,
QuantizeGatherNDModelTest,
testing::ValuesIn({TensorType_INT8}));
TEST_P(QuantizeGatherNDModelTest, QuantizeGatherND) {
TF_EXPECT_OK(QuantizeModelAllOperators(&model_, tensor_type_, tensor_type_,
false, tensor_type_,
output_buffer_));
const int32_t subgraph_idx = 0;
const auto& subgraph = model_.subgraphs[subgraph_idx];
const auto& readonly_subgraph =
readonly_model_->subgraphs()->Get(subgraph_idx);
EXPECT_THAT(*readonly_subgraph->operators(), SizeIs(1));
EXPECT_THAT(subgraph->operators, SizeIs(1));
const auto& gather_nd = subgraph->operators[0];
EXPECT_THAT(model_.operator_codes[gather_nd->opcode_index]->builtin_code,
Eq(BuiltinOperator_GATHER_ND));
EXPECT_THAT(subgraph->tensors, SizeIs(3));
EXPECT_THAT(subgraph->tensors[0]->type, Eq(tensor_type_));
EXPECT_THAT(subgraph->tensors[0]->name, Eq("input"));
EXPECT_THAT(subgraph->tensors[0]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[0]->quantization->zero_point, SizeIs(1));
EXPECT_THAT(subgraph->tensors[2]->type, Eq(tensor_type_));
EXPECT_THAT(subgraph->tensors[2]->name, Eq("output"));
EXPECT_THAT(subgraph->tensors[2]->quantization->scale, SizeIs(1));
EXPECT_THAT(subgraph->tensors[2]->quantization->zero_point, SizeIs(1));
EXPECT_THAT(subgraph->tensors[1]->type, Eq(TensorType_INT32));
EXPECT_THAT(subgraph->tensors[1]->quantization->scale, IsEmpty());
EXPECT_THAT(subgraph->tensors[1]->quantization->zero_point, IsEmpty());
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(model_.operator_codes[0]->builtin_code,
Eq(BuiltinOperator_GATHER_ND));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(1));
}
class QuantizeWhereModelTest : public QuantizeModelTest {
protected:
QuantizeWhereModelTest() {
input_model_ = ReadModel(::mlir::lite::internal::kModelWithWhereOp);
readonly_model_ = input_model_->GetModel();
model_ = UnPackFlatBufferModel(*readonly_model_);
}
};
TEST_F(QuantizeWhereModelTest, QuantizeWhere) {
TF_EXPECT_OK(QuantizeModel(&model_, TensorType_BOOL, TensorType_INT64,
output_buffer_));
const int32_t subgraph_idx = 0;
const auto& subgraph = model_.subgraphs[subgraph_idx];
const auto& readonly_subgraph =
readonly_model_->subgraphs()->Get(subgraph_idx);
EXPECT_THAT(*readonly_subgraph->operators(), SizeIs(1));
EXPECT_THAT(subgraph->operators, SizeIs(1));
const auto& where = subgraph->operators[0];
EXPECT_THAT(model_.operator_codes[where->opcode_index]->builtin_code,
Eq(BuiltinOperator_WHERE));
EXPECT_THAT(subgraph->tensors, SizeIs(2));
EXPECT_THAT(subgraph->tensors[0]->type, Eq(TensorType_BOOL));
EXPECT_THAT(subgraph->tensors[0]->name, Eq("input"));
EXPECT_THAT(subgraph->tensors[0]->quantization->scale, IsEmpty());
EXPECT_THAT(subgraph->tensors[0]->quantization->zero_point, IsEmpty());
EXPECT_THAT(subgraph->tensors[1]->type, Eq(TensorType_INT64));
EXPECT_THAT(subgraph->tensors[1]->name, Eq("indices"));
EXPECT_THAT(subgraph->tensors[1]->quantization->scale, IsEmpty());
EXPECT_THAT(subgraph->tensors[1]->quantization->zero_point, IsEmpty());
EXPECT_THAT(model_.operator_codes, SizeIs(1));
EXPECT_THAT(model_.operator_codes[0]->builtin_code,
Eq(BuiltinOperator_WHERE));
ASSERT_THAT(model_.operator_codes[0]->version, Eq(1));
}
}
}
}
int main(int argc, char** argv) {
tensorflow::string model_file;
const std::vector<tensorflow::Flag> flag_list = {
tensorflow::Flag("test_model_file", &model_file,
"Path to test tflite model file."),
};
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
std::cerr << "Required test_model_file\n";
std::abort();
}
g_test_model_dir =
new tensorflow::string(tensorflow::io::Dirname(model_file));
::tensorflow::port::InitMain(argv[0], &argc, &argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d52c20ab-b940-4de5-843a-acbaa8160622 | cpp | tensorflow/tensorflow | model_utils | tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.cc | tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils_test.cc | #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
namespace mlir {
namespace lite {
namespace toco_legacy {
using std::string;
using tflite::BuiltinOperator;
using tflite::BuiltinOperator_DEQUANTIZE;
using tflite::ModelT;
using tflite::OperatorCodeT;
using tflite::OperatorT;
using tflite::TensorT;
using tflite::TensorType;
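// Returns the index of an existing operator code entry matching `op_code`,
// or appends a new entry (with its deprecated builtin code and `version`)
// and returns its index.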
int32_t GetOrInsertOpCodeIndex(ModelT* model, const BuiltinOperator& op_code,
int32_t version) {
for (size_t i = 0; i < model->operator_codes.size(); ++i) {
if (tflite::GetBuiltinCode(model->operator_codes[i].get()) == op_code) {
return i;
}
}
model->operator_codes.push_back(std::make_unique<OperatorCodeT>());
int op_code_idx = model->operator_codes.size() - 1;
model->operator_codes[op_code_idx]->builtin_code = op_code;
model->operator_codes[op_code_idx]->deprecated_builtin_code =
tflite::ConvertBuiltinCodeToDeprecatedBuiltinCode(op_code);
model->operator_codes[op_code_idx]->version = version;
return op_code_idx;
}
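// Builds a DEQUANTIZE (version 2) operator reading from tensor `input` and
// writing to tensor `output`, registering the opcode if necessary.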
void MakeDequantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output) {
OperatorT* op_raw = new OperatorT;
op_raw->opcode_index =
GetOrInsertOpCodeIndex(model, BuiltinOperator_DEQUANTIZE, 2);
op_raw->inputs = {input};
op_raw->outputs = {output};
op->reset(op_raw);
}
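// Creates a tensor with the given name, shape, optional shape signature, and
// element type.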
void MakeTensor(const string& name, const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = shape;
if (!shape_signature.empty()) {
tensor_raw->shape_signature = shape_signature;
}
tensor_raw->type = type;
tensor->reset(tensor_raw);
}
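// True iff the tensor carries quantization parameters with recorded min and
// max values.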
bool HasMinMax(const TensorT* tensor) {
return tensor->quantization && !tensor->quantization->min.empty() &&
!tensor->quantization->max.empty();
}
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
namespace mlir {
namespace lite {
namespace toco_legacy {
namespace {
using std::string;
TEST(ModelUtilsTest, HasMinMax) {
tflite::TensorT tensor;
tensor.quantization = std::make_unique<tflite::QuantizationParametersT>();
tensor.quantization->min.push_back(0.5);
EXPECT_FALSE(mlir::lite::toco_legacy::HasMinMax(&tensor));
tensor.quantization->max.push_back(1.5);
EXPECT_TRUE(mlir::lite::toco_legacy::HasMinMax(&tensor));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/model_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8aa133e2-df08-430d-ba92-71ab1f3fd17e | cpp | tensorflow/tensorflow | logging_op_resolver | tensorflow/lite/tools/optimize/calibration/logging_op_resolver.cc | tensorflow/lite/tools/optimize/calibration/logging_op_resolver_test.cc | #include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace optimize {
namespace calibration {
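// Wraps every requested builtin/custom op: the original invoke function is
// stashed so it can still be called, and a copy of the registration with
// `logging_eval_fn` as its invoke is registered in its place. Unresolved ops
// (other than flex custom ops) are reported through `error_reporter`.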
LoggingOpResolver::LoggingOpResolver(
const BuiltinOpsSet& builtin_ops_to_replace,
const CustomOpsSet& custom_ops_to_replace, const OpResolver& base_resolver,
KernelEvalFuncPtr logging_eval_fn, ErrorReporter* error_reporter) {
std::vector<std::string> unresolved_builtin_ops;
std::vector<std::string> unresolved_custom_ops;
for (const auto& op_and_version : builtin_ops_to_replace) {
const TfLiteRegistration* base_registration =
base_resolver.FindOp(op_and_version.first, op_and_version.second);
if (!base_registration) {
unresolved_builtin_ops.push_back(
EnumNameBuiltinOperator(op_and_version.first));
continue;
}
BuiltinOperatorKey key = op_and_version;
builtin_op_evalfn_map_[key] = base_registration->invoke;
auto logging_registration =
std::make_unique<TfLiteRegistration>(*base_registration);
logging_registration->invoke = logging_eval_fn;
builtin_op_registration_map_[key] = std::move(logging_registration);
}
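  // Custom ops get the same treatment; flex ops are silently skipped since
  // they are handled elsewhere (e.g., by the flex delegate) rather than by
  // the base resolver.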
for (const auto& op_and_version : custom_ops_to_replace) {
const TfLiteRegistration* base_registration = base_resolver.FindOp(
op_and_version.first.c_str(), op_and_version.second);
if (!base_registration) {
if (!IsFlexOp(op_and_version.first.c_str()))
unresolved_custom_ops.push_back(op_and_version.first.c_str());
continue;
}
CustomOperatorKey key = op_and_version;
custom_op_evalfn_map_[key] = base_registration->invoke;
auto logging_registration =
std::make_unique<TfLiteRegistration>(*base_registration);
logging_registration->invoke = logging_eval_fn;
custom_op_registration_map_[key] = std::move(logging_registration);
}
if (!unresolved_builtin_ops.empty() || !unresolved_custom_ops.empty()) {
if (!error_reporter) return;
std::string error_message =
"Failed to initialize op resolver for calibration:";
if (!unresolved_builtin_ops.empty())
absl::StrAppend(&error_message, "\nThere are unresolved builtin ops: [",
absl::StrJoin(unresolved_builtin_ops, ", "), "]");
if (!unresolved_custom_ops.empty()) {
absl::StrAppend(&error_message, "\nThere are unresolved custom ops: [",
absl::StrJoin(unresolved_custom_ops, ", "), "]");
}
TF_LITE_REPORT_ERROR(error_reporter, error_message.c_str());
}
}
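// Returns the logging registration if this builtin op was wrapped,
// nullptr otherwise.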
const TfLiteRegistration* LoggingOpResolver::FindOp(BuiltinOperator op,
int version) const {
BuiltinOperatorKey key = {op, version};
if (builtin_op_registration_map_.find(key) !=
builtin_op_registration_map_.end()) {
return builtin_op_registration_map_.at(key).get();
}
return nullptr;
}
KernelEvalFuncPtr LoggingOpResolver::GetWrappedKernelInvoke(BuiltinOperator op,
int version) const {
return builtin_op_evalfn_map_.at({op, version});
}
const TfLiteRegistration* LoggingOpResolver::FindOp(const char* op,
int version) const {
CustomOperatorKey key = {op, version};
if (custom_op_registration_map_.find(key) !=
custom_op_registration_map_.end()) {
return custom_op_registration_map_.at(key).get();
}
return nullptr;
}
KernelEvalFuncPtr LoggingOpResolver::GetWrappedKernelInvoke(const char* op,
int version) const {
return custom_op_evalfn_map_.at({op, version});
}
}
}
} | #include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus ConvEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus CustomPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus CustomEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus WrappingInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TEST(LoggingOpResolverTest, KernelInvokesAreReplaced) {
MutableOpResolver base_resolver;
TfLiteRegistration conv_registration = {};
conv_registration.prepare = ConvPrepare;
conv_registration.invoke = ConvEval;
base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
TfLiteRegistration add_registration = {};
add_registration.prepare = AddPrepare;
add_registration.invoke = AddEval;
base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
{BuiltinOperator_ADD, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_CONV_2D);
EXPECT_TRUE(reg->prepare == ConvPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
reg = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_ADD);
EXPECT_TRUE(reg->prepare == AddPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
}
TEST(LoggingOpResolverTest, OriginalKernelInvokesAreRetained) {
MutableOpResolver base_resolver;
TfLiteRegistration conv_registration = {};
conv_registration.prepare = ConvPrepare;
conv_registration.invoke = ConvEval;
base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
TfLiteRegistration add_registration = {};
add_registration.prepare = AddPrepare;
add_registration.invoke = AddEval;
base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
{BuiltinOperator_ADD, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
auto kernel_invoke =
resolver.GetWrappedKernelInvoke(BuiltinOperator_CONV_2D, 1);
EXPECT_TRUE(kernel_invoke == ConvEval);
kernel_invoke = resolver.GetWrappedKernelInvoke(BuiltinOperator_ADD, 1);
EXPECT_TRUE(kernel_invoke == AddEval);
}
TEST(LoggingOpResolverTest, OnlyOpsInReplacementSetAreReplaced) {
MutableOpResolver base_resolver;
TfLiteRegistration conv_registration = {};
conv_registration.prepare = ConvPrepare;
conv_registration.invoke = ConvEval;
base_resolver.AddBuiltin(BuiltinOperator_CONV_2D, &conv_registration);
TfLiteRegistration add_registration = {};
add_registration.prepare = AddPrepare;
add_registration.invoke = AddEval;
base_resolver.AddBuiltin(BuiltinOperator_ADD, &add_registration);
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_CONV_2D);
EXPECT_TRUE(reg->prepare == ConvPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
reg = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(nullptr, reg);
}
TEST(LoggingOpResolverTest, CustomOps) {
MutableOpResolver base_resolver;
TfLiteRegistration custom_registration = {};
custom_registration.prepare = CustomPrepare;
custom_registration.invoke = CustomEval;
std::string custom_op_name = "custom";
base_resolver.AddCustom(custom_op_name.c_str(), &custom_registration);
CustomOpsSet ops_to_replace = {
{custom_op_name, 1},
};
LoggingOpResolver resolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(custom_op_name.c_str(), 1);
EXPECT_EQ(reg->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_EQ(reg->custom_name, custom_op_name.c_str());
EXPECT_TRUE(reg->prepare == CustomPrepare);
EXPECT_TRUE(reg->invoke == WrappingInvoke);
}
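// Construction with an op missing from the base resolver (and a null error
// reporter) must not crash.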
TEST(LoggingOpResolverTest, UnresolvedCustomOps) {
MutableOpResolver base_resolver;
std::string custom_op_name = "unresolved_custom_op";
CustomOpsSet ops_to_replace = {
{custom_op_name, 1},
};
LoggingOpResolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
WrappingInvoke, nullptr);
}
TEST(LoggingOpResolverTest, UnresolvedBuiltinOps) {
MutableOpResolver base_resolver;
BuiltinOpsSet ops_to_replace = {
{BuiltinOperator_CONV_2D, 1},
{BuiltinOperator_ADD, 1},
};
LoggingOpResolver resolver(ops_to_replace, CustomOpsSet(), base_resolver,
WrappingInvoke, nullptr);
}
TEST(LoggingOpResolverTest, FlexOps) {
MutableOpResolver base_resolver;
std::string custom_op_name = "FlexAdd";
CustomOpsSet ops_to_replace = {
{custom_op_name, 1},
};
LoggingOpResolver resolver(BuiltinOpsSet(), ops_to_replace, base_resolver,
WrappingInvoke, nullptr);
auto reg = resolver.FindOp(custom_op_name.c_str(), 1);
EXPECT_TRUE(!reg);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/logging_op_resolver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/logging_op_resolver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5049f30e-5fab-4599-b26d-ef55273675f2 | cpp | tensorflow/tensorflow | calibrator | tensorflow/lite/tools/optimize/calibration/calibrator.cc | tensorflow/lite/tools/optimize/calibration/calibrator_test.cc | #include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/allocation.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_logger.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
#include "tensorflow/lite/tools/optimize/calibration/custom_logging_ops/lstm.h"
#include "tensorflow/lite/tools/optimize/calibration/logging_op.h"
#include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
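// Per-interpreter state for calibration: maps each TfLiteNode to its
// OperatorInfo, owns the wrapped op resolver, and accumulates tensor
// min/max statistics in a Logger.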
class Calibrator {
public:
Calibrator(const std::unordered_map<const TfLiteNode*, OperatorInfo>&
node_ptr_opinfo_map,
std::unique_ptr<LoggingOpResolver> logging_op_resolver,
ErrorReporter* error_reporter)
: node_ptr_opinfo_map_(node_ptr_opinfo_map),
logging_op_resolver_(std::move(logging_op_resolver)),
error_reporter_(error_reporter) {
logger_ = std::make_unique<Logger>();
}
KernelEvalFuncPtr GetKernelInvoke(const TfLiteNode* node) const;
Logger* GetLogger() const { return logger_.get(); }
ErrorReporter* GetErrorReporter() const { return error_reporter_; }
const OperatorInfo& GetOpInfo(const TfLiteNode* node) const {
return node_ptr_opinfo_map_.at(node);
}
std::vector<const TfLiteNode*> GetNodesUnderCalibration() {
std::vector<const TfLiteNode*> nodes;
nodes.reserve(node_ptr_opinfo_map_.size());
for (const auto& entry : node_ptr_opinfo_map_) {
nodes.push_back(entry.first);
}
return nodes;
}
private:
std::unordered_map<const TfLiteNode*, OperatorInfo> node_ptr_opinfo_map_;
std::unique_ptr<LoggingOpResolver> logging_op_resolver_;
const std::unordered_map<int, OperatorInfo> index_opinfo_;
std::unique_ptr<Logger> logger_;
ErrorReporter* error_reporter_;
};
KernelEvalFuncPtr Calibrator::GetKernelInvoke(const TfLiteNode* node) const {
auto op_info = node_ptr_opinfo_map_.at(node);
if (op_info.is_custom_op) {
return logging_op_resolver_->GetWrappedKernelInvoke(op_info.name.c_str(),
op_info.version);
}
return logging_op_resolver_->GetWrappedKernelInvoke(op_info.builtin_op_code,
op_info.version);
}
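// Process-wide registry that lets the free-standing LoggingEval kernel find
// the Calibrator owning a given node/context.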
class GlobalCalibratorRegistry {
public:
Calibrator* GetCalibrator(const TfLiteNode* node) const {
if (node_to_calibrator_.find(node) == node_to_calibrator_.cend()) {
return nullptr;
}
return node_to_calibrator_.at(node);
}
void RemoveCalibrator(const TfLiteContext* context) {
Calibrator* calibrator = calibrator_registry_.at(context).get();
auto nodes = calibrator->GetNodesUnderCalibration();
for (auto node : nodes) {
node_to_calibrator_.erase(node);
}
calibrator_registry_.erase(context);
}
TfLiteStatus CreateCalibrator(
const TfLiteContext* context,
const std::unordered_map<const TfLiteNode*, OperatorInfo>& node_to_opinfo,
std::unique_ptr<LoggingOpResolver> logging_op_resolver,
Calibrator** calibrator_ptr, ErrorReporter* reporter) {
if (calibrator_registry_.find(context) != calibrator_registry_.cend()) {
reporter->Report(
"Failed to create calibrator, context already registered.");
return kTfLiteError;
}
auto calibrator = std::make_unique<Calibrator>(
node_to_opinfo, std::move(logging_op_resolver), reporter);
calibrator_registry_[context] = std::move(calibrator);
*calibrator_ptr = calibrator_registry_.at(context).get();
for (const auto& entry : node_to_opinfo) {
node_to_calibrator_[entry.first] = *calibrator_ptr;
}
return kTfLiteOk;
}
private:
absl::flat_hash_map<const TfLiteContext*, std::unique_ptr<Calibrator>>
calibrator_registry_;
absl::flat_hash_map<const TfLiteNode*, Calibrator*> node_to_calibrator_;
};
GlobalCalibratorRegistry* GetCalibratorRegistry() {
static GlobalCalibratorRegistry* registry = new GlobalCalibratorRegistry();
return registry;
}
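// LSTM variants need kernels that also log intermediate tensors: LSTM nodes
// with 12 intermediates use the custom logging kernel, other LSTMs the
// builtin one. Returns nullptr for ops without a specialized logging kernel.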
logging_kernel_func_ptr GetLoggingEvalFunc(TfLiteContext* context,
TfLiteNode* node,
int builtin_op_code) {
switch (builtin_op_code) {
case BuiltinOperator_LSTM: {
if (node->intermediates->size == 12) {
return tflite::optimize::calibration::custom::lstm_logging_kernel;
}
return tflite::optimize::calibration::builtin::lstm_logging_kernel;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
return tflite::optimize::calibration::builtin::
unidirectional_sequence_lstm_logging_kernel;
default:
return nullptr;
}
}
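// The wrapped invoke: logs input values, runs the original kernel (or an
// op-specific logging kernel), then re-logs inputs (which stateful kernels
// may have updated in place) and logs outputs.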
TfLiteStatus LoggingEval(TfLiteContext* context, TfLiteNode* node) {
Calibrator* calibrator = GetCalibratorRegistry()->GetCalibrator(node);
if (!calibrator) {
TF_LITE_KERNEL_LOG(context, "No calibrator found for context.");
return kTfLiteError;
}
auto kernel_invoke = calibrator->GetKernelInvoke(node);
auto logger = calibrator->GetLogger();
auto op_info = calibrator->GetOpInfo(node);
auto error_reporter = calibrator->GetErrorReporter();
for (int i : op_info.loggable_inputs) {
auto tensor = context->tensors[i];
TF_LITE_ENSURE_STATUS(
logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
tensor.bytes / sizeof(float), error_reporter));
}
auto builtin_op_code = calibrator->GetOpInfo(node).builtin_op_code;
auto kernel_invoke_intermediate =
GetLoggingEvalFunc(context, node, builtin_op_code);
if (kernel_invoke_intermediate == nullptr) {
TF_LITE_ENSURE_STATUS(kernel_invoke(context, node));
} else {
TF_LITE_ENSURE_STATUS(
kernel_invoke_intermediate(context, op_info.subgraph_index, node,
calibrator->GetLogger(), error_reporter));
}
for (int i : op_info.loggable_inputs) {
auto tensor = context->tensors[i];
TF_LITE_ENSURE_STATUS(
logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
tensor.bytes / sizeof(float), error_reporter));
}
for (int i : op_info.loggable_outputs) {
auto tensor = context->tensors[i];
TF_LITE_ENSURE_STATUS(
logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
tensor.bytes / sizeof(float), error_reporter));
}
return kTfLiteOk;
}
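// A tensor is loggable iff it is FLOAT32 and has no constant buffer, i.e. it
// is an activation rather than a weight; optional tensors are skipped.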
std::vector<int> GetLoggableTensorIndices(
const std::vector<int>& tensor_indices,
const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors,
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* tensor_buffers) {
std::vector<int> loggable;
for (auto tensor_index : tensor_indices) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
}
auto tensor = tensors->Get(tensor_index);
auto buffer_index = tensor->buffer();
const bool has_no_buffer =
(tensor_buffers->Get(buffer_index) == nullptr) ||
(tensor_buffers->Get(buffer_index)->data() == nullptr) ||
(tensor_buffers->Get(buffer_index)->data()->size() == 0);
if (has_no_buffer && tensor->type() == tflite::TensorType_FLOAT32) {
loggable.push_back(tensor_index);
}
}
return loggable;
}
TfLiteStatus GetNodeOpInfoMapAndContext(
const absl::flat_hash_map<std::tuple<int, int>, OperatorInfo>&
node_to_opinfo,
tflite::Interpreter* const interpreter,
std::unordered_map<const TfLiteNode*, OperatorInfo>* node_ptr_opinfo_map,
TfLiteContext** context) {
*context = interpreter->primary_subgraph().context();
TF_LITE_ENSURE(*context,
interpreter->execution_plan().size() <= node_to_opinfo.size());
for (const auto& entry : node_to_opinfo) {
auto op_info = entry.second;
int subgraph_index, op_index;
std::tie(subgraph_index, op_index) = entry.first;
const auto* node_and_reg =
interpreter->node_and_registration(subgraph_index, op_index);
op_info.registration = &node_and_reg->second;
node_ptr_opinfo_map->insert({&node_and_reg->first, op_info});
}
return kTfLiteOk;
}
string GetOpName(const tflite::OperatorCode& opcode) {
if (opcode.custom_code() != nullptr) {
return opcode.custom_code()->str();
}
return tflite::EnumNamesBuiltinOperator()[GetBuiltinCode(&opcode)];
}
class Reader : public CalibrationReader {
public:
Reader(const TfLiteContext* context, const Logger* logger)
: CalibrationReader(logger), context_(context) {}
~Reader() override { GetCalibratorRegistry()->RemoveCalibrator(context_); }
private:
const TfLiteContext* context_;
};
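// Some ops legitimately have no inputs or outputs; these predicates suppress
// the missing-tensor warnings below for them.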
bool HasInputs(BuiltinOperator code) {
switch (code) {
case BuiltinOperator_CALL_ONCE:
case BuiltinOperator_VAR_HANDLE:
case BuiltinOperator_CUSTOM:
return false;
default:
return true;
}
}
bool HasOutputs(BuiltinOperator code) {
switch (code) {
case BuiltinOperator_ASSIGN_VARIABLE:
case BuiltinOperator_CALL_ONCE:
case BuiltinOperator_CUSTOM:
return false;
default:
return true;
}
}
}
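// Builds an interpreter whose kernels are wrapped with LoggingEval, plus a
// CalibrationReader for retrieving the recorded per-tensor min/max stats.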
TfLiteStatus BuildLoggingInterpreter(
const FlatBufferModel& model, const OpResolver& op_resolver,
std::unique_ptr<Interpreter>* interpreter,
std::unique_ptr<CalibrationReader>* calibration_reader) {
return BuildLoggingInterpreter(model.GetModel(), model.error_reporter(),
op_resolver, interpreter, calibration_reader,
model.allocation());
}
TfLiteStatus BuildLoggingInterpreter(
const tflite::Model* tflite_model, ErrorReporter* error_reporter,
const OpResolver& op_resolver, std::unique_ptr<Interpreter>* interpreter,
std::unique_ptr<CalibrationReader>* calibration_reader,
const Allocation* allocation) {
if (error_reporter == nullptr) {
error_reporter = DefaultErrorReporter();
}
auto subgraphs = tflite_model->subgraphs();
auto tensor_buffers = tflite_model->buffers();
absl::flat_hash_map<std::tuple<int, int>, OperatorInfo> node_to_opinfo;
BuiltinOpsSet builtin_op_and_versions;
CustomOpsSet custom_op_and_versions;
for (size_t subgraph_index = 0; subgraph_index < subgraphs->size();
subgraph_index++) {
auto subgraph = subgraphs->Get(subgraph_index);
auto operator_codes = tflite_model->operator_codes();
auto operators = subgraph->operators();
auto tensors = subgraph->tensors();
if (!operators) {
continue;
}
for (size_t i = 0; i < operators->size(); i++) {
OperatorInfo op_info;
op_info.subgraph_index = subgraph_index;
op_info.node_index = i;
auto op = operators->Get(i);
auto operator_code = operator_codes->Get(op->opcode_index());
op_info.builtin_op_code = GetBuiltinCode(operator_code);
op_info.name = GetOpName(*operator_code);
op_info.is_custom_op = operator_code->custom_code() != nullptr;
op_info.version = operator_code->version();
auto op_inputs = op->inputs();
auto op_outputs = op->outputs();
if (op_inputs) {
op_info.inputs = std::vector<int>(op_inputs->begin(), op_inputs->end());
} else if (HasInputs(op_info.builtin_op_code)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "Op %s missing inputs",
op_info.name.c_str());
}
if (op_outputs) {
op_info.outputs =
std::vector<int>(op_outputs->begin(), op_outputs->end());
} else if (HasOutputs(op_info.builtin_op_code)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "Op %s missing outputs",
op_info.name.c_str());
}
op_info.loggable_inputs =
GetLoggableTensorIndices(op_info.inputs, tensors, tensor_buffers);
op_info.loggable_outputs =
GetLoggableTensorIndices(op_info.outputs, tensors, tensor_buffers);
if (op_info.is_custom_op) {
op_info.registration =
op_resolver.FindOp(op_info.name.c_str(), operator_code->version());
custom_op_and_versions.insert(
{op_info.name.c_str(), operator_code->version()});
} else {
op_info.registration = op_resolver.FindOp(GetBuiltinCode(operator_code),
operator_code->version());
builtin_op_and_versions.insert(
{op_info.builtin_op_code, operator_code->version()});
}
std::tuple<int, int> key{subgraph_index, i};
node_to_opinfo[key] = op_info;
}
}
auto logging_op_resolver = std::make_unique<LoggingOpResolver>(
builtin_op_and_versions, custom_op_and_versions, op_resolver, LoggingEval,
error_reporter);
tflite::InterpreterBuilder(tflite_model, *logging_op_resolver, error_reporter,
nullptr,
allocation)(interpreter);
if (!(*interpreter)) {
error_reporter->Report("Failed to construct interpreter");
return kTfLiteError;
}
std::unordered_map<const TfLiteNode*, OperatorInfo> node_ptr_opinfo_map;
TfLiteContext* context = nullptr;
TF_LITE_ENSURE_STATUS(GetNodeOpInfoMapAndContext(
node_to_opinfo, interpreter->get(), &node_ptr_opinfo_map, &context));
Calibrator* calibrator = nullptr;
TF_LITE_ENSURE_STATUS(GetCalibratorRegistry()->CreateCalibrator(
context, node_ptr_opinfo_map, std::move(logging_op_resolver), &calibrator,
error_reporter));
*calibration_reader = std::unique_ptr<CalibrationReader>(
new Reader(context, calibrator->GetLogger()));
return kTfLiteOk;
}
}
}
} | #include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
#include <cstring>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
namespace {
tensorflow::string* g_test_model_dir = nullptr;
}
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
std::unique_ptr<FlatBufferModel> ReadModel(const string& model_name) {
auto model_path = tensorflow::io::JoinPath(*g_test_model_dir, model_name);
return FlatBufferModel::BuildFromFile(model_path.c_str());
}
TEST(CalibratorTest, CalibrationStatsAreCollected) {
auto model = ReadModel("multi_add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_TRUE(stats.empty());
status = interpreter->AllocateTensors();
ASSERT_EQ(kTfLiteOk, status);
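  // multi_add takes four 1x8x8x3 float inputs; filling input i with the
  // value i+1 makes the intermediate sum 5 and the two outputs 6 and 9, so
  // every logged tensor has a single known min == max.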
const size_t tensor_size = 1 * 8 * 8 * 3;
std::vector<float> ones(tensor_size, 1.0f);
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);
for (size_t i = 0; i < tensor_size; i++) {
EXPECT_NEAR(tensor->data.f[i], 6.0f, eps);
}
tensor = interpreter->tensor(interpreter->outputs()[1]);
for (size_t i = 0; i < tensor_size; i++) {
EXPECT_NEAR(tensor->data.f[i], 9.0f, eps);
}
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(7, stats.size());
for (int tensor_idx = 0; tensor_idx < 4; tensor_idx++) {
EXPECT_NEAR(stats.find({0, tensor_idx})->second.min, tensor_idx + 1, eps);
EXPECT_NEAR(stats.find({0, tensor_idx})->second.max, tensor_idx + 1, eps);
}
EXPECT_NEAR(stats.find({0, 4})->second.min, 5, eps);
EXPECT_NEAR(stats.find({0, 4})->second.max, 5, eps);
EXPECT_NEAR(stats.find({0, 5})->second.min, 6, eps);
EXPECT_NEAR(stats.find({0, 5})->second.max, 6, eps);
EXPECT_NEAR(stats.find({0, 6})->second.min, 9, eps);
EXPECT_NEAR(stats.find({0, 6})->second.max, 9, eps);
}
TEST(CalibratorTest, MultipleInvokes) {
auto model = ReadModel("multi_add.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(7, stats.size());
const float expected_values[7] = {
1.0f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(stats.find({0, tensor_idx})->second.min,
expected_values[tensor_idx], eps);
EXPECT_NEAR(stats.find({0, tensor_idx})->second.max,
expected_values[tensor_idx], eps);
}
TfLiteTensor* input0 = interpreter->tensor(0);
input0->data.f[0] = 1.5f;
input0->data.f[1] = 0.5f;
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(7, stats.size());
EXPECT_NEAR(stats.find({0, 0})->second.min, 0.5f, eps);
EXPECT_NEAR(stats.find({0, 0})->second.max, 1.5f, eps);
for (int tensor_idx = 1; tensor_idx < 5; tensor_idx++) {
EXPECT_NEAR(stats.find({0, tensor_idx})->second.min,
expected_values[tensor_idx], eps);
EXPECT_NEAR(stats.find({0, tensor_idx})->second.max,
expected_values[tensor_idx], eps);
}
EXPECT_NEAR(stats.find({0, 5})->second.min, 5.5f, eps);
EXPECT_NEAR(stats.find({0, 5})->second.max, 6.5f, eps);
EXPECT_NEAR(stats.find({0, 6})->second.min, 9.0f, eps);
EXPECT_NEAR(stats.find({0, 6})->second.max, 9.0f, eps);
}
TEST(CalibratorTest, UpdateMinMax) {
auto flatbuffer_model = ReadModel("multi_add.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
auto input_0_quant_params =
std::make_unique<tflite::QuantizationParametersT>();
input_0_quant_params->min.push_back(0.5);
input_0_quant_params->max.push_back(1.5);
model.subgraphs[0]->tensors[0]->quantization =
std::move(input_0_quant_params);
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
const float expected_min[7] = {
0.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
const float expected_max[7] = {
1.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, true);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_min[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_max[tensor_idx], eps);
}
const float expected_value[7] = {
1.0f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, false);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_value[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_value[tensor_idx], eps);
}
}
TEST(CalibratorTest, HandleNanValues) {
auto flatbuffer_model = ReadModel("multi_add.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1 * 8 * 8 * 3;
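  // Fill every other element with NaN: the logger must ignore NaNs, so the
  // recorded stats match the NaN-free UpdateMinMax case.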
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
for (size_t j = 0; j < tensor_size; j++) {
if (j % 2 == 0) {
tensor->data.f[j] = NAN;
} else {
tensor->data.f[j] = i + 1;
}
}
}
auto input_0_quant_params =
std::make_unique<tflite::QuantizationParametersT>();
input_0_quant_params->min.push_back(0.5);
input_0_quant_params->max.push_back(1.5);
model.subgraphs[0]->tensors[0]->quantization =
std::move(input_0_quant_params);
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
const float eps = 1e-6f;
const float expected_min[7] = {
0.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
const float expected_max[7] = {
1.5f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, true);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_min[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_max[tensor_idx], eps);
}
const float expected_value[7] = {
1.0f,
2.0f,
3.0f,
4.0f,
5.0f,
6.0f,
9.0f,
};
status = reader->AddCalibrationToModel(&model, false);
for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
expected_value[tensor_idx], eps);
EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
expected_value[tensor_idx], eps);
}
}
TEST(CalibratorTest, LSTM) {
auto flatbuffer_model = ReadModel("lstm.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(status, kTfLiteOk);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
status = interpreter->AllocateTensors();
EXPECT_EQ(kTfLiteOk, status);
const std::vector<float> lstm_input = {0.3, 0.2};
int input_tensor_idx = interpreter->inputs()[0];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
for (size_t j = 0; j < lstm_input.size(); j++) {
tensor->data.f[j] = lstm_input[j];
}
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {0.200000, 0.300000}},
{{0, 18}, {0.000000, 0.468415}},
{{0, 19}, {0.000000, 0.424350}},
{{0, 24}, {0.265968, 0.468415}},
{{0, 25}, {0.080045, 0.170588}},
{{0, 26}, {0.080045, 0.170588}},
{{0, 27}, {0.080045, 0.170588}},
{{0, 28}, {0.080045, 0.170588}},
{{0, 29}, {0.000000, 0.270944}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, UnidirectionalSequenceLSTM) {
auto flatbuffer_model = ReadModel("unidirectional_sequence_lstm.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
const std::vector<float> lstm_input = {0.3, 0.2, 0.9, 0.8};
int input_tensor_idx = interpreter->inputs()[0];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
for (size_t j = 0; j < lstm_input.size(); j++) {
tensor->data.f[j] = lstm_input[j];
}
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {0.200000, 0.900000}},
{{0, 18}, {0.000000, 0.520999}},
{{0, 19}, {0.000000, 0.711364}},
{{0, 24}, {0.247992, 0.520999}},
{{0, 25}, {0.080045, 0.824241}},
{{0, 26}, {0.080045, 0.824241}},
{{0, 27}, {0.080045, 0.824241}},
{{0, 28}, {0.080045, 0.824241}},
{{0, 29}, {0.000000, 0.413618}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, CustomLSTM) {
auto flatbuffer_model = ReadModel("custom_lstm.bin");
ASSERT_TRUE(flatbuffer_model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(*flatbuffer_model,
ops::builtin::BuiltinOpResolver{},
&interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
auto readonly_model = flatbuffer_model->GetModel();
tflite::ModelT model;
readonly_model->UnPackTo(&model);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
const std::vector<float> lstm_input = {0.3, 0.2, 0.9, 0.8};
int input_tensor_idx = interpreter->inputs()[0];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
for (size_t j = 0; j < lstm_input.size(); j++) {
tensor->data.f[j] = lstm_input[j];
}
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {0.200000, 0.300000}},
{{0, 18}, {0.000000, 0.468415}},
{{0, 19}, {0.000000, 0.424349}},
{{0, 24}, {0.265968, 0.468415}},
{{0, 25}, {0.080045, 0.170588}},
{{0, 26}, {0.080045, 0.170588}},
{{0, 27}, {0.000000, 0.000000}},
{{0, 28}, {0.080045, 0.170588}},
{{0, 29}, {0.080045, 0.170588}},
{{0, 30}, {0.000000, 0.000000}},
{{0, 31}, {0.080045, 0.170588}},
{{0, 32}, {0.080045, 0.170588}},
{{0, 33}, {0.000000, 0.000000}},
{{0, 34}, {0.080045, 0.170588}},
{{0, 35}, {0.080045, 0.170588}},
{{0, 36}, {0.000000, 0.000000}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
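// Calibration must cover every invoked subgraph: this model contains a WHILE
// loop, so stats are expected from more than one subgraph.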
TEST(CalibratorTest, CalibrationWithMultipleSubgraphs) {
auto model = ReadModel("multi_subgraphs_while.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_TRUE(stats.empty());
status = interpreter->AllocateTensors();
ASSERT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(int));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(4, stats.size());
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {1.0, 1.0}},
{{0, 4}, {4.0, 4.0}},
{{2, 2}, {1.0, 2.0}},
{{2, 6}, {2.0, 4.0}},
};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
TEST(CalibratorTest, CalibrationWithCallOnce) {
auto model = ReadModel("call_once_mul.bin");
ASSERT_TRUE(model);
std::unique_ptr<Interpreter> interpreter;
std::unique_ptr<CalibrationReader> reader;
auto status = BuildLoggingInterpreter(
*model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
EXPECT_EQ(kTfLiteOk, status);
ASSERT_TRUE(interpreter);
ASSERT_TRUE(reader);
absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
stats;
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_TRUE(stats.empty());
status = interpreter->AllocateTensors();
ASSERT_EQ(kTfLiteOk, status);
const size_t tensor_size = 1;
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input_tensor_idx = interpreter->inputs()[i];
TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
ASSERT_EQ(tensor->bytes, tensor_size * sizeof(int));
for (size_t j = 0; j < tensor_size; j++) {
tensor->data.f[j] = i + 1;
}
}
status = interpreter->Invoke();
ASSERT_EQ(kTfLiteOk, status);
status = reader->GetTensorStatsAsMap(&stats);
EXPECT_EQ(kTfLiteOk, status);
EXPECT_EQ(3, stats.size());
const float eps = 1e-6f;
const absl::flat_hash_map<std::tuple<int, int>,
CalibrationReader::CalibrationStats>
expected_calibration_result = {
{{0, 0}, {1.0, 1.0}},
{{0, 2}, {2.0, 2.0}},
{{0, 3}, {2.0, 2.0}}};
EXPECT_EQ(expected_calibration_result.size(), stats.size());
for (const auto& e : stats) {
auto expected_result = expected_calibration_result.find(e.first)->second;
EXPECT_NEAR(e.second.min, expected_result.min, eps);
EXPECT_NEAR(e.second.max, expected_result.max, eps);
}
}
}
}
}
}
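// Standalone test driver: the test_model_file flag supplies one model path,
// and its directory is used to locate the remaining test models.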
int main(int argc, char** argv) {
tensorflow::string model_file;
const std::vector<tensorflow::Flag> flag_list = {
tensorflow::Flag("test_model_file", &model_file,
"Path to test tflite model file."),
};
const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
    std::cerr << "Required flag: --test_model_file\n";
std::abort();
}
g_test_model_dir =
new tensorflow::string(tensorflow::io::Dirname(model_file));
::tensorflow::port::InitMain(argv[0], &argc, &argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/calibrator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/calibrator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c45fb7a3-0e45-4799-97f2-897cff4b579a | cpp | tensorflow/tensorflow | signature_def_util | tensorflow/lite/tools/signature/signature_def_util.cc | tensorflow/lite/tools/signature/signature_def_util_test.cc | #include "tensorflow/lite/tools/signature/signature_def_util.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flexbuffers.h"
#include "flatbuffers/vector.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tsl/platform/status.h"
namespace tflite {
namespace {
using tensorflow::Status;
using SerializedSignatureDefMap = std::map<std::string, std::string>;
using SignatureDefMap = std::map<std::string, tensorflow::SignatureDef>;
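// Returns the metadata entry holding serialized SignatureDefs, or nullptr if
// the model has none.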
const Metadata* GetSignatureDefMetadata(const Model* model) {
if (!model || !model->metadata()) {
return nullptr;
}
for (int i = 0; i < model->metadata()->size(); ++i) {
const Metadata* metadata = model->metadata()->Get(i);
if (metadata->name()->str() == kSignatureDefsMetadataName) {
return metadata;
}
}
return nullptr;
}
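// Decodes the flexbuffer map stored in `metadata`'s buffer into a map from
// signature key to serialized SignatureDef proto.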
Status ReadSignatureDefMap(const Model* model, const Metadata* metadata,
SerializedSignatureDefMap* map) {
if (!model || !metadata || !map) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
const flatbuffers::Vector<uint8_t>* flatbuffer_data =
model->buffers()->Get(metadata->buffer())->data();
const auto signature_defs =
flexbuffers::GetRoot(flatbuffer_data->data(), flatbuffer_data->size())
.AsMap();
for (int i = 0; i < signature_defs.Keys().size(); ++i) {
const std::string key = signature_defs.Keys()[i].AsString().c_str();
(*map)[key] = signature_defs[key].AsString().c_str();
}
return absl::OkStatus();
}
}
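// Serializes `signature_def_map` into a flexbuffer map and stores it in the
// model's signature-def metadata buffer, creating the buffer and metadata
// entry if they do not exist yet. The repacked model is returned through
// `model_data_with_signature_def`.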
Status SetSignatureDefMap(const Model* model,
const SignatureDefMap& signature_def_map,
std::string* model_data_with_signature_def) {
if (!model || !model_data_with_signature_def) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
if (signature_def_map.empty()) {
return tensorflow::errors::InvalidArgument(
"signature_def_map should not be empty");
}
flexbuffers::Builder fbb;
const size_t start_map = fbb.StartMap();
auto mutable_model = std::make_unique<ModelT>();
model->UnPackTo(mutable_model.get(), nullptr);
int buffer_id = mutable_model->buffers.size();
const Metadata* metadata = GetSignatureDefMetadata(model);
if (metadata) {
buffer_id = metadata->buffer();
} else {
auto buffer = std::make_unique<BufferT>();
mutable_model->buffers.emplace_back(std::move(buffer));
auto sigdef_metadata = std::make_unique<MetadataT>();
sigdef_metadata->buffer = buffer_id;
sigdef_metadata->name = kSignatureDefsMetadataName;
mutable_model->metadata.emplace_back(std::move(sigdef_metadata));
}
for (const auto& entry : signature_def_map) {
fbb.String(entry.first.c_str(), entry.second.SerializeAsString());
}
fbb.EndMap(start_map);
fbb.Finish();
mutable_model->buffers[buffer_id]->data = fbb.GetBuffer();
flatbuffers::FlatBufferBuilder builder;
auto packed_model = Model::Pack(builder, mutable_model.get());
FinishModelBuffer(builder, packed_model);
*model_data_with_signature_def =
std::string(reinterpret_cast<const char*>(builder.GetBufferPointer()),
builder.GetSize());
return absl::OkStatus();
}
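// Returns true if the model carries a SignatureDef stored under
// `signature_key`.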
bool HasSignatureDef(const Model* model, const std::string& signature_key) {
if (!model) {
return false;
}
const Metadata* metadata = GetSignatureDefMetadata(model);
if (!metadata) {
return false;
}
SerializedSignatureDefMap signature_defs;
if (ReadSignatureDefMap(model, metadata, &signature_defs) !=
absl::OkStatus()) {
return false;
}
return (signature_defs.find(signature_key) != signature_defs.end());
}
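// Parses every SignatureDef stored in the model's metadata into
// `signature_def_map`; the map is left unchanged when no metadata exists.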
Status GetSignatureDefMap(const Model* model,
SignatureDefMap* signature_def_map) {
if (!model || !signature_def_map) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
SignatureDefMap retrieved_signature_def_map;
const Metadata* metadata = GetSignatureDefMetadata(model);
if (metadata) {
SerializedSignatureDefMap signature_defs;
auto status = ReadSignatureDefMap(model, metadata, &signature_defs);
if (status != absl::OkStatus()) {
return tensorflow::errors::Internal("Error reading signature def map: ",
status.message());
}
for (const auto& entry : signature_defs) {
tensorflow::SignatureDef signature_def;
if (!signature_def.ParseFromString(entry.second)) {
return tensorflow::errors::Internal(
"Cannot parse signature def found in flatbuffer.");
}
retrieved_signature_def_map[entry.first] = signature_def;
}
*signature_def_map = retrieved_signature_def_map;
}
return absl::OkStatus();
}
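// Removes the signature-def metadata entry and its backing buffer, then
// repacks the model into `model_data`.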
Status ClearSignatureDefMap(const Model* model, std::string* model_data) {
if (!model || !model_data) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
auto mutable_model = std::make_unique<ModelT>();
model->UnPackTo(mutable_model.get(), nullptr);
for (int id = 0; id < model->metadata()->size(); ++id) {
const Metadata* metadata = model->metadata()->Get(id);
if (metadata->name()->str() == kSignatureDefsMetadataName) {
auto* buffers = &(mutable_model->buffers);
buffers->erase(buffers->begin() + metadata->buffer());
mutable_model->metadata.erase(mutable_model->metadata.begin() + id);
break;
}
}
flatbuffers::FlatBufferBuilder builder;
auto packed_model = Model::Pack(builder, mutable_model.get());
FinishModelBuffer(builder, packed_model);
*model_data =
std::string(reinterpret_cast<const char*>(builder.GetBufferPointer()),
builder.GetSize());
return absl::OkStatus();
}
} | #include "tensorflow/lite/tools/signature/signature_def_util.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "flatbuffers/buffer.h"
#include "tensorflow/cc/saved_model/signature_constants.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tsl/platform/status.h"
namespace tflite {
namespace {
using tensorflow::kClassifyMethodName;
using tensorflow::kDefaultServingSignatureDefKey;
using tensorflow::kPredictMethodName;
using tensorflow::SignatureDef;
using tensorflow::Status;
constexpr char kSignatureInput[] = "input";
constexpr char kSignatureOutput[] = "output";
constexpr char kTestFilePath[] = "tensorflow/lite/testdata/add.bin";
class SimpleSignatureDefUtilTest : public testing::Test {
protected:
void SetUp() override {
flatbuffer_model_ = FlatBufferModel::BuildFromFile(kTestFilePath);
ASSERT_NE(flatbuffer_model_, nullptr);
model_ = flatbuffer_model_->GetModel();
ASSERT_NE(model_, nullptr);
}
SignatureDef GetTestSignatureDef() {
auto signature_def = SignatureDef();
tensorflow::TensorInfo input_tensor;
tensorflow::TensorInfo output_tensor;
*input_tensor.mutable_name() = kSignatureInput;
*output_tensor.mutable_name() = kSignatureOutput;
*signature_def.mutable_method_name() = kClassifyMethodName;
(*signature_def.mutable_inputs())[kSignatureInput] = input_tensor;
(*signature_def.mutable_outputs())[kSignatureOutput] = output_tensor;
return signature_def;
}
std::unique_ptr<FlatBufferModel> flatbuffer_model_;
const Model* model_;
};
TEST_F(SimpleSignatureDefUtilTest, SetSignatureDefTest) {
SignatureDef expected_signature_def = GetTestSignatureDef();
std::string model_output;
const std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
SignatureDef test_signature_def =
test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
}
TEST_F(SimpleSignatureDefUtilTest, OverwriteSignatureDefTest) {
auto expected_signature_def = GetTestSignatureDef();
std::string model_output;
std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
SignatureDef test_signature_def =
test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
*expected_signature_def.mutable_method_name() = kPredictMethodName;
expected_signature_def_map.erase(
expected_signature_def_map.find(kDefaultServingSignatureDefKey));
constexpr char kTestSignatureDefKey[] = "ServingTest";
expected_signature_def_map[kTestSignatureDefKey] = expected_signature_def;
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(add_model, expected_signature_def_map, &model_output));
const Model* final_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_FALSE(HasSignatureDef(final_model, kDefaultServingSignatureDefKey));
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(final_model, &test_signature_def_map));
EXPECT_NE(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
EXPECT_TRUE(HasSignatureDef(final_model, kTestSignatureDefKey));
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(final_model, &test_signature_def_map));
test_signature_def = test_signature_def_map[kTestSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
}
TEST_F(SimpleSignatureDefUtilTest, GetSignatureDefTest) {
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(model_, &test_signature_def_map));
EXPECT_FALSE(HasSignatureDef(model_, kDefaultServingSignatureDefKey));
}
TEST_F(SimpleSignatureDefUtilTest, ClearSignatureDefTest) {
const int expected_num_buffers = model_->buffers()->size();
auto expected_signature_def = GetTestSignatureDef();
std::string model_output;
std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
SignatureDef test_signature_def;
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
test_signature_def = test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
EXPECT_EQ(absl::OkStatus(), ClearSignatureDefMap(add_model, &model_output));
const Model* clear_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_FALSE(HasSignatureDef(clear_model, kDefaultServingSignatureDefKey));
EXPECT_EQ(expected_num_buffers, clear_model->buffers()->size());
}
TEST_F(SimpleSignatureDefUtilTest, SetSignatureDefErrorsTest) {
std::map<string, SignatureDef> test_signature_def_map;
std::string model_output;
EXPECT_TRUE(tensorflow::errors::IsInvalidArgument(
SetSignatureDefMap(model_, test_signature_def_map, &model_output)));
SignatureDef test_signature_def;
test_signature_def_map[kDefaultServingSignatureDefKey] = test_signature_def;
EXPECT_TRUE(tensorflow::errors::IsInvalidArgument(
SetSignatureDefMap(model_, test_signature_def_map, nullptr)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/signature/signature_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/signature/signature_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d5029fc-e5a7-4df6-8d8f-b8c571a6e12a | cpp | tensorflow/tensorflow | writer | tensorflow/lite/tools/serialization/writer.cc | tensorflow/lite/tools/serialization/writer_test.cc | #include <iostream>
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/serialization/writer_lib.h"
int main(int argc, char* argv[]) {
if (argc != 3) {
fprintf(stderr, "Usage: %s input_file output_file\n", argv[0]);
return 1;
}
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(argv[1]);
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates
builtin_op_resolver;
tflite::InterpreterBuilder(*model, builtin_op_resolver)(&interpreter);
tflite::ModelWriter writer(interpreter.get());
writer.Write(argv[2]);
return 0;
} | #include <iostream>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/serialization/writer_lib.h"
int main(int argc, char* argv[]) {
if (argc != 2) {
fprintf(stderr, "Usage: %s input_file\n", argv[0]);
return 1;
}
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(argv[1]);
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates
builtin_op_resolver;
tflite::InterpreterBuilder(*model, builtin_op_resolver)(&interpreter);
tflite::ModelWriter writer(interpreter.get());
std::unique_ptr<uint8_t[]> output_buffer;
size_t output_buffer_size;
writer.GetBuffer(&output_buffer, &output_buffer_size);
std::unique_ptr<tflite::Interpreter> new_interpreter;
model = tflite::FlatBufferModel::BuildFromBuffer(
reinterpret_cast<char*>(output_buffer.get()), output_buffer_size);
tflite::InterpreterBuilder(*model, builtin_op_resolver)(&new_interpreter);
if (new_interpreter->AllocateTensors() != kTfLiteOk) {
fprintf(stderr, "AllocateTensors failed on the round-tripped model.\n");
return 1;
}
return 0;
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d8d55ae-5c41-44a2-905e-07d95bfb1b0c | cpp | tensorflow/tensorflow | writer_lib | tensorflow/lite/tools/serialization/writer_lib.cc | tensorflow/lite/tools/serialization/writer_lib_test.cc | #include "tensorflow/lite/tools/serialization/writer_lib.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/string.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/interpreter.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/tools/serialization/enum_mapping.h"
#include "tensorflow/lite/tools/versioning/op_version.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
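// Builds the OperatorCode table from the opcodes collected during export.
// Custom ops are written with version 1; builtin versions are left at 0 and
// resolved later by UpdateOpVersion().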
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
CreateOpCodeTableImpl(flatbuffers::FlatBufferBuilder* fbb,
std::vector<OpCode>* opcodes) {
std::vector<flatbuffers::Offset<OperatorCode>> codes;
for (const auto& it : *opcodes) {
const char* custom_name = it.custom.empty() ? nullptr : it.custom.c_str();
int32_t op_version = it.builtin != tflite::BuiltinOperator_CUSTOM ? 0 : 1;
codes.push_back(
CreateOperatorCodeDirect(*fbb, static_cast<BuiltinOperator>(it.builtin),
custom_name, op_version));
}
return fbb->template CreateVector<flatbuffers::Offset<OperatorCode>>(codes);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
ExportBuffersImpl(flatbuffers::FlatBufferBuilder* fbb,
std::vector<std::pair<const uint8_t*, size_t>>* buffers) {
std::vector<flatbuffers::Offset<Buffer>> buffer_vector;
for (auto buffer : *buffers) {
auto data_offset = fbb->CreateVector(buffer.first, buffer.second);
buffer_vector.push_back(CreateBuffer(*fbb, data_offset));
}
return fbb->template CreateVector<flatbuffers::Offset<Buffer>>(buffer_vector);
}
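// Writes `size` bytes of a finished flatbuffer to `filename`, byte-swapping
// first on big-endian hosts.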
TfLiteStatus WriteImpl(const std::string& filename, void* data, size_t size) {
FILE* fp = fopen(filename.c_str(), "wb");
if (!fp) return kTfLiteError;
#if FLATBUFFERS_LITTLEENDIAN == 0
const tflite::Model* input_model = tflite::GetModel(data);
tflite::FlatBufferModel::ByteSwapTFLiteModel(input_model);
#endif
  const size_t result_size = fwrite(data, 1, size, fp);
  fclose(fp);
  if (result_size != size) return kTfLiteError;
return kTfLiteOk;
}
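// Dispatches on the builtin operator to the generated per-op option writers
// and returns the resulting BuiltinOptions union type and offset.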
std::pair<BuiltinOptions, flatbuffers::Offset<void>> CreateBuiltinUnion(
flatbuffers::FlatBufferBuilder* fbb, enum BuiltinOperator op,
void* builtin_op_data, int node_inputs_size) {
switch (op) {
#include "tensorflow/lite/tools/serialization/option_writer_generated.h"
}
return std::make_pair(BuiltinOptions_NONE, flatbuffers::Offset<void>());
}
}
template <class T_OUTPUT, class T_INPUT>
flatbuffers::Offset<flatbuffers::Vector<T_OUTPUT>> SubgraphWriter::ExportVector(
flatbuffers::FlatBufferBuilder* fbb, const T_INPUT& v) {
std::vector<T_OUTPUT> inputs(v.begin(), v.end());
return fbb->template CreateVector<T_OUTPUT>(inputs);
}
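// Emits one Operator table per node in the execution plan, resolving opcode
// indices, builtin/custom options, and remapped tensor indices.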
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>>
SubgraphWriter::ExportOperators(flatbuffers::FlatBufferBuilder* fbb) {
std::vector<flatbuffers::Offset<Operator>> operators;
std::vector<int> operator_to_opcode;
operator_to_opcode.resize(subgraph_->nodes_size(), -1);
for (int op_index : execution_plan_) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
const TfLiteRegistration* registration = &node_and_registration->second;
if (!registration->custom_name) {
operator_to_opcode[op_index] =
GetOpCodeForBuiltin(registration->builtin_code);
} else {
operator_to_opcode[op_index] =
GetOpCodeForCustom(registration->custom_name);
}
}
for (int op_index : execution_plan_) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
const TfLiteNode& node = node_and_registration->first;
const TfLiteRegistration& registration = node_and_registration->second;
flatbuffers::Offset<void> builtin_options;
BuiltinOptions builtin_options_type = BuiltinOptions_NONE;
auto custom_options_format = CustomOptionsFormat_FLEXBUFFERS;
flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0;
if (!registration.custom_name) {
auto builtin_options_and_type = CreateBuiltinUnion(
fbb, static_cast<enum BuiltinOperator>(registration.builtin_code),
node.builtin_data, node.inputs->size);
builtin_options = builtin_options_and_type.second;
builtin_options_type = builtin_options_and_type.first;
} else {
auto custom_writer = custom_op_to_writer_.find(registration.custom_name);
if (custom_writer != custom_op_to_writer_.end() &&
custom_writer->second) {
custom_writer->second(fbb, subgraph_, op_index, &custom_options,
&custom_options_format);
} else {
custom_options = fbb->CreateVector(
reinterpret_cast<const uint8_t*>(node.custom_initial_data),
node.custom_initial_data_size);
}
}
int opcode_index = operator_to_opcode[op_index];
std::vector<int> written_inputs =
RemapTensorIndicesToWritten(TfLiteIntArrayView(node.inputs));
std::vector<int> written_outputs =
RemapTensorIndicesToWritten(TfLiteIntArrayView(node.outputs));
auto inputs = ExportVector<int32_t>(fbb, written_inputs);
auto outputs = ExportVector<int32_t>(fbb, written_outputs);
operators.push_back(CreateOperator(*fbb, opcode_index, inputs, outputs,
builtin_options_type, builtin_options,
custom_options, custom_options_format));
}
return fbb->template CreateVector<flatbuffers::Offset<Operator>>(operators);
}
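// Serializes all non-temporary, non-unused tensors, assigning them compact
// "written" indices; kTfLiteMmapRo tensors also get their data exported as
// buffers, and quantization parameters are written per tensor or per channel.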
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>>
SubgraphWriter::ExportTensors(flatbuffers::FlatBufferBuilder* fbb) {
tensor_to_written_tensor_.resize(subgraph_->tensors_size(), -1);
std::vector<flatbuffers::Offset<Tensor>> tensors;
std::vector<bool> tensor_is_temporary(subgraph_->tensors_size(), false);
for (int op_index = 0; op_index < subgraph_->nodes_size(); ++op_index) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
for (auto tensor_index :
TfLiteIntArrayView(node_and_registration->first.temporaries))
tensor_is_temporary[tensor_index] = true;
}
int curr_output_index = 0;
for (int tensor_index = 0; tensor_index < subgraph_->tensors_size();
tensor_index++) {
if (!tensor_is_temporary[tensor_index] &&
unused_tensors_.find(tensor_index) == unused_tensors_.end()) {
tensor_to_written_tensor_[tensor_index] = curr_output_index++;
}
}
for (int tensor_index = 0; tensor_index < subgraph_->tensors_size();
++tensor_index) {
if (tensor_to_written_tensor_[tensor_index] == -1) continue;
if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
int buffer_index = 0;
if (tensor->allocation_type == kTfLiteMmapRo) {
buffer_index = buffers_->size();
buffers_->push_back(std::make_pair(
reinterpret_cast<const uint8_t*>(tensor->data.raw), tensor->bytes));
}
TensorType type = TfLiteTypeToSchemaType(tensor->type);
flatbuffers::Offset<QuantizationParameters> quantization_params;
const flatbuffers::Offset<flatbuffers::Vector<float>> null_array;
flatbuffers::Offset<flatbuffers::Vector<float>> scale_array;
flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point_array;
if (tensor->quantization.type == kTfLiteAffineQuantization) {
if (tensor->params.scale != 0.f) {
scale_array = fbb->CreateVector<float>({tensor->params.scale});
zero_point_array =
fbb->CreateVector<int64_t>({tensor->params.zero_point});
quantization_params = CreateQuantizationParameters(
*fbb, null_array, null_array, scale_array, zero_point_array);
} else {
const TfLiteAffineQuantization* params =
reinterpret_cast<TfLiteAffineQuantization*>(
tensor->quantization.params);
const size_t num_scales = params->scale->size;
std::vector<float> scale_vector(params->scale->data,
params->scale->data + num_scales);
std::vector<int64_t> zero_point_vector(
params->zero_point->data, params->zero_point->data + num_scales);
scale_array = fbb->CreateVector<float>(scale_vector);
zero_point_array = fbb->CreateVector<int64_t>(zero_point_vector);
quantization_params = CreateQuantizationParameters(
*fbb, null_array, null_array, scale_array, zero_point_array,
QuantizationDetails_NONE, 0, params->quantized_dimension);
}
}
if (tensor->dims) {
TfLiteIntArrayView shape_view(tensor->dims);
std::vector<int> shape =
std::vector<int>(shape_view.begin(), shape_view.end());
Offset<flatbuffers::String> tensor_name_offset = 0;
if (tensor->name != nullptr) {
tensor_name_offset = fbb->CreateString(tensor->name);
}
flatbuffers::Offset<flatbuffers::Vector<int32_t>>
shape_signature_offset = 0;
if (serialize_dims_signature_ && tensor->dims_signature != nullptr) {
TfLiteIntArrayView shape_signature_view(tensor->dims_signature);
std::vector<int32_t> shape_signature(shape_signature_view.begin(),
shape_signature_view.end());
shape_signature_offset = ExportVector<int32_t>(fbb, shape_signature);
}
bool has_rank = true;
tensors.push_back(CreateTensor(
*fbb, ExportVector<int32_t>(fbb, shape), type, buffer_index,
tensor_name_offset, quantization_params, tensor->is_variable,
0, shape_signature_offset, has_rank));
}
}
}
return fbb->template CreateVector<flatbuffers::Offset<Tensor>>(tensors);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
SubgraphWriter::ExportBuffers(flatbuffers::FlatBufferBuilder* fbb) {
return ExportBuffersImpl(fbb, buffers_);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
SubgraphWriter::CreateOpCodeTable(flatbuffers::FlatBufferBuilder* fbb) {
return CreateOpCodeTableImpl(fbb, opcodes_);
}
template <class T>
std::vector<int> SubgraphWriter::RemapTensorIndicesToWritten(const T& input) {
std::vector<int> output;
output.reserve(input.size());
for (int x : input) {
if (x == -1) {
output.push_back(x);
continue;
}
if (tensor_to_written_tensor_[x] != -1) {
output.push_back(tensor_to_written_tensor_[x]);
}
}
return output;
}
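// Serializes the subgraph into a standalone model flatbuffer and returns it
// through `out`/`size`.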
TfLiteStatus SubgraphWriter::GetBuffer(std::unique_ptr<uint8_t[]>* out,
size_t* size) {
if (!out || !size) return kTfLiteError;
flatbuffers::FlatBufferBuilder builder(10240);
std::vector<flatbuffers::Offset<SubGraph>> subgraphs_as_vector;
subgraphs_as_vector.push_back(
PopulateAndGetOffset(&builder, subgraph_->GetName()));
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
buffers = ExportBuffers(&builder);
auto description = builder.CreateString("Exported from Subgraph.");
auto op_codes = CreateOpCodeTable(&builder);
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, op_codes,
builder.CreateVector(subgraphs_as_vector),
description, buffers);
::tflite::FinishModelBuffer(builder, model);
::tflite::UpdateOpVersion(builder.GetBufferPointer());
const uint8_t* buffer = builder.GetBufferPointer();
*size = builder.GetSize();
  out->reset(new uint8_t[*size]);
memcpy(out->get(), buffer, *size);
return kTfLiteOk;
}
flatbuffers::Offset<SubGraph> SubgraphWriter::PopulateAndGetOffset(
flatbuffers::FlatBufferBuilder* builder, const std::string& subgraph_name) {
auto tensors = ExportTensors(builder);
std::vector<int> written_inputs = RemapTensorIndicesToWritten(inputs_);
std::vector<int> written_outputs = RemapTensorIndicesToWritten(outputs_);
auto inputs = ExportVector<int32_t>(builder, written_inputs);
auto outputs = ExportVector<int32_t>(builder, written_outputs);
auto ops = ExportOperators(builder);
auto name = builder->CreateString(subgraph_name);
return CreateSubGraph(*builder, tensors, inputs, outputs, ops, name);
}
TfLiteStatus SubgraphWriter::Write(const std::string& filename) {
std::unique_ptr<uint8_t[]> buffer;
size_t size;
TF_LITE_ENSURE_STATUS(GetBuffer(&buffer, &size));
return WriteImpl(filename, buffer.get(), size);
}
TfLiteStatus SubgraphWriter::RegisterCustomWriter(
const std::string& custom_name, CustomWriter custom_writer) {
if (custom_op_to_writer_.find(custom_name) != custom_op_to_writer_.end()) {
return kTfLiteError;
}
custom_op_to_writer_.insert(std::make_pair(custom_name, custom_writer));
return kTfLiteOk;
}
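// Validates that the requested inputs/outputs and execution plan form a
// closed dataflow: every node input must be a declared input, a variable, a
// constant, or produced by an earlier node in the plan, and every requested
// output must be produced or constant.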
TfLiteStatus SubgraphWriter::CheckInputOutput(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<int>& execution_plan) {
absl::flat_hash_set<int> known_tensors(inputs.begin(), inputs.end());
known_tensors.insert(subgraph_->variables().begin(),
subgraph_->variables().end());
for (int op_index : execution_plan) {
const auto* node_and_registration =
subgraph_->node_and_registration(op_index);
const TfLiteNode& node = node_and_registration->first;
for (int tensor_index : TfLiteIntArrayView(node.inputs)) {
if (tensor_index < 0) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
} else {
return kTfLiteError;
}
}
if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
if (tensor->allocation_type == kTfLiteMmapRo) {
continue;
}
}
if (known_tensors.find(tensor_index) == known_tensors.end()) {
subgraph_->context()->ReportError(
subgraph_->context(),
"Node (%d) uses an input (%d) that is not provided.", op_index,
tensor_index);
return kTfLiteError;
}
}
    TfLiteIntArrayView node_outputs(node.outputs);
    known_tensors.insert(node_outputs.begin(), node_outputs.end());
}
for (int tensor_index : outputs) {
if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
if (tensor->allocation_type == kTfLiteMmapRo) {
continue;
}
}
if (known_tensors.find(tensor_index) == known_tensors.end()) {
subgraph_->context()->ReportError(
subgraph_->context(),
"Output (%d) is not produced by the execution plan.", tensor_index);
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteStatus SubgraphWriter::SetCustomInputOutput(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<int>& execution_plan) {
TF_LITE_ENSURE_STATUS(CheckInputOutput(inputs, outputs, execution_plan));
inputs_ = inputs;
outputs_ = outputs;
execution_plan_ = execution_plan;
return kTfLiteOk;
}
ModelWriter::ModelWriter(Interpreter* interpreter,
bool serialize_dims_signature) {
std::vector<Subgraph*> subgraphs;
subgraphs.reserve(interpreter->subgraphs_size());
for (int i = 0; i < interpreter->subgraphs_size(); ++i) {
subgraphs.push_back(interpreter->subgraph(i));
}
Init(subgraphs, serialize_dims_signature);
}
ModelWriter::ModelWriter(const std::vector<Subgraph*>& subgraphs,
bool serialize_dims_signature) {
Init(subgraphs, serialize_dims_signature);
}
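// Shared constructor logic: creates one SubgraphWriter per subgraph (sharing
// the buffer and opcode tables) and records how original subgraph indices map
// to their positions in the written model.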
void ModelWriter::Init(const std::vector<Subgraph*>& subgraphs,
bool serialize_dims_signature) {
buffers_.push_back(std::make_pair(nullptr, 0));
subgraph_writers_.reserve(subgraphs.size());
for (auto* subgraph : subgraphs) {
SubgraphWriter writer(subgraph, &buffers_, &opcodes_,
&builtin_op_to_opcode_, serialize_dims_signature);
subgraph_writers_.push_back(writer);
}
if (!subgraphs.empty()) {
absl::flat_hash_map<Subgraph*, int> subgraph_to_new_subgraph_index;
for (int i = 0; i < subgraphs.size(); ++i) {
subgraph_to_new_subgraph_index[subgraphs[i]] = i;
}
auto* all_subgraphs = subgraphs[0]->GetSubgraphs();
for (int i = 0; i < all_subgraphs->size(); ++i) {
auto it = subgraph_to_new_subgraph_index.find(all_subgraphs->at(i));
if (it != subgraph_to_new_subgraph_index.end()) {
subgraph_index_mapper_[i] = it->second;
}
}
}
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
ModelWriter::ExportBuffers(flatbuffers::FlatBufferBuilder* fbb) {
return ExportBuffersImpl(fbb, &buffers_);
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
ModelWriter::CreateOpCodeTable(flatbuffers::FlatBufferBuilder* fbb) {
return CreateOpCodeTableImpl(fbb, &opcodes_);
}
TfLiteStatus ModelWriter::GetBuffer(std::unique_ptr<uint8_t[]>* out,
size_t* size) {
if (!out || !size) return kTfLiteError;
flatbuffers::FlatBufferBuilder builder(10240);
std::vector<flatbuffers::Offset<SubGraph>> subgraphs_as_vector;
subgraphs_as_vector.reserve(subgraph_writers_.size());
for (auto& subgraph_writer : subgraph_writers_) {
subgraphs_as_vector.push_back(subgraph_writer.PopulateAndGetOffset(
&builder, subgraph_writer.subgraph_->GetName()));
}
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
buffers = ExportBuffers(&builder);
auto description = builder.CreateString("Exported from Subgraph.");
auto op_codes = CreateOpCodeTable(&builder);
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, op_codes,
builder.CreateVector(subgraphs_as_vector),
description, buffers);
::tflite::FinishModelBuffer(builder, model);
::tflite::UpdateOpVersion(builder.GetBufferPointer());
UpdateSubgraphReferences(&builder);
const uint8_t* buffer = builder.GetBufferPointer();
*size = builder.GetSize();
  out->reset(new uint8_t[*size]);
memcpy(out->get(), buffer, *size);
return kTfLiteOk;
}
TfLiteStatus ModelWriter::Write(const std::string& filename) {
std::unique_ptr<uint8_t[]> buffer;
size_t size;
TF_LITE_ENSURE_STATUS(GetBuffer(&buffer, &size));
return WriteImpl(filename, buffer.get(), size);
}
void ModelWriter::SetUnusedTensors(int subgraph_index,
const std::set<int>& unused_tensors) {
subgraph_writers_[subgraph_index].SetUnusedTensors(unused_tensors);
}
TfLiteStatus ModelWriter::SetCustomInputOutput(
int subgraph_index, const std::vector<int>& inputs,
const std::vector<int>& outputs, const std::vector<int>& execution_plan) {
return subgraph_writers_[subgraph_index].SetCustomInputOutput(inputs, outputs,
execution_plan);
}
TfLiteStatus ModelWriter::RegisterCustomWriter(const std::string& custom_name,
CustomWriter custom_writer) {
for (auto& subgraph_writer : subgraph_writers_) {
subgraph_writer.RegisterCustomWriter(custom_name, custom_writer);
}
return kTfLiteOk;
}
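// Rewrites the subgraph indices referenced by WHILE and IF operators so they
// point at the subgraphs' new positions in the written model.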
TfLiteStatus ModelWriter::UpdateSubgraphReferences(
flatbuffers::FlatBufferBuilder* fbb) {
auto model = tflite::GetMutableModel(fbb->GetBufferPointer());
for (SubGraph* subgraph : *model->mutable_subgraphs()) {
for (Operator* op : *subgraph->mutable_operators()) {
if (op->builtin_options_type() == BuiltinOptions_WhileOptions) {
auto while_options =
static_cast<tflite::WhileOptions*>(op->mutable_builtin_options());
auto new_cond_index =
subgraph_index_mapper_.find(while_options->cond_subgraph_index());
auto new_body_index =
subgraph_index_mapper_.find(while_options->body_subgraph_index());
if (new_cond_index == subgraph_index_mapper_.end() ||
new_body_index == subgraph_index_mapper_.end()) {
return kTfLiteError;
}
while_options->mutate_cond_subgraph_index(new_cond_index->second);
while_options->mutate_body_subgraph_index(new_body_index->second);
} else if (op->builtin_options_type() == BuiltinOptions_IfOptions) {
auto if_options =
static_cast<tflite::IfOptions*>(op->mutable_builtin_options());
auto new_then_index =
subgraph_index_mapper_.find(if_options->then_subgraph_index());
auto new_else_index =
subgraph_index_mapper_.find(if_options->else_subgraph_index());
if (new_then_index == subgraph_index_mapper_.end() ||
new_else_index == subgraph_index_mapper_.end()) {
return kTfLiteError;
}
if_options->mutate_then_subgraph_index(new_then_index->second);
if_options->mutate_else_subgraph_index(new_else_index->second);
}
}
}
return kTfLiteOk;
}
} | #include "tensorflow/lite/tools/serialization/writer_lib.h"
#include <cstdlib>
#include <fstream>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
#include "tsl/platform/logging.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::FillIntTensor;
std::string CreateFilePath(const std::string& file_name) {
const char* tmp_dir = getenv("TEST_TMPDIR");
return std::string(tmp_dir ? tmp_dir : "./") + file_name;
}
class SingleSubgraphTest : public ::testing::TestWithParam<bool> {
protected:
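  // Round-trips the interpreter to a file, via SubgraphWriter when
  // use_subgraph_writer is true and via ModelWriter otherwise.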
void WriteToFile(Interpreter* interpreter, const std::string& filename,
bool use_subgraph_writer) {
if (use_subgraph_writer) {
SubgraphWriter writer(&interpreter->primary_subgraph());
CHECK_EQ(writer.Write(filename), kTfLiteOk);
} else {
ModelWriter writer(interpreter);
CHECK_EQ(writer.Write(filename), kTfLiteOk);
}
}
};
TEST_P(SingleSubgraphTest, InvalidDestinations) {
Interpreter interpreter;
interpreter.AddTensors(3);
float foo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<char*>(foo), sizeof(foo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
if (GetParam()) {
SubgraphWriter writer(&interpreter.primary_subgraph());
CHECK_EQ(writer.Write(""), kTfLiteError);
} else {
ModelWriter writer(&interpreter);
CHECK_EQ(writer.Write(""), kTfLiteError);
}
size_t size;
if (GetParam()) {
SubgraphWriter writer(&interpreter.primary_subgraph());
CHECK_EQ(writer.GetBuffer(nullptr, &size), kTfLiteError);
} else {
ModelWriter writer(&interpreter);
CHECK_EQ(writer.GetBuffer(nullptr, &size), kTfLiteError);
}
}
TEST_P(SingleSubgraphTest, FloatModelTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
float foo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<char*>(foo), sizeof(foo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const std::string test_file = CreateFilePath("test_float.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, CustomInputOutputTest) {
Interpreter interpreter;
interpreter.AddTensors(4);
constexpr float kFoo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<const char*>(kFoo), sizeof(kFoo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "d", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({3});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const TfLiteRegistration* reg2 = resolver.FindOp(BuiltinOperator_RELU, 1);
interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, reg2);
const std::string test_file = CreateFilePath("test_custom.tflite");
SubgraphWriter writer(&interpreter.primary_subgraph());
  EXPECT_EQ(writer.SetCustomInputOutput(/*inputs=*/{2}, /*outputs=*/{3},
                                        /*execution_plan=*/{1}),
            kTfLiteOk);
writer.SetUnusedTensors({0, 1});
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, CustomInputOutputErrorCasesTest) {
Interpreter interpreter;
interpreter.AddTensors(5);
constexpr float kFoo[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadOnly(
1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
reinterpret_cast<const char*>(kFoo), sizeof(kFoo));
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "d", {3},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(4, kTfLiteFloat32, "e", {3},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({4});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const TfLiteRegistration* reg2 = resolver.FindOp(BuiltinOperator_RELU, 1);
interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, reg2);
const TfLiteRegistration* reg3 = resolver.FindOp(BuiltinOperator_RELU6, 1);
interpreter.AddNodeWithParameters({3}, {4}, nullptr, 0, nullptr, reg3);
SubgraphWriter writer(&interpreter.primary_subgraph());
  EXPECT_EQ(writer.SetCustomInputOutput(/*inputs=*/{2}, /*outputs=*/{3},
                                        /*execution_plan=*/{0, 1}),
            kTfLiteError);
  EXPECT_EQ(writer.SetCustomInputOutput(/*inputs=*/{0, 1}, /*outputs=*/{4},
                                        /*execution_plan=*/{0, 1}),
            kTfLiteError);
  EXPECT_EQ(writer.SetCustomInputOutput(/*inputs=*/{0, 1}, /*outputs=*/{3},
                                        /*execution_plan=*/{0, 1}),
            kTfLiteOk);
}
TEST_P(SingleSubgraphTest, CustomInputOutputVariableTensorTest) {
Interpreter interpreter;
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
interpreter.AddTensors(3);
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
TfLiteQuantization());
  interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "b", {3},
                                           TfLiteQuantization(),
                                           /*is_variable=*/true);
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
TfLiteQuantization());
interpreter.SetInputs({0});
interpreter.SetOutputs({2});
interpreter.SetVariables({1});
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0,
reinterpret_cast<void*>(builtin_data),
resolver.FindOp(BuiltinOperator_ADD, 1));
const std::string test_file = CreateFilePath("test_variables.tflite");
SubgraphWriter writer(&interpreter.primary_subgraph());
  EXPECT_EQ(writer.SetCustomInputOutput(/*inputs=*/{0}, /*outputs=*/{2},
                                        /*execution_plan=*/{0}),
            kTfLiteOk);
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, PerTensorQuantizedModelTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
interpreter.SetTensorParametersReadWrite(
0, kTfLiteUInt8, "a", {3}, TfLiteQuantizationParams({1 / 256., 128}));
interpreter.SetTensorParametersReadWrite(
1, kTfLiteUInt8, "b", {3}, TfLiteQuantizationParams({1 / 256., 128}));
interpreter.SetTensorParametersReadWrite(
2, kTfLiteUInt8, "c", {3}, TfLiteQuantizationParams({1 / 256., 128}));
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const std::string test_file = CreateFilePath("test_uint8.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
TEST_P(SingleSubgraphTest, OpVersioningTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {1, 4},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "b", {2},
TfLiteQuantization());
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {4, 4},
TfLiteQuantization());
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
const TfLiteRegistration* reg =
resolver.FindOp(BuiltinOperator_BROADCAST_TO, 2);
  interpreter.AddNodeWithParameters({0, 1}, {2}, /*init_data=*/nullptr,
                                    /*init_data_size=*/0,
                                    /*builtin_data=*/nullptr, reg);
const std::string test_file = CreateFilePath("test_float.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(new_interpreter->nodes_size(), 1);
TfLiteRegistration output_reg =
new_interpreter->node_and_registration(0)->second;
ASSERT_EQ(output_reg.builtin_code, BuiltinOperator_BROADCAST_TO);
CHECK_EQ(output_reg.version, 2);
}
TEST_P(SingleSubgraphTest, DynamicShapeTest) {
Interpreter interpreter;
interpreter.AddTensors(3);
std::vector<int> dims = {1, 3};
std::vector<int> dims_signature = {-1, 3};
  interpreter.SetTensorParametersReadWrite(
      0, kTfLiteFloat32, "a", dims, TfLiteQuantizationParams{1.0, 0},
      /*is_variable=*/false, &dims_signature);
  interpreter.SetTensorParametersReadWrite(
      1, kTfLiteFloat32, "b", dims, TfLiteQuantizationParams{1.0, 0},
      /*is_variable=*/false, &dims_signature);
  interpreter.SetTensorParametersReadWrite(
      2, kTfLiteFloat32, "c", dims, TfLiteQuantizationParams{1.0, 0},
      /*is_variable=*/false, &dims_signature);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({2});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteAddParams* builtin_data =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data->activation = kTfLiteActNone;
builtin_data->pot_scale_int16 = false;
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
const std::string test_file = CreateFilePath("test_dynamic_shape.tflite");
WriteToFile(&interpreter, test_file, GetParam());
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
TfLiteTensor* tensor0 = new_interpreter->tensor(0);
CHECK_NOTNULL(tensor0->dims_signature);
TfLiteIntArrayView shape_view(tensor0->dims_signature);
CHECK_EQ(shape_view.size(), 2);
CHECK_EQ(shape_view[0], -1);
}
INSTANTIATE_TEST_SUITE_P(Writer, SingleSubgraphTest, ::testing::Bool());
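// Parameterization for the reshape serialization tests below: `num_inputs`
// selects whether the output shape comes from builtin params (1 input) or a
// second shape tensor (2 inputs); `is_param_valid` toggles whether the
// builtin shape params are populated; `has_buggy_non_flatten_shape` stores
// the shape tensor with dims {3, 1} instead of the flat {3} to mimic models
// that emit a non-flat shape tensor.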
struct ReshapeTestPattern {
int num_inputs;
bool is_param_valid;
bool has_buggy_non_flatten_shape;
};
class ReshapeLayerTest : public ::testing::TestWithParam<ReshapeTestPattern> {};
TEST_P(ReshapeLayerTest, ReshapeLayerTest) {
const auto param = GetParam();
Interpreter interpreter;
const int total_tensors = param.num_inputs + 1;
interpreter.AddTensors(total_tensors);
int output_shape[] = {1, 2, 3};
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32,
"a", {6},
TfLiteQuantization());
ASSERT_LE(param.num_inputs, 2);
if (param.num_inputs == 2) {
if (param.has_buggy_non_flatten_shape) {
interpreter.SetTensorParametersReadOnly(
1, kTfLiteInt32, "b", {3, 1},
TfLiteQuantization(), reinterpret_cast<char*>(output_shape),
sizeof(output_shape));
} else {
interpreter.SetTensorParametersReadOnly(
1, kTfLiteInt32, "b", {3},
TfLiteQuantization(), reinterpret_cast<char*>(output_shape),
sizeof(output_shape));
}
}
interpreter.SetTensorParametersReadWrite(total_tensors - 1,
kTfLiteFloat32, "c",
{3}, TfLiteQuantization());
std::vector<int> input_tensors(param.num_inputs);
std::iota(input_tensors.begin(), input_tensors.end(), 0);
interpreter.SetInputs(input_tensors);
interpreter.SetOutputs({total_tensors - 1});
const char* initial_data = "";
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
TfLiteReshapeParams* builtin_data = reinterpret_cast<TfLiteReshapeParams*>(
malloc(sizeof(TfLiteReshapeParams)));
memset(builtin_data, 0, sizeof(TfLiteReshapeParams));
if (param.is_param_valid) {
builtin_data->num_dimensions = 3;
for (int dim = 0; dim < builtin_data->num_dimensions; ++dim) {
builtin_data->shape[dim] = output_shape[dim];
}
}
const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_RESHAPE, 1);
interpreter.AddNodeWithParameters(input_tensors,
{total_tensors - 1},
initial_data, 0,
reinterpret_cast<void*>(builtin_data), reg);
SubgraphWriter writer(&interpreter.primary_subgraph());
std::stringstream ss;
ss << CreateFilePath("test_reshape_") << param.num_inputs
<< param.is_param_valid << ".tflite";
std::string filename = ss.str();
writer.Write(filename);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(filename.c_str());
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
INSTANTIATE_TEST_SUITE_P(
Writer, ReshapeLayerTest,
::testing::Values(ReshapeTestPattern{2,
true,
false},
ReshapeTestPattern{2,
false,
false},
ReshapeTestPattern{1,
true,
false},
ReshapeTestPattern{2,
true,
true}),
[](const ::testing::TestParamInfo<ReshapeLayerTest::ParamType>& info) {
std::stringstream ss;
ss << "num_inputs_" << info.param.num_inputs << "_valid_param_"
<< info.param.is_param_valid << "_buggy_shape_"
<< info.param.has_buggy_non_flatten_shape;
std::string name = ss.str();
return name;
});
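// Fixture for WHILE-op serialization tests. NewCustomAlloc() hands out
// allocations aligned to `required_alignment` whose backing buffers are kept
// alive in custom_alloc_buffers_ for the lifetime of the fixture.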
class WhileTest : public subgraph_test_util::ControlFlowOpTest {
protected:
TfLiteCustomAllocation NewCustomAlloc(size_t num_bytes,
int required_alignment) {
char* new_alloc = new char[num_bytes + required_alignment];
char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
AlignTo(required_alignment, reinterpret_cast<intptr_t>(new_alloc)));
custom_alloc_buffers_.emplace_back(new_alloc);
return TfLiteCustomAllocation(
{new_underlying_buffer_aligned_ptr, num_bytes});
}
intptr_t AlignTo(size_t alignment, intptr_t offset) {
return offset % alignment == 0 ? offset
: offset + (alignment - offset % alignment);
}
std::vector<std::unique_ptr<char[]>> custom_alloc_buffers_;
};
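// With kSeqNumber = 4 the loop keeps incrementing the counter and adding it
// to the running sum while counter <= 4, so it exits with counter = 5 and
// sum = 1 + 2 + 3 + 4 + 5 = 15 (kExpectedValue).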
TEST_F(WhileTest, TestTriangularNumberSequence) {
const int kSeqNumber = 4;
const int kExpectedValue = 15;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
auto alloc =
NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
kDefaultTensorAlignment);
auto* input_data = reinterpret_cast<int*>(alloc.data);
input_data[0] = 1;
interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {kSeqNumber + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue});
ModelWriter writer(interpreter_.get());
const std::string test_file = CreateFilePath("test_while.tflite");
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[0], {1});
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[1], {1});
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[0]), {1});
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[1]), {1});
ASSERT_EQ(new_interpreter->Invoke(), kTfLiteOk);
output1 = new_interpreter->tensor(new_interpreter->outputs()[0]);
CheckIntTensor(output1, {1}, {kSeqNumber + 1});
output2 = new_interpreter->tensor(new_interpreter->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue});
}
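// A ModelWriter built from the Interpreter and one built from an explicit
// list of its subgraphs must serialize to byte-identical files.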
TEST_F(WhileTest, TestModelWriterFromSubgraphs) {
const int kSeqNumber = 4;
const int kExpectedValue = 15;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
auto alloc =
NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
kDefaultTensorAlignment);
auto* input_data = reinterpret_cast<int*>(alloc.data);
input_data[0] = 1;
interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {kSeqNumber + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue});
ModelWriter writer_1(interpreter_.get());
const std::string test_file_1 = CreateFilePath("test_while_1.tflite");
writer_1.Write(test_file_1);
std::vector<Subgraph*> subgraphs;
for (int i = 0; i < interpreter_->subgraphs_size(); ++i) {
subgraphs.push_back(interpreter_->subgraph(i));
}
ModelWriter writer_2(subgraphs);
const std::string test_file_2 = CreateFilePath("test_while_2.tflite");
writer_2.Write(test_file_2);
std::ifstream file_ifs_1(test_file_1, std::ios::in);
std::ostringstream model_content_1;
model_content_1 << file_ifs_1.rdbuf();
std::ifstream file_ifs_2(test_file_2, std::ios::in);
std::ostringstream model_content_2;
model_content_2 << file_ifs_2.rdbuf();
EXPECT_FALSE(model_content_1.str().empty());
EXPECT_EQ(model_content_1.str(), model_content_2.str());
}
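// Builds two independent WHILE loops, then serializes only subgraphs
// {0, 3, 4} with the second loop as the sole output. The writer must remap
// that loop's cond/body subgraph indices (3 and 4) to their new positions in
// the written model.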
TEST_F(WhileTest, TestUpdateSubgraphIndices) {
const int kSeqNumber1 = 4;
const int kSeqNumber2 = 5;
const int kExpectedValue1 = 15;
const int kExpectedValue2 = 21;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(4);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber1);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(3), kSeqNumber2);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(4));
Subgraph* primary_subgraph = &interpreter_->primary_subgraph();
const int kInput1 = 0;
const int kInput2 = 1;
const int kUnused1 = 2;
const int kUnused2 = 3;
const int kOutput1 = 4;
const int kOutput2 = 5;
const int kTensorCount = 6;
int first_new_tensor_index;
ASSERT_EQ(primary_subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(primary_subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(primary_subgraph->SetOutputs({kOutput1, kOutput2}), kTfLiteOk);
for (int i = 0; i < kTensorCount; ++i) {
ASSERT_EQ(primary_subgraph->SetTensorParametersReadWrite(
i, kTfLiteInt32, "", 0, nullptr, {}, false),
kTfLiteOk);
}
auto* while_reg = ops::builtin::Register_WHILE();
while_reg->builtin_code = kTfLiteBuiltinWhile;
TfLiteWhileParams* params1 =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params1->cond_subgraph_index = 1;
params1->body_subgraph_index = 2;
TfLiteWhileParams* params2 =
reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
params2->cond_subgraph_index = 3;
params2->body_subgraph_index = 4;
int while1_index, while2_index;
primary_subgraph->AddNodeWithParameters({kInput1, kInput2},
{kUnused1, kOutput1}, {}, nullptr, 0,
params1, while_reg, &while1_index);
primary_subgraph->AddNodeWithParameters({kInput1, kInput2},
{kUnused2, kOutput2}, {}, nullptr, 0,
params2, while_reg, &while2_index);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
auto alloc =
NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
kDefaultTensorAlignment);
auto* input_data = reinterpret_cast<int*>(alloc.data);
input_data[0] = 1;
interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {kExpectedValue1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {kExpectedValue2});
ModelWriter writer({interpreter_->subgraph(0), interpreter_->subgraph(3),
interpreter_->subgraph(4)});
writer.SetCustomInputOutput(0, {kInput1, kInput2},
{kOutput2}, {while2_index});
const std::string test_file = CreateFilePath("test_while.tflite");
writer.Write(test_file);
std::unique_ptr<FlatBufferModel> model =
FlatBufferModel::BuildFromFile(test_file.c_str());
tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> new_interpreter;
builder(&new_interpreter);
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[0], {1});
new_interpreter->ResizeInputTensor(new_interpreter->inputs()[1], {1});
ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[0]), {1});
FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[1]), {1});
ASSERT_EQ(new_interpreter->Invoke(), kTfLiteOk);
ASSERT_EQ(new_interpreter->outputs().size(), 1);
TfLiteTensor* output = new_interpreter->tensor(new_interpreter->outputs()[0]);
CheckIntTensor(output, {1}, {kExpectedValue2});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79d42034-7362-4f03-af6b-9524fcede270 | cpp | tensorflow/tensorflow | evaluation_delegate_provider | tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc | tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc | #include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include <string>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kNnapiDelegate[] = "nnapi";
constexpr char kGpuDelegate[] = "gpu";
constexpr char kHexagonDelegate[] = "hexagon";
constexpr char kXnnpackDelegate[] = "xnnpack";
constexpr char kCoremlDelegate[] = "coreml";
}  // namespace
TfliteInferenceParams::Delegate ParseStringToDelegateType(
const std::string& val) {
if (val == kNnapiDelegate) return TfliteInferenceParams::NNAPI;
if (val == kGpuDelegate) return TfliteInferenceParams::GPU;
if (val == kHexagonDelegate) return TfliteInferenceParams::HEXAGON;
if (val == kXnnpackDelegate) return TfliteInferenceParams::XNNPACK;
if (val == kCoremlDelegate) return TfliteInferenceParams::COREML;
return TfliteInferenceParams::NONE;
}
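// Creates the delegate requested in `params`, or a null TfLiteDelegatePtr
// for NONE and for delegates unavailable on this platform. On failure a
// human-readable reason is written to `error_msg` when it is non-null.
// Typical call-site sketch:
//   std::string error;
//   auto delegate = CreateTfLiteDelegate(params, &error);
//   if (!delegate) LOG(WARNING) << error;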
TfLiteDelegatePtr CreateTfLiteDelegate(const TfliteInferenceParams& params,
std::string* error_msg) {
const auto type = params.delegate();
switch (type) {
case TfliteInferenceParams::NNAPI: {
auto p = CreateNNAPIDelegate();
if (!p && error_msg) *error_msg = "NNAPI not supported";
return p;
}
case TfliteInferenceParams::GPU: {
auto p = CreateGPUDelegate();
if (!p && error_msg) *error_msg = "GPU delegate not supported.";
return p;
}
case TfliteInferenceParams::HEXAGON: {
auto p = CreateHexagonDelegate("",
false);
if (!p && error_msg) {
*error_msg =
"Hexagon delegate is not supported on the platform or required "
"libraries are missing.";
}
return p;
}
case TfliteInferenceParams::XNNPACK: {
auto p = CreateXNNPACKDelegate(params.num_threads(), false);
if (!p && error_msg) *error_msg = "XNNPACK delegate not supported.";
return p;
}
case TfliteInferenceParams::COREML: {
auto p = CreateCoreMlDelegate();
if (!p && error_msg) *error_msg = "CoreML delegate not supported.";
return p;
}
case TfliteInferenceParams::NONE:
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
default:
if (error_msg) {
*error_msg = "Creation of delegate type: " +
TfliteInferenceParams::Delegate_Name(type) +
" not supported yet.";
}
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
}
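// The immediately-invoked lambda indexes every registered delegate provider
// by name at construction time; AddAllDelegateParams() then merges each
// provider's default parameters into params_.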
DelegateProviders::DelegateProviders()
: delegate_list_util_(¶ms_),
delegates_map_([=]() -> std::unordered_map<std::string, int> {
std::unordered_map<std::string, int> delegates_map;
const auto& providers = delegate_list_util_.providers();
for (int i = 0; i < providers.size(); ++i) {
delegates_map[providers[i]->GetName()] = i;
}
return delegates_map;
}()) {
delegate_list_util_.AddAllDelegateParams();
}
std::vector<Flag> DelegateProviders::GetFlags() {
std::vector<Flag> flags;
delegate_list_util_.AppendCmdlineFlags(flags);
return flags;
}
bool DelegateProviders::InitFromCmdlineArgs(int* argc, const char** argv) {
std::vector<Flag> flags = GetFlags();
bool parse_result = Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
TfLiteDelegatePtr DelegateProviders::CreateDelegate(
const std::string& name) const {
const auto it = delegates_map_.find(name);
if (it == delegates_map_.end()) {
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
const auto& providers = delegate_list_util_.providers();
return providers[it->second]->CreateTfLiteDelegate(params_);
}
tools::ToolParams DelegateProviders::GetAllParams(
const TfliteInferenceParams& params) const {
tools::ToolParams tool_params;
tool_params.Merge(params_, false);
if (params.has_num_threads()) {
tool_params.Set<int32_t>("num_threads", params.num_threads());
}
const auto type = params.delegate();
switch (type) {
case TfliteInferenceParams::NNAPI:
if (tool_params.HasParam("use_nnapi")) {
tool_params.Set<bool>("use_nnapi", true);
}
break;
case TfliteInferenceParams::GPU:
if (tool_params.HasParam("use_gpu")) {
tool_params.Set<bool>("use_gpu", true);
}
break;
case TfliteInferenceParams::HEXAGON:
if (tool_params.HasParam("use_hexagon")) {
tool_params.Set<bool>("use_hexagon", true);
}
break;
case TfliteInferenceParams::XNNPACK:
if (tool_params.HasParam("use_xnnpack")) {
tool_params.Set<bool>("use_xnnpack", true);
}
if (tool_params.HasParam("xnnpack_force_fp16")) {
tool_params.Set<bool>("xnnpack_force_fp16", true);
}
break;
case TfliteInferenceParams::COREML:
if (tool_params.HasParam("use_coreml")) {
tool_params.Set<bool>("use_coreml", true);
}
break;
default:
break;
}
return tool_params;
}
}  // namespace evaluation
} | #include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace evaluation {
namespace {
TEST(EvaluationDelegateProviderTest, ParseStringToDelegateType) {
EXPECT_EQ(TfliteInferenceParams::NNAPI, ParseStringToDelegateType("nnapi"));
EXPECT_EQ(TfliteInferenceParams::GPU, ParseStringToDelegateType("gpu"));
EXPECT_EQ(TfliteInferenceParams::HEXAGON,
ParseStringToDelegateType("hexagon"));
EXPECT_EQ(TfliteInferenceParams::XNNPACK,
ParseStringToDelegateType("xnnpack"));
EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Gpu"));
EXPECT_EQ(TfliteInferenceParams::NONE, ParseStringToDelegateType("Testing"));
}
TEST(EvaluationDelegateProviderTest, CreateTfLiteDelegate) {
TfliteInferenceParams params;
params.set_delegate(TfliteInferenceParams::NONE);
EXPECT_TRUE(!CreateTfLiteDelegate(params));
}
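// Flags::Parse consumes the flags it recognizes and compacts argv, so the
// unrecognized --other_undefined_flag is left behind at argv[1].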
TEST(EvaluationDelegateProviderTest, DelegateProvidersParams) {
DelegateProviders providers;
const auto& params = providers.GetAllParams();
EXPECT_TRUE(params.HasParam("use_nnapi"));
EXPECT_TRUE(params.HasParam("use_gpu"));
int argc = 3;
const char* argv[] = {"program_name", "--use_gpu=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_gpu"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(EvaluationDelegateProviderTest, GetAllParamsWithTfliteInferenceParams) {
DelegateProviders providers;
int argc = 2;
const char* argv[] = {"program_name", "--num_threads=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
const auto& default_params = providers.GetAllParams();
EXPECT_EQ(1, default_params.Get<int>("num_threads"));
TfliteInferenceParams params;
params.set_delegate(TfliteInferenceParams::NONE);
params.set_num_threads(4);
tools::ToolParams tool_params = providers.GetAllParams(params);
EXPECT_EQ(4, tool_params.Get<int>("num_threads"));
EXPECT_EQ(1, argc);
}
}  // namespace
}  // namespace evaluation
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a521486a-75e5-4dda-8e18-e58c91d51eb9 | cpp | tensorflow/tensorflow | inference_profiler_stage | tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.cc | tensorflow/lite/tools/evaluation/stages/inference_profiler_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include "fp16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
namespace tflite {
namespace evaluation {
namespace {
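// Gaussian with mean 0.5 and stddev 1/3: draws outside [0, 1) are rejected
// and redrawn below, and each accepted value is then scaled into [min, max).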
constexpr float kGaussianFloatMean = 0.5;
constexpr float kGaussianStdDev = 1.0 / 3;
template <typename T>
void GenerateRandomGaussianData(int64_t num_elements, float min, float max,
std::vector<T>* data) {
data->clear();
data->reserve(num_elements);
static std::normal_distribution<double> distribution(kGaussianFloatMean,
kGaussianStdDev);
static std::default_random_engine generator;
for (int i = 0; i < num_elements; ++i) {
auto rand_n = distribution(generator);
while (rand_n < 0 || rand_n >= 1) {
rand_n = distribution(generator);
}
auto rand_float = min + (max - min) * static_cast<float>(rand_n);
data->push_back(static_cast<T>(rand_float));
}
}
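// Mean absolute error between the reference and test output buffers.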
template <typename T>
float CalculateAverageError(T* reference, T* test, int64_t num_elements) {
float error = 0;
for (int i = 0; i < num_elements; i++) {
float test_value = static_cast<float>(test[i]);
float reference_value = static_cast<float>(reference[i]);
error += std::abs(test_value - reference_value);
}
error /= num_elements;
return error;
}
}  // namespace
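// Builds the interpreter under test (optionally delegated) plus a
// single-threaded CPU reference interpreter for the same model, then checks
// that all I/O types are supported and caches per-tensor element counts.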
TfLiteStatus InferenceProfilerStage::Init(
const DelegateProviders* delegate_providers) {
test_stage_ = std::make_unique<TfliteInferenceStage>(config_);
if (test_stage_->Init(delegate_providers) != kTfLiteOk) return kTfLiteError;
LOG(INFO) << "Test interpreter has been initialized.";
EvaluationStageConfig reference_config;
reference_config.set_name("reference_inference");
auto* params = reference_config.mutable_specification()
->mutable_tflite_inference_params();
params->set_model_file_path(
config_.specification().tflite_inference_params().model_file_path());
params->set_invocations_per_run(
config_.specification().tflite_inference_params().invocations_per_run());
reference_stage_ = std::make_unique<TfliteInferenceStage>(reference_config);
if (reference_stage_->Init() != kTfLiteOk) return kTfLiteError;
LOG(INFO) << "Reference interpreter (1 thread on CPU) has been initialized.";
model_info_ = reference_stage_->GetModelInfo();
for (int i = 0; i < model_info_->inputs.size(); ++i) {
const TfLiteType model_input_type = model_info_->inputs[i]->type;
if (model_input_type == kTfLiteUInt8 || model_input_type == kTfLiteInt8 ||
model_input_type == kTfLiteInt32 || model_input_type == kTfLiteInt64 ||
model_input_type == kTfLiteBool || model_input_type == kTfLiteFloat32 ||
        model_input_type == kTfLiteFloat16) {
      // Supported input type; nothing to do here.
    } else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float16/float32/int8/uint8/int32/int64/bool "
"input types";
return kTfLiteError;
}
auto* input_shape = model_info_->inputs[i]->dims;
int64_t total_num_elements = 1;
for (int i = 0; i < input_shape->size; i++) {
total_num_elements *= input_shape->data[i];
}
input_num_elements_.push_back(total_num_elements);
float_tensors_.emplace_back();
uint8_tensors_.emplace_back();
int8_tensors_.emplace_back();
float16_tensors_.emplace_back();
int64_tensors_.emplace_back();
int32_tensors_.emplace_back();
bool_tensors_.emplace_back();
}
for (int i = 0; i < model_info_->outputs.size(); ++i) {
const TfLiteType model_output_type = model_info_->outputs[i]->type;
if (model_output_type == kTfLiteUInt8 || model_output_type == kTfLiteInt8 ||
model_output_type == kTfLiteInt32 || model_output_type == kTfLiteBool ||
        model_output_type == kTfLiteFloat32) {
      // Supported output type; nothing to do here.
    } else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float32/int8/uint8/int32/bool "
"output types";
return kTfLiteError;
}
auto* output_shape = model_info_->outputs[i]->dims;
int64_t total_num_elements = 1;
for (int i = 0; i < output_shape->size; i++) {
total_num_elements *= output_shape->data[i];
}
output_num_elements_.push_back(total_num_elements);
error_stats_.emplace_back();
}
return kTfLiteOk;
}
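// Feeds the same freshly generated random inputs to both interpreters and
// accumulates the mean absolute difference of every output tensor.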
TfLiteStatus InferenceProfilerStage::Run() {
std::vector<void*> input_ptrs;
for (int i = 0; i < model_info_->inputs.size(); ++i) {
const TfLiteType model_input_type = model_info_->inputs[i]->type;
if (model_input_type == kTfLiteUInt8) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(), &uint8_tensors_[i]);
input_ptrs.push_back(uint8_tensors_[i].data());
} else if (model_input_type == kTfLiteInt8) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max(), &int8_tensors_[i]);
input_ptrs.push_back(int8_tensors_[i].data());
} else if (model_input_type == kTfLiteInt32) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max(), &int32_tensors_[i]);
input_ptrs.push_back(int32_tensors_[i].data());
} else if (model_input_type == kTfLiteInt64) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max(), &int64_tensors_[i]);
input_ptrs.push_back(int64_tensors_[i].data());
} else if (model_input_type == kTfLiteBool) {
GenerateRandomGaussianData(input_num_elements_[i], 0, 1,
&bool_tensors_[i]);
input_ptrs.push_back(bool_tensors_[i].data());
} else if (model_input_type == kTfLiteFloat32) {
GenerateRandomGaussianData(input_num_elements_[i], -1, 1,
&(float_tensors_[i]));
input_ptrs.push_back(float_tensors_[i].data());
} else if (model_input_type == kTfLiteFloat16) {
GenerateRandomGaussianData(input_num_elements_[i], -1, 1,
&(float_tensors_[i]));
      // Size the fp16 buffer before writing through operator[]; the vector
      // created in Init() is empty, so indexing it directly would be out of
      // bounds.
      float16_tensors_[i].resize(float_tensors_[i].size());
      for (size_t j = 0; j < float_tensors_[i].size(); j++) {
        float16_tensors_[i][j] =
            fp16_ieee_from_fp32_value(float_tensors_[i][j]);
      }
input_ptrs.push_back(float16_tensors_[i].data());
} else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float16/float32/int8/uint8/int32/int64/bool "
"input types";
return kTfLiteError;
}
}
test_stage_->SetInputs(input_ptrs);
reference_stage_->SetInputs(input_ptrs);
if (test_stage_->Run() != kTfLiteOk) return kTfLiteError;
if (reference_stage_->Run() != kTfLiteOk) return kTfLiteError;
for (int i = 0; i < model_info_->outputs.size(); ++i) {
const TfLiteType model_output_type = model_info_->outputs[i]->type;
void* reference_ptr = reference_stage_->GetOutputs()->at(i);
void* test_ptr = test_stage_->GetOutputs()->at(i);
float output_diff = 0;
if (model_output_type == kTfLiteUInt8) {
output_diff = CalculateAverageError(static_cast<uint8_t*>(reference_ptr),
static_cast<uint8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteInt8) {
output_diff = CalculateAverageError(static_cast<int8_t*>(reference_ptr),
static_cast<int8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteInt32) {
output_diff = CalculateAverageError(static_cast<int32_t*>(reference_ptr),
static_cast<int32_t*>(test_ptr),
output_num_elements_[i]);
    } else if (model_output_type == kTfLiteBool) {
      // Bool tensors store one byte per element, so compare them as int8.
output_diff = CalculateAverageError(static_cast<int8_t*>(reference_ptr),
static_cast<int8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteFloat32) {
output_diff = CalculateAverageError(static_cast<float*>(reference_ptr),
static_cast<float*>(test_ptr),
output_num_elements_[i]);
}
error_stats_[i].UpdateStat(output_diff);
}
return kTfLiteOk;
}
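// Reports the reference/test latencies plus the min/avg/max/stddev of the
// per-output error observed across all Run() calls so far.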
EvaluationStageMetrics InferenceProfilerStage::LatestMetrics() {
EvaluationStageMetrics metrics;
const auto& reference_metrics = reference_stage_->LatestMetrics();
metrics.set_num_runs(reference_metrics.num_runs());
auto* inference_profiler_metrics =
metrics.mutable_process_metrics()->mutable_inference_profiler_metrics();
*inference_profiler_metrics->mutable_reference_latency() =
reference_metrics.process_metrics().total_latency();
*inference_profiler_metrics->mutable_test_latency() =
test_stage_->LatestMetrics().process_metrics().total_latency();
for (int i = 0; i < error_stats_.size(); ++i) {
AccuracyMetrics* diff = inference_profiler_metrics->add_output_errors();
diff->set_avg_value(error_stats_[i].avg());
diff->set_std_deviation(error_stats_[i].std_deviation());
diff->set_min_value(error_stats_[i].min());
if (error_stats_[i].avg() != 0) {
diff->set_max_value(error_stats_[i].max());
} else {
diff->set_max_value(0);
}
}
return metrics;
}
}  // namespace evaluation
} | #include "tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kInferenceProfilerStageName[] = "inference_profiler_stage";
constexpr char kModelPath[] =
"tensorflow/lite/testdata/add_quantized.bin";
EvaluationStageConfig GetInferenceProfilerStageConfig(int num_threads = 1) {
EvaluationStageConfig config;
config.set_name(kInferenceProfilerStageName);
auto* params =
config.mutable_specification()->mutable_tflite_inference_params();
params->set_model_file_path(kModelPath);
params->set_invocations_per_run(2);
params->set_num_threads(num_threads);
return config;
}
TEST(InferenceProfilerStage, NoParams) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
config.mutable_specification()->clear_tflite_inference_params();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(InferenceProfilerStage, NoModelPath) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->clear_model_file_path();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(InferenceProfilerStage, NoOutputDiffForDefaultConfig) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(stage.Run(), kTfLiteOk);
}
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_TRUE(metrics.process_metrics().has_inference_profiler_metrics());
auto profiler_metrics =
metrics.process_metrics().inference_profiler_metrics();
EXPECT_TRUE(profiler_metrics.has_reference_latency());
EXPECT_TRUE(profiler_metrics.has_test_latency());
EXPECT_EQ(profiler_metrics.output_errors_size(), 1);
EXPECT_EQ(profiler_metrics.output_errors(0).avg_value(), 0);
}
}  // namespace
}  // namespace evaluation
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/inference_profiler_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28c9bf17-b5d5-4830-a34f-6402572d733a | cpp | tensorflow/tensorflow | tflite_inference_stage | tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.cc | tensorflow/lite/tools/evaluation/stages/tflite_inference_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
#include <cstring>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {}
namespace tflite {
namespace evaluation {
namespace {
TfLiteModelInfo GetTfliteModelInfo(const Interpreter& interpreter) {
TfLiteModelInfo model_info;
for (int i : interpreter.inputs()) {
model_info.inputs.push_back(interpreter.tensor(i));
}
for (int i : interpreter.outputs()) {
model_info.outputs.push_back(interpreter.tensor(i));
}
return model_info;
}
}  // namespace
void TfliteInferenceStage::UpdateModelInfo() {
model_info_ = GetTfliteModelInfo(*interpreter_);
outputs_.clear();
outputs_.reserve(interpreter_->outputs().size());
for (int i : interpreter_->outputs()) {
TfLiteTensor* tensor = interpreter_->tensor(i);
outputs_.push_back(tensor->data.raw);
}
}
TfLiteStatus TfliteInferenceStage::ResizeInputs(
const std::vector<std::vector<int>>& shapes) {
const std::vector<int>& interpreter_inputs = interpreter_->inputs();
if (interpreter_inputs.size() != shapes.size()) {
LOG(ERROR) << "New shape is not compatible";
return kTfLiteError;
}
for (int j = 0; j < shapes.size(); ++j) {
int i = interpreter_inputs[j];
TfLiteTensor* t = interpreter_->tensor(i);
if (t->type != kTfLiteString) {
TF_LITE_ENSURE_STATUS(interpreter_->ResizeInputTensor(i, shapes[j]));
}
}
TF_LITE_ENSURE_STATUS(interpreter_->AllocateTensors());
UpdateModelInfo();
return kTfLiteOk;
}
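// Takes ownership of `delegate` and applies it to the already-built
// interpreter; passing a null delegate is a warning-level no-op.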
TfLiteStatus TfliteInferenceStage::ApplyCustomDelegate(
Interpreter::TfLiteDelegatePtr delegate) {
if (!interpreter_) {
LOG(ERROR) << "Stage not initialized before calling ApplyCustomDelegate";
return kTfLiteError;
}
if (!delegate) {
LOG(WARNING)
<< "Tried to apply null TfLiteDelegatePtr to TfliteInferenceStage";
return kTfLiteOk;
}
delegates_.push_back(std::move(delegate));
TF_LITE_ENSURE_STATUS(
interpreter_->ModifyGraphWithDelegate(delegates_.back().get()));
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::Init(
const DelegateProviders* delegate_providers) {
if (!config_.specification().has_tflite_inference_params()) {
LOG(ERROR) << "TfliteInferenceParams not provided";
return kTfLiteError;
}
auto& params = config_.specification().tflite_inference_params();
if (!params.has_model_file_path()) {
LOG(ERROR) << "Model path not provided";
return kTfLiteError;
}
std::ifstream model_check(params.model_file_path());
if (!model_check.good()) {
LOG(ERROR) << "Model file not found";
return kTfLiteError;
}
model_ = FlatBufferModel::BuildFromFile(params.model_file_path().c_str());
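  // If the caller explicitly passed --use_xnnpack=false, pick the resolver
  // variant that skips TFLite's default (XNNPACK) delegate so inference runs
  // on the plain CPU kernels.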
bool apply_default_delegates = true;
if (delegate_providers != nullptr) {
const auto& provider_params = delegate_providers->GetAllParams();
if (provider_params.HasParam("use_xnnpack") &&
provider_params.HasValueSet<bool>("use_xnnpack") &&
!provider_params.Get<bool>("use_xnnpack")) {
apply_default_delegates = false;
}
}
if (apply_default_delegates) {
resolver_ = std::make_unique<ops::builtin::BuiltinOpResolver>();
} else {
resolver_ = std::make_unique<
ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
}
RegisterSelectedOps(resolver_.get());
InterpreterBuilder(*model_, *resolver_)(&interpreter_);
if (!interpreter_) {
LOG(ERROR) << "Could not build interpreter";
return kTfLiteError;
}
interpreter_->SetNumThreads(params.num_threads());
if (!delegate_providers) {
std::string error_message;
auto delegate = CreateTfLiteDelegate(params, &error_message);
if (delegate) {
delegates_.push_back(std::move(delegate));
LOG(INFO) << "Successfully created "
<< params.Delegate_Name(params.delegate()) << " delegate.";
} else {
LOG(WARNING) << error_message;
}
} else {
auto delegates = delegate_providers->CreateAllDelegates(params);
for (auto& one : delegates) delegates_.push_back(std::move(one.delegate));
}
for (int i = 0; i < delegates_.size(); ++i) {
if (interpreter_->ModifyGraphWithDelegate(delegates_[i].get()) !=
kTfLiteOk) {
LOG(FATAL) << "Failed to apply delegate " << i;
}
}
interpreter_->AllocateTensors();
UpdateModelInfo();
return kTfLiteOk;
}
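// Points each input tensor's data.raw at the caller-provided buffer (no
// copy), then times `invocations_per_run` consecutive Invoke() calls.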
TfLiteStatus TfliteInferenceStage::Run() {
if (!inputs_) {
LOG(ERROR) << "Input data not set";
return kTfLiteError;
}
for (int i = 0; i < interpreter_->inputs().size(); ++i) {
TfLiteTensor* tensor = interpreter_->tensor(interpreter_->inputs()[i]);
tensor->data.raw = static_cast<char*>(inputs_->at(i));
}
auto& params = config_.specification().tflite_inference_params();
for (int i = 0; i < params.invocations_per_run(); ++i) {
int64_t start_us = profiling::time::NowMicros();
if (interpreter_->Invoke() != kTfLiteOk) {
LOG(ERROR) << "TFLite interpreter failed to invoke at run " << i;
return kTfLiteError;
}
latency_stats_.UpdateStat(profiling::time::NowMicros() - start_us);
}
return kTfLiteOk;
}
EvaluationStageMetrics TfliteInferenceStage::LatestMetrics() {
auto& params = config_.specification().tflite_inference_params();
EvaluationStageMetrics metrics;
auto* latency_metrics =
metrics.mutable_process_metrics()->mutable_total_latency();
latency_metrics->set_last_us(latency_stats_.newest());
latency_metrics->set_max_us(latency_stats_.max());
latency_metrics->set_min_us(latency_stats_.min());
latency_metrics->set_sum_us(latency_stats_.sum());
latency_metrics->set_avg_us(latency_stats_.avg());
latency_metrics->set_std_deviation_us(latency_stats_.std_deviation());
metrics.set_num_runs(
static_cast<int>(latency_stats_.count() / params.invocations_per_run()));
auto* inference_metrics =
metrics.mutable_process_metrics()->mutable_tflite_inference_metrics();
inference_metrics->set_num_inferences(latency_stats_.count());
return metrics;
}
}  // namespace evaluation
} | #include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTfliteInferenceStageName[] = "tflite_inference_stage";
constexpr char kModelPath[] =
"tensorflow/lite/testdata/add_quantized.bin";
constexpr int kTotalElements = 1 * 8 * 8 * 3;
template <typename T>
T* SetValues(T array[], T value) {
for (int i = 0; i < kTotalElements; i++) {
array[i] = value;
}
return array;
}
EvaluationStageConfig GetTfliteInferenceStageConfig() {
EvaluationStageConfig config;
config.set_name(kTfliteInferenceStageName);
auto* params =
config.mutable_specification()->mutable_tflite_inference_params();
params->set_model_file_path(kModelPath);
params->set_invocations_per_run(2);
return config;
}
TEST(TfliteInferenceStage, NoParams) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()->clear_tflite_inference_params();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, NoModelPath) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->clear_model_file_path();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, IncorrectModelPath) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->set_model_file_path("xyz.tflite");
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, NoInputData) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TfliteInferenceStage, CorrectModelInfo) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const TfLiteModelInfo* model_info = stage.GetModelInfo();
EXPECT_EQ(model_info->inputs.size(), 1);
const TfLiteTensor* tensor = model_info->inputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, kTotalElements);
const TfLiteIntArray* input_shape = tensor->dims;
EXPECT_EQ(input_shape->data[0], 1);
EXPECT_EQ(input_shape->data[1], 8);
EXPECT_EQ(input_shape->data[2], 8);
EXPECT_EQ(input_shape->data[3], 3);
EXPECT_EQ(model_info->outputs.size(), 1);
tensor = model_info->outputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, kTotalElements);
const TfLiteIntArray* output_shape = tensor->dims;
EXPECT_EQ(output_shape->data[0], 1);
EXPECT_EQ(output_shape->data[1], 8);
EXPECT_EQ(output_shape->data[2], 8);
EXPECT_EQ(output_shape->data[3], 3);
}
TEST(TfliteInferenceStage, TestResizeModel) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.ResizeInputs({{3, 8, 8, 3}}), kTfLiteOk);
const TfLiteModelInfo* model_info = stage.GetModelInfo();
EXPECT_EQ(model_info->inputs.size(), 1);
const TfLiteTensor* tensor = model_info->inputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, 3 * kTotalElements);
const TfLiteIntArray* input_shape = tensor->dims;
EXPECT_EQ(input_shape->data[0], 3);
EXPECT_EQ(input_shape->data[1], 8);
EXPECT_EQ(input_shape->data[2], 8);
EXPECT_EQ(input_shape->data[3], 3);
EXPECT_EQ(model_info->outputs.size(), 1);
tensor = model_info->outputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, 3 * kTotalElements);
const TfLiteIntArray* output_shape = tensor->dims;
EXPECT_EQ(output_shape->data[0], 3);
EXPECT_EQ(output_shape->data[1], 8);
EXPECT_EQ(output_shape->data[2], 8);
EXPECT_EQ(output_shape->data[3], 3);
}
TEST(TfliteInferenceStage, CorrectOutput) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
uint8_t input_tensor[kTotalElements];
SetValues(input_tensor, static_cast<uint8_t>(2));
std::vector<void*> inputs;
inputs.push_back(input_tensor);
stage.SetInputs(inputs);
EXPECT_EQ(stage.Run(), kTfLiteOk);
uint8_t* output_tensor = static_cast<uint8_t*>(stage.GetOutputs()->at(0));
for (int i = 0; i < kTotalElements; i++) {
EXPECT_EQ(output_tensor[i], static_cast<uint8_t>(6));
}
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(metrics.num_runs(), 1);
const auto& latency = metrics.process_metrics().total_latency();
const auto max_latency = latency.max_us();
EXPECT_GT(max_latency, 0);
EXPECT_LT(max_latency, 1e7);
EXPECT_LE(latency.last_us(), max_latency);
EXPECT_LE(latency.min_us(), max_latency);
EXPECT_GE(latency.sum_us(), max_latency);
EXPECT_LE(latency.avg_us(), max_latency);
EXPECT_TRUE(latency.has_std_deviation_us());
EXPECT_EQ(
metrics.process_metrics().tflite_inference_metrics().num_inferences(), 2);
}
TEST(TfliteInferenceStage, CustomDelegate) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
Interpreter::TfLiteDelegatePtr test_delegate = CreateNNAPIDelegate();
EXPECT_NE(stage.ApplyCustomDelegate(std::move(test_delegate)), kTfLiteOk);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.ApplyCustomDelegate(std::move(test_delegate)), kTfLiteOk);
}
}  // namespace
}  // namespace evaluation
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/tflite_inference_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66f3c55a-dbb9-4df7-90ab-dabb19c6b4bc | cpp | tensorflow/tensorflow | topk_accuracy_eval_stage | tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.cc | tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <algorithm>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
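// Stable argsort of `values` in descending order, truncated to the top k
// indices.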
std::vector<int> GetTopKIndices(const std::vector<float>& values, int k) {
std::vector<int> indices(values.size());
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&values](int a, int b) { return values[a] > values[b]; });
indices.resize(k);
return indices;
}
}  // namespace
TfLiteStatus TopkAccuracyEvalStage::Init() {
num_runs_ = 0;
auto& params = config_.specification().topk_accuracy_eval_params();
if (!params.has_k()) {
LOG(ERROR) << "Value of k not provided for TopkAccuracyEvalStage";
return kTfLiteError;
}
accuracy_counts_ = std::vector<int>(params.k(), 0);
if (ground_truth_labels_.empty()) {
LOG(ERROR) << "Ground-truth labels are empty";
return kTfLiteError;
}
num_total_labels_ = ground_truth_labels_.size();
if (params.k() > num_total_labels_) {
LOG(ERROR) << "k is too large";
return kTfLiteError;
}
if (!model_output_shape_) {
LOG(ERROR) << "Model output details not correctly set";
return kTfLiteError;
}
if (!(model_output_shape_->size == 2) ||
!(model_output_shape_->data[0] == 1) ||
!(model_output_shape_->data[1] == num_total_labels_)) {
LOG(ERROR) << "Invalid model_output_shape_";
return kTfLiteError;
}
if (model_output_type_ != kTfLiteFloat32 &&
model_output_type_ != kTfLiteUInt8 && model_output_type_ != kTfLiteInt8) {
LOG(ERROR) << "model_output_type_ not supported";
return kTfLiteError;
}
return kTfLiteOk;
}
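// Copies the raw model output into a float probability vector, takes its
// top-k indices, and credits the accuracy counters if the ground truth is
// among them.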
TfLiteStatus TopkAccuracyEvalStage::Run() {
if (!model_output_) {
LOG(ERROR) << "model_output_ not set correctly";
return kTfLiteError;
}
if (!ground_truth_label_) {
LOG(ERROR) << "ground_truth_label_ not provided";
return kTfLiteError;
}
auto& params = config_.specification().topk_accuracy_eval_params();
std::vector<float> probabilities;
probabilities.reserve(num_total_labels_);
if (model_output_type_ == kTfLiteFloat32) {
auto probs = static_cast<float*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
} else if (model_output_type_ == kTfLiteUInt8) {
auto probs = static_cast<uint8_t*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
} else if (model_output_type_ == kTfLiteInt8) {
auto probs = static_cast<int8_t*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
}
std::vector<int> top_k = GetTopKIndices(probabilities, params.k());
UpdateCounts(top_k);
return kTfLiteOk;
}
EvaluationStageMetrics TopkAccuracyEvalStage::LatestMetrics() {
EvaluationStageMetrics metrics;
if (num_runs_ == 0) return metrics;
metrics.set_num_runs(num_runs_);
auto* topk_metrics =
metrics.mutable_process_metrics()->mutable_topk_accuracy_metrics();
for (const auto& count : accuracy_counts_) {
topk_metrics->add_topk_accuracies(static_cast<float>(count) / num_runs_);
}
return metrics;
}
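// If the ground-truth label sits at (0-based) rank i of the top-k list,
// every counter for k' > i is credited, so top-k accuracy is non-decreasing
// in k.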
void TopkAccuracyEvalStage::UpdateCounts(const std::vector<int>& topk_indices) {
for (size_t i = 0; i < topk_indices.size(); ++i) {
if (*ground_truth_label_ == ground_truth_labels_[topk_indices[i]]) {
for (size_t j = i; j < topk_indices.size(); j++) {
accuracy_counts_[j] += 1;
}
break;
}
}
num_runs_++;
}
}  // namespace evaluation
} | #include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTopkAccuracyEvalStageName[] = "topk_accuracy_eval_stage";
constexpr int kNumCategories = 1001;
EvaluationStageConfig GetTopkAccuracyEvalStageConfig() {
EvaluationStageConfig config;
config.set_name(kTopkAccuracyEvalStageName);
auto* params =
config.mutable_specification()->mutable_topk_accuracy_eval_params();
params->set_k(5);
return config;
}
template <typename T>
T* ResetOutputArray(T array[]) {
for (int i = 0; i < kNumCategories; i++) {
array[i] = 0;
}
return array;
}
std::vector<std::string> CreateGroundTruthLabels() {
std::vector<std::string> ground_truth_labels;
ground_truth_labels.reserve(kNumCategories);
for (int i = 0; i < kNumCategories; i++) {
ground_truth_labels.push_back(std::to_string(i));
}
return ground_truth_labels;
}
TEST(TopkAccuracyEvalStage, NoInitializers) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, NoK) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()
->mutable_topk_accuracy_eval_params()
->clear_k();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, NoGroundTruthLabels) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = {};
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, KTooLarge) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()->mutable_topk_accuracy_eval_params()->set_k(
10000);
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, WeirdModelOutputShape) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, UnsupportedModelOutputType) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
  // Use a valid shape so that Init() fails on the unsupported output type
  // below rather than on the earlier shape check.
  model_output_shape->data[1] = kNumCategories;
  TfLiteType model_output_type = kTfLiteComplex64;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, NoInputs) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, InvalidGroundTruth) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[0] = 0.8;
stage.SetEvalInputs(tensor, nullptr);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, FloatTest_CorrectLabelsAtLastIndices) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[4] = 0.9;
tensor[3] = 0.8;
tensor[2] = 0.7;
tensor[1] = 0.6;
tensor[0] = 0.5;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
for (int i = 0; i < 4; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
EXPECT_FLOAT_EQ(0.5, accuracy_metrics.topk_accuracies(3));
for (int i = 0; i < 3; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
}
class CorrectTopkAccuracyEvalTest : public ::testing::Test {
protected:
template <typename T>
void VerifyCorrectBehaviorForType(T ground_truth_0_value,
T ground_truth_1_value,
TfLiteType model_output_type) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
stage.SetTaskInfo(ground_truth_labels, model_output_type,
model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(0, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_EQ(0, accuracy_metrics.topk_accuracies_size());
T array[kNumCategories];
T* tensor = ResetOutputArray(array);
tensor[0] = ground_truth_0_value;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
tensor[1] = ground_truth_1_value;
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
}
};
TEST_F(CorrectTopkAccuracyEvalTest, FloatTest) {
VerifyCorrectBehaviorForType(static_cast<float>(0.8), static_cast<float>(0.9),
kTfLiteFloat32);
}
TEST_F(CorrectTopkAccuracyEvalTest, Int8Test) {
VerifyCorrectBehaviorForType(static_cast<int8_t>(1), static_cast<int8_t>(2),
kTfLiteInt8);
}
TEST_F(CorrectTopkAccuracyEvalTest, UInt8Test) {
VerifyCorrectBehaviorForType(static_cast<uint8_t>(1), static_cast<uint8_t>(2),
kTfLiteUInt8);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91de9e22-48a4-4a8d-8ce5-150626fc675b | cpp | tensorflow/tensorflow | image_preprocessing_stage | tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.cc | tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <ios>
#include <iterator>
#include <memory>
#include <streambuf>
#include <string>
#include <vector>
#include "absl/base/casts.h"
#include "absl/strings/ascii.h"
#include "jpeglib.h"
#include "tensorflow/core/lib/jpeg/jpeg_mem.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/reference/pad.h"
#include "tensorflow/lite/kernels/internal/reference/resize_bilinear.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/preprocessing_steps.pb.h"
namespace tflite {
namespace evaluation {
namespace {
const int kNumChannels = 3;
inline int ImageArrayOffset(int height, int width, int h, int w, int c) {
return (h * width + w) * kNumChannels + c;
}
struct ImageData {
uint32_t width;
uint32_t height;
std::unique_ptr<std::vector<float>> data;
float GetData(int h, int w, int c) {
return data->at(ImageArrayOffset(height, width, h, w, c));
}
};
inline void LoadImageRaw(std::string* filename, ImageData* image_data) {
std::ifstream stream(filename->c_str(), std::ios::in | std::ios::binary);
std::vector<uint8_t> raw_data((std::istreambuf_iterator<char>(stream)),
std::istreambuf_iterator<char>());
std::vector<float>* orig_image = new std::vector<float>();
orig_image->reserve(raw_data.size());
for (int i = 0; i < raw_data.size(); ++i) {
orig_image->push_back(static_cast<float>(raw_data[i]));
}
image_data->data.reset(orig_image);
}
inline void LoadImageJpeg(std::string* filename, ImageData* image_data) {
std::ifstream t(*filename);
std::string image_str((std::istreambuf_iterator<char>(t)),
std::istreambuf_iterator<char>());
const int fsize = image_str.size();
auto temp = absl::bit_cast<const uint8_t*>(image_str.data());
std::unique_ptr<uint8_t[]> original_image;
int original_width, original_height, original_channels;
tensorflow::jpeg::UncompressFlags flags;
flags.dct_method = JDCT_ISLOW;
flags.components = kNumChannels;
original_image.reset(Uncompress(temp, fsize, flags, &original_width,
&original_height, &original_channels,
nullptr));
image_data->width = original_width;
image_data->height = original_height;
int original_size = original_height * original_width * original_channels;
std::vector<float>* float_image = new std::vector<float>();
float_image->reserve(original_size);
for (int i = 0; i < original_size; ++i) {
float_image->push_back(static_cast<float>(original_image[i]));
}
image_data->data.reset(float_image);
}
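// Center-crops `image_data` in place. With a cropping fraction f, the crop is
// round(f * H) x round(f * W); e.g. f = 0.875 on a 300x300 image yields a
// centered 263x263 crop (round(262.5) == 263). With target_size, the
// requested height/width are used directly. square_cropping only applies on
// the fraction path and shrinks the longer side to match the shorter one.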
inline void Crop(ImageData* image_data, const CroppingParams& crop_params) {
int crop_height, crop_width;
int input_width = image_data->width;
int input_height = image_data->height;
if (crop_params.has_cropping_fraction()) {
crop_height =
static_cast<int>(round(crop_params.cropping_fraction() * input_height));
crop_width =
static_cast<int>(round(crop_params.cropping_fraction() * input_width));
} else if (crop_params.has_target_size()) {
crop_height = crop_params.target_size().height();
crop_width = crop_params.target_size().width();
}
if (crop_params.has_cropping_fraction() && crop_params.square_cropping()) {
crop_height = std::min(crop_height, crop_width);
crop_width = crop_height;
}
int start_w = static_cast<int>(round((input_width - crop_width) / 2.0));
int start_h = static_cast<int>(round((input_height - crop_height) / 2.0));
std::vector<float>* cropped_image = new std::vector<float>();
cropped_image->reserve(crop_height * crop_width * kNumChannels);
for (int in_h = start_h; in_h < start_h + crop_height; ++in_h) {
for (int in_w = start_w; in_w < start_w + crop_width; ++in_w) {
for (int c = 0; c < kNumChannels; ++c) {
cropped_image->push_back(image_data->GetData(in_h, in_w, c));
}
}
}
image_data->height = crop_height;
image_data->width = crop_width;
image_data->data.reset(cropped_image);
}
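// Bilinearly resizes to target_size. In aspect-preserving mode the image is
// scaled by the larger of the two target/input ratios, so neither output
// dimension falls below its target; e.g. a 640x480 input with a 224x224
// target scales by 224/480, producing a 299x224 output whose excess width is
// presumably cropped or padded by a later step.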
inline void ResizeBilinear(ImageData* image_data,
const ResizingParams& params) {
tflite::ResizeBilinearParams resize_params;
resize_params.align_corners = false;
resize_params.half_pixel_centers = false;
tflite::RuntimeShape input_shape({1, static_cast<int>(image_data->height),
static_cast<int>(image_data->width),
kNumChannels});
int output_height, output_width;
if (params.aspect_preserving()) {
float ratio_w =
params.target_size().width() / static_cast<float>(image_data->width);
float ratio_h =
params.target_size().height() / static_cast<float>(image_data->height);
if (ratio_w >= ratio_h) {
output_width = params.target_size().width();
output_height = static_cast<int>(round(image_data->height * ratio_w));
} else {
output_width = static_cast<int>(round(image_data->width * ratio_h));
output_height = params.target_size().height();
}
} else {
output_height = params.target_size().height();
output_width = params.target_size().width();
}
tflite::RuntimeShape output_size_dims({1, 1, 1, 2});
std::vector<int32_t> output_size_data = {output_height, output_width};
tflite::RuntimeShape output_shape(
{1, output_height, output_width, kNumChannels});
int output_size = output_width * output_height * kNumChannels;
std::vector<float>* output_data = new std::vector<float>(output_size, 0);
tflite::reference_ops::ResizeBilinear(
resize_params, input_shape, image_data->data->data(), output_size_dims,
output_size_data.data(), output_shape, output_data->data());
image_data->height = output_height;
image_data->width = output_width;
image_data->data.reset(output_data);
}
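// Pads the image with `padding_value` up to target_size, splitting each
// margin as evenly as possible: the top/left padding is
// round((target - size) / 2) and the bottom/right padding takes the
// remainder.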
inline void Pad(ImageData* image_data, const PaddingParams& params) {
int output_width = params.target_size().width();
int output_height = params.target_size().height();
int pad_value = params.padding_value();
tflite::PadParams pad_params;
pad_params.left_padding_count = 4;
std::uninitialized_fill_n(pad_params.left_padding, 4, 0);
pad_params.left_padding[1] =
static_cast<int>(round((output_height - image_data->height) / 2.0));
pad_params.left_padding[2] =
static_cast<int>(round((output_width - image_data->width) / 2.0));
pad_params.right_padding_count = 4;
std::uninitialized_fill_n(pad_params.right_padding, 4, 0);
pad_params.right_padding[1] =
output_height - pad_params.left_padding[1] - image_data->height;
pad_params.right_padding[2] =
output_width - pad_params.left_padding[2] - image_data->width;
tflite::RuntimeShape input_shape({1, static_cast<int>(image_data->height),
static_cast<int>(image_data->width),
kNumChannels});
tflite::RuntimeShape output_shape(
{1, output_height, output_width, kNumChannels});
int output_size = output_width * output_height * kNumChannels;
std::vector<float>* output_data = new std::vector<float>(output_size, 0);
tflite::reference_ops::Pad(pad_params, input_shape, image_data->data->data(),
&pad_value, output_shape, output_data->data());
image_data->height = output_height;
image_data->width = output_width;
image_data->data.reset(output_data);
}
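// Normalizes pixels in place as (value - mean) * scale. The per-channel
// branch walks the buffer in r, g, b order, i.e. interleaved RGB with
// kNumChannels == 3 is assumed.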
inline void Normalize(ImageData* image_data,
const NormalizationParams& params) {
float scale = params.scale();
float* data_end = image_data->data->data() + image_data->data->size();
if (params.has_channelwise_mean()) {
float mean = params.channelwise_mean();
for (float* data = image_data->data->data(); data < data_end; ++data) {
*data = (*data - mean) * scale;
}
} else {
float r_mean = params.means().r_mean();
float g_mean = params.means().g_mean();
float b_mean = params.means().b_mean();
for (float* data = image_data->data->data(); data < data_end;) {
*data = (*data - r_mean) * scale;
++data;
*data = (*data - g_mean) * scale;
++data;
*data = (*data - b_mean) * scale;
++data;
}
}
}
}
TfLiteStatus ImagePreprocessingStage::Init() {
if (!config_.has_specification() ||
!config_.specification().has_image_preprocessing_params()) {
LOG(ERROR) << "No preprocessing params";
return kTfLiteError;
}
const ImagePreprocessingParams& params =
config_.specification().image_preprocessing_params();
for (const ImagePreprocessingStepParams& param : params.steps()) {
if (param.has_cropping_params()) {
const CroppingParams& crop_params = param.cropping_params();
if (crop_params.has_cropping_fraction() &&
(crop_params.cropping_fraction() <= 0 ||
crop_params.cropping_fraction() > 1.0)) {
LOG(ERROR) << "Invalid cropping fraction";
return kTfLiteError;
}
}
}
output_type_ = static_cast<TfLiteType>(params.output_type());
return kTfLiteOk;
}
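// Loads the image (.rgb8 raw or .jpg/.jpeg), applies the configured steps in
// order, and materializes the result in the buffer matching output_type_.
// Geometric steps (crop/resize/pad) are skipped for raw images, which are
// assumed to already be correctly sized; normalization still applies.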
TfLiteStatus ImagePreprocessingStage::Run() {
if (!image_path_) {
LOG(ERROR) << "Image path not set";
return kTfLiteError;
}
ImageData image_data;
const ImagePreprocessingParams& params =
config_.specification().image_preprocessing_params();
int64_t start_us = profiling::time::NowMicros();
string image_ext = image_path_->substr(image_path_->find_last_of("."));
absl::AsciiStrToLower(&image_ext);
bool is_raw_image = (image_ext == ".rgb8");
  if (is_raw_image) {
LoadImageRaw(image_path_, &image_data);
} else if (image_ext == ".jpg" || image_ext == ".jpeg") {
LoadImageJpeg(image_path_, &image_data);
} else {
LOG(ERROR) << "Extension " << image_ext << " is not supported";
return kTfLiteError;
}
for (const ImagePreprocessingStepParams& param : params.steps()) {
if (param.has_cropping_params()) {
if (is_raw_image) {
LOG(WARNING) << "Image cropping will not be performed on raw images";
continue;
}
Crop(&image_data, param.cropping_params());
} else if (param.has_resizing_params()) {
if (is_raw_image) {
LOG(WARNING) << "Image resizing will not be performed on raw images";
continue;
}
ResizeBilinear(&image_data, param.resizing_params());
} else if (param.has_padding_params()) {
if (is_raw_image) {
LOG(WARNING) << "Image padding will not be performed on raw images";
continue;
}
Pad(&image_data, param.padding_params());
} else if (param.has_normalization_params()) {
Normalize(&image_data, param.normalization_params());
}
}
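  // The conversion buffers below are sized with 16 bytes of headroom beyond
  // the image itself; the original rationale isn't visible here (likely
  // padding for readers that may overrun the tensor end -- an assumption).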
if (output_type_ == kTfLiteUInt8) {
uint8_preprocessed_image_.clear();
    uint8_preprocessed_image_.resize(image_data.data->size() + 16);
for (int i = 0; i < image_data.data->size(); ++i) {
uint8_preprocessed_image_[i] =
static_cast<uint8_t>(image_data.data->at(i));
}
} else if (output_type_ == kTfLiteInt8) {
int8_preprocessed_image_.clear();
    int8_preprocessed_image_.resize(image_data.data->size() + 16);
for (int i = 0; i < image_data.data->size(); ++i) {
int8_preprocessed_image_[i] = static_cast<int8_t>(image_data.data->at(i));
}
} else if (output_type_ == kTfLiteFloat32) {
float_preprocessed_image_ = *image_data.data;
}
latency_stats_.UpdateStat(profiling::time::NowMicros() - start_us);
return kTfLiteOk;
}
void* ImagePreprocessingStage::GetPreprocessedImageData() {
if (latency_stats_.count() == 0) return nullptr;
if (output_type_ == kTfLiteUInt8) {
return uint8_preprocessed_image_.data();
} else if (output_type_ == kTfLiteInt8) {
return int8_preprocessed_image_.data();
} else if (output_type_ == kTfLiteFloat32) {
return float_preprocessed_image_.data();
}
return nullptr;
}
EvaluationStageMetrics ImagePreprocessingStage::LatestMetrics() {
EvaluationStageMetrics metrics;
auto* latency_metrics =
metrics.mutable_process_metrics()->mutable_total_latency();
latency_metrics->set_last_us(latency_stats_.newest());
latency_metrics->set_max_us(latency_stats_.max());
latency_metrics->set_min_us(latency_stats_.min());
latency_metrics->set_sum_us(latency_stats_.sum());
latency_metrics->set_avg_us(latency_stats_.avg());
metrics.set_num_runs(static_cast<int>(latency_stats_.count()));
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kImagePreprocessingStageName[] = "inception_preprocessing_stage";
constexpr char kTestImage[] =
"tensorflow/lite/tools/evaluation/stages/testdata/"
"grace_hopper.jpg";
constexpr int kImageDim = 224;
TEST(ImagePreprocessingStage, NoParams) {
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
EvaluationStageConfig config = builder.build();
config.mutable_specification()->clear_image_preprocessing_params();
ImagePreprocessingStage stage = ImagePreprocessingStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ImagePreprocessingStage, InvalidCroppingFraction) {
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddCroppingStep(-0.8);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ImagePreprocessingStage, ImagePathNotSet) {
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.Run(), kTfLiteError);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingFloat) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddNormalizationStep(127.5, 1.0 / 127.5);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
float* preprocessed_image_ptr =
static_cast<float*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[0], -0.74901962);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[1], -0.74901962);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[2], -0.68627453);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingFloat_NoCrop) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddResizingStep(224, 224, false);
builder.AddNormalizationStep(127.5, 1.0 / 127.5);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
float* preprocessed_image_ptr =
static_cast<float*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[0], -0.83529419);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[1], -0.7960785);
EXPECT_FLOAT_EQ(preprocessed_image_ptr[2], -0.35686275);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingUInt8Quantized) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteUInt8);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
uint8_t* preprocessed_image_ptr =
static_cast<uint8_t*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], 32);
EXPECT_EQ(preprocessed_image_ptr[1], 32);
EXPECT_EQ(preprocessed_image_ptr[2], 40);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingInt8Quantized) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteInt8);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddNormalizationStep(128.0, 1.0);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
int8_t* preprocessed_image_ptr =
static_cast<int8_t*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], -96);
EXPECT_EQ(preprocessed_image_ptr[1], -96);
EXPECT_EQ(preprocessed_image_ptr[2], -88);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingPadding) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteInt8);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddPaddingStep(225, 225, 0);
builder.AddNormalizationStep(128.0, 1.0);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
int8_t* preprocessed_image_ptr =
static_cast<int8_t*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], -128);
EXPECT_EQ(preprocessed_image_ptr[224], -128);
EXPECT_EQ(preprocessed_image_ptr[225 * 3], -128);
EXPECT_EQ(preprocessed_image_ptr[225 * 3 + 3], -96);
EXPECT_EQ(preprocessed_image_ptr[225 * 3 + 4], -96);
EXPECT_EQ(preprocessed_image_ptr[225 * 3 + 5], -88);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
TEST(ImagePreprocessingStage, TestImagePreprocessingSubtractMean) {
std::string image_path = kTestImage;
ImagePreprocessingConfigBuilder builder(kImagePreprocessingStageName,
kTfLiteFloat32);
builder.AddCroppingStep(0.875);
builder.AddResizingStep(224, 224, false);
builder.AddPerChannelNormalizationStep(110.0, 120.0, 123.0, 1.0);
ImagePreprocessingStage stage = ImagePreprocessingStage(builder.build());
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.GetPreprocessedImageData(), nullptr);
stage.SetImagePath(&image_path);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
float* preprocessed_image_ptr =
static_cast<float*>(stage.GetPreprocessedImageData());
EXPECT_NE(preprocessed_image_ptr, nullptr);
EXPECT_EQ(preprocessed_image_ptr[0], -78);
EXPECT_EQ(preprocessed_image_ptr[1], -88);
EXPECT_EQ(preprocessed_image_ptr[2], -83);
EXPECT_EQ(metrics.num_runs(), 1);
const auto& last_latency =
metrics.process_metrics().total_latency().last_us();
EXPECT_GT(last_latency, 0);
EXPECT_LT(last_latency, 1e7);
EXPECT_EQ(metrics.process_metrics().total_latency().max_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().min_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().sum_us(), last_latency);
EXPECT_EQ(metrics.process_metrics().total_latency().avg_us(), last_latency);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/image_preprocessing_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cadcf136-1281-4209-bb13-7b782228d794 | cpp | tensorflow/tensorflow | object_detection_average_precision_stage | tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.cc | tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.h"
#include <stdint.h>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
namespace tflite {
namespace evaluation {
namespace {
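// Maps a proto ObjectInstance onto the internal image::Detection, carrying
// over the normalized box corners, the owning image id, and the score.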
image::Detection ConvertProtoToDetection(
const ObjectDetectionResult::ObjectInstance& input, int image_id) {
image::Detection detection;
detection.box.x.min = input.bounding_box().normalized_left();
detection.box.x.max = input.bounding_box().normalized_right();
detection.box.y.min = input.bounding_box().normalized_top();
detection.box.y.max = input.bounding_box().normalized_bottom();
detection.imgid = image_id;
detection.score = input.score();
return detection;
}
}
TfLiteStatus ObjectDetectionAveragePrecisionStage::Init() {
num_classes_ = config_.specification()
.object_detection_average_precision_params()
.num_classes();
if (num_classes_ <= 0) {
LOG(ERROR) << "num_classes cannot be <= 0";
return kTfLiteError;
}
for (int i = 0; i < num_classes_; ++i) {
ground_truth_object_vectors_.emplace_back();
predicted_object_vectors_.emplace_back();
}
return kTfLiteOk;
}
TfLiteStatus ObjectDetectionAveragePrecisionStage::Run() {
for (int i = 0; i < ground_truth_objects_.objects_size(); ++i) {
const int class_id = ground_truth_objects_.objects(i).class_id();
if (class_id >= num_classes_) {
LOG(ERROR) << "Encountered invalid class ID: " << class_id;
return kTfLiteError;
}
ground_truth_object_vectors_[class_id].push_back(ConvertProtoToDetection(
ground_truth_objects_.objects(i), current_image_index_));
}
for (int i = 0; i < predicted_objects_.objects_size(); ++i) {
const int class_id = predicted_objects_.objects(i).class_id();
if (class_id >= num_classes_) {
LOG(ERROR) << "Encountered invalid class ID: " << class_id;
return kTfLiteError;
}
predicted_object_vectors_[class_id].push_back(ConvertProtoToDetection(
predicted_objects_.objects(i), current_image_index_));
}
current_image_index_++;
return kTfLiteOk;
}
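// Computes a per-IoU-threshold mean AP plus the overall mAP. When the config
// lists no thresholds, the COCO-style defaults 0.50:0.05:0.95 are used.
// Classes with neither ground truth nor predictions are skipped entirely;
// classes with predictions but no ground truth contribute an AP of 0.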
EvaluationStageMetrics ObjectDetectionAveragePrecisionStage::LatestMetrics() {
EvaluationStageMetrics metrics;
if (current_image_index_ == 0) return metrics;
metrics.set_num_runs(current_image_index_);
auto* ap_metrics = metrics.mutable_process_metrics()
->mutable_object_detection_average_precision_metrics();
auto& ap_params =
config_.specification().object_detection_average_precision_params();
std::vector<float> iou_thresholds;
if (ap_params.iou_thresholds_size() == 0) {
float threshold = 0.5;
for (int i = 0; i < 10; ++i) {
iou_thresholds.push_back(threshold + i * 0.05);
}
} else {
for (auto& threshold : ap_params.iou_thresholds()) {
iou_thresholds.push_back(threshold);
}
}
image::AveragePrecision::Options opts;
opts.num_recall_points = ap_params.num_recall_points();
float ap_sum = 0;
int num_total_aps = 0;
for (float threshold : iou_thresholds) {
float threshold_ap_sum = 0;
int num_counted_classes = 0;
for (int i = 0; i < num_classes_; ++i) {
if (ground_truth_object_vectors_[i].empty() &&
predicted_object_vectors_[i].empty())
continue;
float ap_value = 0.0;
if (!ground_truth_object_vectors_[i].empty()) {
opts.iou_threshold = threshold;
ap_value = image::AveragePrecision(opts).FromBoxes(
ground_truth_object_vectors_[i], predicted_object_vectors_[i]);
}
ap_sum += ap_value;
num_total_aps += 1;
threshold_ap_sum += ap_value;
num_counted_classes += 1;
}
if (num_counted_classes == 0) continue;
auto* threshold_ap = ap_metrics->add_individual_average_precisions();
threshold_ap->set_average_precision(threshold_ap_sum / num_counted_classes);
threshold_ap->set_iou_threshold(threshold);
}
if (num_total_aps == 0) return metrics;
ap_metrics->set_overall_mean_average_precision(ap_sum / num_total_aps);
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kAveragePrecisionStageName[] =
"object_detection_average_precision";
EvaluationStageConfig GetAveragePrecisionStageConfig(int num_classes) {
EvaluationStageConfig config;
config.set_name(kAveragePrecisionStageName);
auto* params = config.mutable_specification()
->mutable_object_detection_average_precision_params();
params->add_iou_thresholds(0.5);
params->add_iou_thresholds(0.999);
params->set_num_classes(num_classes);
return config;
}
ObjectDetectionResult GetGroundTruthDetectionResult() {
ObjectDetectionResult ground_truth;
ground_truth.set_image_name("some_image.jpg");
auto* object_1 = ground_truth.add_objects();
object_1->set_class_id(1);
auto* object_1_bbox = object_1->mutable_bounding_box();
object_1_bbox->set_normalized_top(0.5);
object_1_bbox->set_normalized_bottom(1.0);
object_1_bbox->set_normalized_left(0.5);
object_1_bbox->set_normalized_right(1.0);
auto* object_2 = ground_truth.add_objects();
object_2->set_class_id(1);
auto* object_2_bbox = object_2->mutable_bounding_box();
object_2_bbox->set_normalized_top(0);
object_2_bbox->set_normalized_bottom(1.0);
object_2_bbox->set_normalized_left(0);
object_2_bbox->set_normalized_right(1.0);
auto* object_3 = ground_truth.add_objects();
object_3->set_class_id(2);
auto* object_3_bbox = object_3->mutable_bounding_box();
object_3_bbox->set_normalized_top(0.5);
object_3_bbox->set_normalized_bottom(1.0);
object_3_bbox->set_normalized_left(0.5);
object_3_bbox->set_normalized_right(1.0);
return ground_truth;
}
ObjectDetectionResult GetPredictedDetectionResult() {
ObjectDetectionResult predicted;
auto* object_1 = predicted.add_objects();
object_1->set_class_id(1);
object_1->set_score(0.8);
auto* object_1_bbox = object_1->mutable_bounding_box();
object_1_bbox->set_normalized_top(0.091);
object_1_bbox->set_normalized_bottom(1.0);
object_1_bbox->set_normalized_left(0.091);
object_1_bbox->set_normalized_right(1.0);
auto* object_2 = predicted.add_objects();
object_2->set_class_id(1);
object_2->set_score(0.9);
auto* object_2_bbox = object_2->mutable_bounding_box();
object_2_bbox->set_normalized_top(0.474);
object_2_bbox->set_normalized_bottom(1.0);
object_2_bbox->set_normalized_left(0.474);
object_2_bbox->set_normalized_right(1.0);
auto* object_3 = predicted.add_objects();
object_3->set_class_id(1);
object_3->set_score(0.95);
auto* object_3_bbox = object_3->mutable_bounding_box();
object_3_bbox->set_normalized_top(0.474);
object_3_bbox->set_normalized_bottom(1.0);
object_3_bbox->set_normalized_left(0.474);
object_3_bbox->set_normalized_right(1.0);
return predicted;
}
TEST(ObjectDetectionAveragePrecisionStage, ZeroClasses) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(0);
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(ObjectDetectionAveragePrecisionStage, SampleInputs) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(3);
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const ObjectDetectionResult ground_truth = GetGroundTruthDetectionResult();
const ObjectDetectionResult predicted = GetPredictedDetectionResult();
stage.SetEvalInputs(ObjectDetectionResult(), ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
ObjectDetectionAveragePrecisionMetrics detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(), 0.0);
EXPECT_EQ(detection_metrics.individual_average_precisions_size(), 2);
stage.SetEvalInputs(ground_truth, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(),
0.50495052);
EXPECT_EQ(metrics.num_runs(), 2);
stage.SetEvalInputs(predicted, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).iou_threshold(), 0.5);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).average_precision(),
0.4841584);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(1).iou_threshold(),
0.999);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(1).average_precision(),
0.33663365);
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(),
0.41039604);
}
TEST(ObjectDetectionAveragePrecisionStage, DefaultIoUThresholds) {
EvaluationStageConfig config = GetAveragePrecisionStageConfig(3);
auto* params = config.mutable_specification()
->mutable_object_detection_average_precision_params();
params->clear_iou_thresholds();
ObjectDetectionAveragePrecisionStage stage =
ObjectDetectionAveragePrecisionStage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const ObjectDetectionResult ground_truth = GetGroundTruthDetectionResult();
const ObjectDetectionResult predicted = GetPredictedDetectionResult();
stage.SetEvalInputs(ground_truth, ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
ObjectDetectionAveragePrecisionMetrics detection_metrics =
metrics.process_metrics().object_detection_average_precision_metrics();
EXPECT_FLOAT_EQ(detection_metrics.overall_mean_average_precision(), 1.0);
EXPECT_EQ(detection_metrics.individual_average_precisions_size(), 10);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(0).iou_threshold(), 0.5);
EXPECT_FLOAT_EQ(
detection_metrics.individual_average_precisions(9).iou_threshold(), 0.95);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/object_detection_average_precision_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
63ccecee-2364-4308-a602-e2ce86fff0bc | cpp | tensorflow/tensorflow | image_metrics | tensorflow/lite/tools/evaluation/stages/utils/image_metrics.cc | tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc | #include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
#include <algorithm>
#include <cmath>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/logging.h"
namespace tflite {
namespace evaluation {
namespace image {
float Box2D::Length(const Box2D::Interval& a) {
return std::max(0.f, a.max - a.min);
}
float Box2D::Intersection(const Box2D::Interval& a, const Box2D::Interval& b) {
return Length(Interval{std::max(a.min, b.min), std::min(a.max, b.max)});
}
float Box2D::Area() const { return Length(x) * Length(y); }
float Box2D::Intersection(const Box2D& other) const {
return Intersection(x, other.x) * Intersection(y, other.y);
}
float Box2D::Union(const Box2D& other) const {
return Area() + other.Area() - Intersection(other);
}
float Box2D::IoU(const Box2D& other) const {
const float total = Union(other);
if (total > 0) {
return Intersection(other) / total;
} else {
return 0.0;
}
}
float Box2D::Overlap(const Box2D& other) const {
const float intersection = Intersection(other);
return intersection > 0 ? intersection / Area() : 0.0;
}
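// Interpolated average precision: walks the PR curve from high recall to low,
// carrying the running maximum precision, and averages it over
// num_recall_points + 1 evenly spaced recall levels (the standard
// VOC/COCO-style interpolation). Points must be sorted by ascending recall.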
float AveragePrecision::FromPRCurve(const std::vector<PR>& pr,
std::vector<PR>* pr_out) {
float p = 0;
float sum = 0;
int r_level = opts_.num_recall_points;
for (int i = pr.size() - 1; i >= 0; --i) {
const PR& item = pr[i];
if (i > 0) {
if (item.r < pr[i - 1].r) {
LOG(ERROR) << "recall points are not in order: " << pr[i - 1].r << ", "
<< item.r;
return 0;
}
}
while (item.r * opts_.num_recall_points < r_level) {
const float recall =
static_cast<float>(r_level) / opts_.num_recall_points;
if (r_level < 0) {
LOG(ERROR) << "Number of recall points should be > 0";
return 0;
}
sum += p;
r_level -= 1;
if (pr_out != nullptr) {
pr_out->emplace_back(p, recall);
}
}
p = std::max(p, item.p);
}
for (; r_level >= 0; --r_level) {
const float recall = static_cast<float>(r_level) / opts_.num_recall_points;
sum += p;
if (pr_out != nullptr) {
pr_out->emplace_back(p, recall);
}
}
return sum / (1 + opts_.num_recall_points);
}
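// Greedy box matching: predictions are visited in descending score order and
// matched to the highest-IoU ground truth in the same image. Matches below
// iou_threshold (or with no candidate) count as false positives; difficult
// and ignored ground truths are handled per their ignore mode. Returns NaN
// when there is no countable ground truth.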
float AveragePrecision::FromBoxes(const std::vector<Detection>& groundtruth,
const std::vector<Detection>& prediction,
std::vector<PR>* pr_out) {
absl::flat_hash_map<int64_t, std::list<Detection>> gt;
int num_gt = 0;
for (auto& box : groundtruth) {
gt[box.imgid].push_back(box);
if (!box.difficult && box.ignore == kDontIgnore) {
++num_gt;
}
}
if (num_gt == 0) {
return NAN;
}
std::vector<Detection> pd = prediction;
std::sort(pd.begin(), pd.end(), [](const Detection& a, const Detection& b) {
return a.score > b.score;
});
std::vector<PR> pr;
int correct = 0;
int num_pd = 0;
for (int i = 0; i < pd.size(); ++i) {
const Detection& b = pd[i];
    auto* g = &gt[b.imgid];
auto best = g->end();
float best_iou = -INFINITY;
for (auto it = g->begin(); it != g->end(); ++it) {
const auto iou = b.box.IoU(it->box);
if (iou > best_iou) {
best = it;
best_iou = iou;
}
}
if ((best != g->end()) && (best_iou >= opts_.iou_threshold)) {
if (best->difficult) {
continue;
}
switch (best->ignore) {
case kDontIgnore: {
++correct;
++num_pd;
g->erase(best);
pr.push_back({static_cast<float>(correct) / num_pd,
static_cast<float>(correct) / num_gt});
break;
}
case kIgnoreOneMatch: {
g->erase(best);
break;
}
case kIgnoreAllMatches: {
break;
}
}
} else {
++num_pd;
pr.push_back({static_cast<float>(correct) / num_pd,
static_cast<float>(correct) / num_gt});
}
}
return FromPRCurve(pr, pr_out);
}
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h"
#include <stdint.h>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <gtest/gtest.h>
namespace tflite {
namespace evaluation {
namespace image {
float MaxP(float minr, const std::vector<PR>& prs) {
float p = 0;
for (auto& pr : prs) {
if (pr.r >= minr) p = std::max(p, pr.p);
}
return p;
}
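// Brute-force reference AP: samples 101 recall levels and takes the maximum
// attainable precision at each, mirroring the interpolation under test.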
float ExpectedAP(const std::vector<PR>& prs) {
float sum = 0;
for (float r = 0; r <= 1.0; r += 0.01) {
sum += MaxP(r, prs);
}
return sum / 101;
}
float GenerateRandomFraction() {
return static_cast<float>(std::rand()) / RAND_MAX;
}
TEST(ImageMetricsTest, APBasic) {
std::vector<PR> prs;
  // Use 1.0 / 3, not 1 / 3: integer division would silently yield zero.
  prs = {{1., 1.}, {0.5, 1.0}, {1.0 / 3, 1.0}};
EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6);
prs = {{1.0, 0.01}};
EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6);
prs = {{1.0, 0.2}, {1.0, 0.4}, {0.67, 0.4}, {0.5, 0.4}, {0.4, 0.4},
{0.5, 0.6}, {0.57, 0.8}, {0.5, 0.8}, {0.44, 0.8}, {0.5, 1.0}};
EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6);
}
TEST(ImageMetricsTest, APRandom) {
std::vector<PR> prs;
for (int i = 0; i < 5000; ++i) {
float p = GenerateRandomFraction();
float r = GenerateRandomFraction();
prs.push_back({p, r});
}
const float expected = ExpectedAP(prs);
std::sort(std::begin(prs), std::end(prs),
[](const PR& a, const PR& b) { return a.r < b.r; });
const float actual = AveragePrecision().FromPRCurve(prs);
EXPECT_NEAR(expected, actual, 1e-5);
}
TEST(ImageMetricsTest, BBoxAPBasic) {
std::vector<Detection> gt;
gt.push_back(Detection({false, 100, 1, {{0, 1}, {0, 1}}}));
gt.push_back(Detection({false, 200, 1, {{1, 2}, {1, 2}}}));
std::vector<Detection> pd;
pd.push_back(Detection({false, 100, 0.8, {{0.1, 1.1}, {0.1, 1.1}}}));
pd.push_back(Detection({false, 200, 0.8, {{0.9, 1.9}, {0.9, 1.9}}}));
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
AveragePrecision::Options opts;
opts.iou_threshold = 0.85;
EXPECT_NEAR(0.0, AveragePrecision(opts).FromBoxes(gt, pd), 1e-6);
}
TEST(ImageMetricsTest, Box2DOverlap) {
Box2D a({{0, 1}, {0, 1}});
Box2D b({{0.5, 2.5}, {0.5, 2.5}});
EXPECT_NEAR(0.25, a.Overlap(b), 1e-6);
EXPECT_NEAR(0.0625, b.Overlap(a), 1e-6);
}
TEST(ImageMetricsTest, BBoxAPwithIgnoredGroundTruth) {
std::vector<Detection> gt;
std::vector<Detection> pd;
gt.push_back(Detection({false, 100, 1, {{1, 2}, {1, 2}}, kIgnoreOneMatch}));
pd.push_back(Detection({false, 100, 0.8, {{0.1, 1.1}, {0.1, 1.1}}}));
EXPECT_TRUE(std::isnan(AveragePrecision().FromBoxes(gt, pd)));
gt.push_back({false, 100, 1, {{0, 1}, {0, 1}}});
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
pd.push_back({false, 100, 0.9, {{0.9, 1.9}, {0.9, 1.9}}});
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
pd.push_back({false, 100, 0.95, {{0.9, 1.9}, {0.9, 1.9}}});
EXPECT_NEAR(0.5, AveragePrecision().FromBoxes(gt, pd), 1e-6);
gt[0].ignore = kIgnoreAllMatches;
EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6);
}
TEST(ImageMetricsTest, BBoxAPRandom) {
auto rand = [](int64_t id) {
auto xmin = GenerateRandomFraction();
auto xmax = xmin + GenerateRandomFraction();
auto ymin = GenerateRandomFraction();
auto ymax = ymin + GenerateRandomFraction();
return Detection(
{false, id, GenerateRandomFraction(), {{xmin, xmax}, {ymin, ymax}}});
};
std::vector<Detection> gt;
for (int i = 0; i < 100; ++i) {
gt.push_back(rand(i % 10));
}
std::vector<Detection> pd = gt;
for (int i = 0; i < 10000; ++i) {
pd.push_back(rand(i % 10));
}
std::vector<PR> pr;
AveragePrecision().FromBoxes(gt, pd, &pr);
EXPECT_EQ(101, pr.size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/utils/image_metrics.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5dee0d93-d903-4902-acfb-be17f27a8659 | cpp | tensorflow/tensorflow | delegate_provider | tensorflow/lite/tools/delegates/delegate_provider.cc | tensorflow/lite/tools/delegates/delegate_provider_test.cc | #include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace tflite {
namespace tools {
TfLiteDelegatePtr CreateNullDelegate() {
return TfLiteDelegatePtr(nullptr, [](TfLiteOpaqueDelegate*) {});
}
void ProvidedDelegateList::AddAllDelegateParams() const {
for (const auto& provider : providers_) {
params_->Merge(provider->DefaultParams());
}
}
void ProvidedDelegateList::AppendCmdlineFlags(std::vector<Flag>& flags) const {
for (const auto& provider : providers_) {
auto delegate_flags = provider->CreateFlags(params_);
flags.insert(flags.end(), delegate_flags.begin(), delegate_flags.end());
}
}
void ProvidedDelegateList::RemoveCmdlineFlag(std::vector<Flag>& flags,
const std::string& name) const {
  for (auto it = flags.begin(); it != flags.end();) {
if (it->GetFlagName() == name) {
it = flags.erase(it);
} else {
++it;
}
}
}
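// Instantiates a delegate from every registered provider that yields one and
// returns them sorted by ascending rank, so the lowest-ranked delegate comes
// first.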
std::vector<ProvidedDelegateList::ProvidedDelegate>
ProvidedDelegateList::CreateAllRankedDelegates(const ToolParams& params) const {
std::vector<ProvidedDelegateList::ProvidedDelegate> delegates;
for (const auto& provider : providers_) {
auto ptr_rank = provider->CreateRankedTfLiteDelegate(params);
if (ptr_rank.first == nullptr) continue;
static bool already_logged = false;
if (!already_logged) {
TFLITE_LOG(INFO) << provider->GetName() << " delegate created.";
#ifndef NDEBUG
provider->LogParams(params, false);
#endif
already_logged = true;
}
ProvidedDelegateList::ProvidedDelegate info;
info.provider = provider.get();
info.delegate = std::move(ptr_rank.first);
info.rank = ptr_rank.second;
delegates.emplace_back(std::move(info));
}
std::sort(delegates.begin(), delegates.end(),
[](const ProvidedDelegateList::ProvidedDelegate& a,
const ProvidedDelegateList::ProvidedDelegate& b) {
return a.rank < b.rank;
});
return delegates;
}
}
} | #include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/test_util.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
TEST(ProvidedDelegateListTest, AddAllDelegateParams) {
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
#if !TFLITE_WITH_STABLE_ABI
EXPECT_TRUE(params.HasParam("use_nnapi"));
#endif
}
TEST(ProvidedDelegateListTest, AppendCmdlineFlags) {
std::vector<Flag> flags;
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
providers.AppendCmdlineFlags(flags);
EXPECT_FALSE(flags.empty());
}
TEST(KernelTestDelegateProvidersTest, CreateAllRankedDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
#if TFLITE_WITH_STABLE_ABI
ASSERT_EQ(TfLiteInitializeShimsForTest(), 0);
params.Set<bool>("use_xnnpack", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("XNNPACK", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
#else
params.Set<bool>("use_xnnpack", true, 2);
params.Set<bool>("use_dummy_delegate", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(2, delegates.size());
EXPECT_EQ("DummyDelegate", delegates.front().provider->GetName());
EXPECT_EQ(1, delegates.front().rank);
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ("XNNPACK", delegates.back().provider->GetName());
EXPECT_NE(nullptr, delegates.back().delegate.get());
EXPECT_EQ(2, delegates.back().rank);
#endif
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
924450a7-b081-4c29-b3f6-2bb71a1b42d4 | cpp | tensorflow/tensorflow | xnnpack_delegate_provider | tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc | tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc | #include <string>
#include <utility>
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace tools {
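// Registers XNNPACK as a command-line-selectable delegate, exposing the
// use_xnnpack and xnnpack_force_fp16 flags plus a file-backed weight cache
// path.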
class XnnpackDelegateProvider : public DelegateProvider {
public:
XnnpackDelegateProvider() {
default_params_.AddParam("use_xnnpack", ToolParam::Create<bool>(false));
default_params_.AddParam("xnnpack_force_fp16",
ToolParam::Create<bool>(false));
default_params_.AddParam("xnnpack_weight_cache_file_path",
ToolParam::Create<std::string>(""));
}
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const ToolParams& params, bool verbose) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
const ToolParams& params) const final;
std::string GetName() const final { return "XNNPACK"; }
};
REGISTER_DELEGATE_PROVIDER(XnnpackDelegateProvider);
std::vector<Flag> XnnpackDelegateProvider::CreateFlags(
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<bool>("use_xnnpack", params,
"explicitly apply the XNNPACK delegate. Note the "
"XNNPACK delegate could "
"be implicitly applied by the TF Lite runtime "
"regardless the value of "
"this parameter. To disable this implicit application, "
"set the value to "
"false explicitly."),
CreateFlag<bool>("xnnpack_force_fp16", params,
"enforce float16 inference."),
CreateFlag<std::string>("xnnpack_weight_cache_file_path", params,
"enable file-backed weight caching."),
};
return flags;
}
void XnnpackDelegateProvider::LogParams(const ToolParams& params,
bool verbose) const {
LOG_TOOL_PARAM(params, bool, "use_xnnpack", "Use xnnpack", verbose);
LOG_TOOL_PARAM(params, bool, "xnnpack_force_fp16", "xnnpack_force_fp16",
verbose);
LOG_TOOL_PARAM(params, std::string, "xnnpack_weight_cache_file_path",
"xnnpack_weight_cache_file_path", verbose);
}
TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate(
const ToolParams& params) const {
if (params.Get<bool>("use_xnnpack")) {
return evaluation::CreateXNNPACKDelegate(
params.Get<int32_t>("num_threads"),
params.Get<bool>("xnnpack_force_fp16"),
params.Get<std::string>("xnnpack_weight_cache_file_path").c_str());
}
return CreateNullDelegate();
}
std::pair<TfLiteDelegatePtr, int>
XnnpackDelegateProvider::CreateRankedTfLiteDelegate(
const ToolParams& params) const {
auto ptr = CreateTfLiteDelegate(params);
return std::make_pair(std::move(ptr),
params.GetPosition<bool>("use_xnnpack"));
}
}
} | #include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
TEST(XNNPackDelegateProviderTest, Test) {
const std::string kFakeCacheParam =
testing::TempDir() + "/XNNPackDelegateProviderTest.xnnpack_cache";
const auto& providers = GetRegisteredDelegateProviders();
ASSERT_EQ(providers.size(), 1);
ToolParams params;
const auto& xnnpack_provider = providers[0];
ASSERT_NE(xnnpack_provider, nullptr);
params.Merge(xnnpack_provider->DefaultParams());
params.AddParam("num_threads", ToolParam::Create<int32_t>(-1));
EXPECT_TRUE(params.HasParam("use_xnnpack"));
EXPECT_FALSE(params.HasValueSet<bool>("use_xnnpack"));
ASSERT_NE(params.GetParam("use_xnnpack"), nullptr);
EXPECT_TRUE(params.HasParam("xnnpack_force_fp16"));
EXPECT_FALSE(params.HasValueSet<bool>("xnnpack_force_fp16"));
ASSERT_NE(params.GetParam("xnnpack_force_fp16"), nullptr);
EXPECT_TRUE(params.HasParam("xnnpack_weight_cache_file_path"));
EXPECT_FALSE(
params.HasValueSet<std::string>("xnnpack_weight_cache_file_path"));
ASSERT_NE(params.GetParam("xnnpack_weight_cache_file_path"), nullptr);
params.Set<bool>("use_xnnpack", true, 0);
{
TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params);
const TfLiteXNNPackDelegateOptions* options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_NE(options, nullptr);
EXPECT_EQ(options->weight_cache_file_path, nullptr);
}
params.Set<bool>("xnnpack_force_fp16", true, 1);
params.Set<std::string>("xnnpack_weight_cache_file_path", kFakeCacheParam,
2);
{
TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params);
const TfLiteXNNPackDelegateOptions* options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_NE(options, nullptr);
EXPECT_THAT(options->weight_cache_file_path,
testing::StrEq(kFakeCacheParam));
EXPECT_TRUE(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6824ada7-01ad-4c47-ba4e-358ca1192093 | cpp | tensorflow/tensorflow | gpu_delegate_compatibility_checker | tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.cc | tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker_test.cc | #include "tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.h"
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "absl/status/status.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
namespace {
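// Translates a non-OK absl::Status from the GPU compatibility check into a
// CompatibilityFailure proto entry, mapping the status code onto the closest
// DCC_* failure type; unrecognized codes fall back to an internal error.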
void convertToValidationFailureType(absl::Status status,
proto::OpCompatibilityResult* op_result) {
auto compatibility_failure = op_result->add_compatibility_failures();
compatibility_failure->set_description(std::string(status.message()));
switch (status.code()) {
case absl::StatusCode::kInvalidArgument:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INVALID_ARGUMENT);
break;
case absl::StatusCode::kUnimplemented:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNIMPLEMENTED_ERROR);
break;
case absl::StatusCode::kInternal:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
break;
case absl::StatusCode::kOutOfRange:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_OUT_OF_RANGE);
break;
default:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
compatibility_failure->set_description(
"Unknown validation failure type.");
}
}
}
std::unordered_map<std::string, std::string>
tools::GpuDelegateCompatibilityChecker::getDccConfigurations() {
return {};
}
absl::Status tools::GpuDelegateCompatibilityChecker::setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) {
return absl::OkStatus();
}
absl::Status
tools::GpuDelegateCompatibilityChecker::checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) {
return absl::UnimplementedError(
"Online mode is not supported on GPU delegate compatibility checker.");
}
absl::Status tools::GpuDelegateCompatibilityChecker::checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) {
auto status = CheckGpuDelegateCompatibility(op_sig);
if (!status.ok()) {
convertToValidationFailureType(status, op_result);
op_result->set_is_supported(false);
} else {
op_result->set_is_supported(true);
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
namespace tflite {
namespace tools {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
namespace {
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
}
TEST(GpuDelegateCompatibilityCheckerTest, CheckOnlineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_EQ(
gpu_dcc
.checkModelCompatibilityOnline(fb_model.get(), &compatibility_result)
.code(),
absl::StatusCode::kUnimplemented);
}
TEST(GpuDelegateCompatibilityCheckerTest, CompatibleModelOfflineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_OK(gpu_dcc.checkModelCompatibilityOffline(fb_model.get(),
&compatibility_result));
for (auto op_compatibility_result :
compatibility_result.compatibility_results()) {
EXPECT_TRUE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result.compatibility_results_size(), 2);
}
TEST(GpuDelegateCompatibilityCheckerTest, IncompatibleModelOfflineMode) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv3d_huge_im2col.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_OK(gpu_dcc.checkModelCompatibilityOffline(fb_model.get(),
&compatibility_result));
for (auto op_compatibility_result :
compatibility_result.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result.compatibility_results_size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd52ecf5-ef65-40cf-bffe-d2d368a8ecdf | cpp | tensorflow/tensorflow | nnapi_compatibility_lib | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.cc | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib_test.cc | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.h"
#include <map>
#include <utility>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace tools {
using ::tflite::delegate::nnapi::NNAPIValidationFailure;
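// Walks the interpreter's execution plan and validates every node against
// NNAPI at `runtime_feature_level`. Indices of supported nodes are appended
// to `supported_nodes`; validation failures are collected per node index in
// `failures_by_node`.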
TfLiteStatus CheckCompatibility(
TfLiteContext* context, int32_t runtime_feature_level,
std::vector<int>* supported_nodes,
std::map<int, std::vector<NNAPIValidationFailure>>* failures_by_node) {
if (!context) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_ERROR, "Context is nullptr.");
return kTfLiteError;
}
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
for (int node_index : TfLiteIntArrayView(execution_plan)) {
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Node index: %d", node_index);
TfLiteNode* node;
TfLiteRegistration* registration;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
        context, node_index, &node, &registration));
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
if (NNAPIDelegateKernel::Validate(
context, registration, runtime_feature_level, node,
true,
nullptr, &map_failures)) {
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Built-in Code: %d",
registration->builtin_code);
if (supported_nodes) {
supported_nodes->push_back(node_index);
}
} else {
if (failures_by_node) {
(*failures_by_node)[node_index] = std::move(map_failures);
}
}
}
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.h"
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace tools {
namespace {
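// AddOpModel variant that registers a CompatibilityCheckerDelegate with the
// interpreter builder, so compatibility results are collected while the model
// is built.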
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type,
CompatibilityCheckerDelegate* checker_delegate) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
SetDelegate(checker_delegate);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
}
TEST(NnapiDelegateCompabilityTest, InvalidInput) {
EXPECT_EQ(CheckCompatibility(nullptr, 0, nullptr, nullptr), kTfLiteError);
}
TEST(NnapiDelegateCompabilityTest, CompatibleModel) {
CompatibilityCheckerDelegate checker_delegate(
tflite::delegate::nnapi::kMinSdkVersionForNNAPI13);
AddOpModel add_op_model(
{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE, &checker_delegate);
EXPECT_EQ(checker_delegate.GetSupportedNodes().size(), 1);
EXPECT_EQ(checker_delegate.GetFailuresByNode().size(), 0);
}
TEST(NnapiDelegateCompabilityTest, IncompatibleModel) {
CompatibilityCheckerDelegate checker_delegate(
tflite::delegate::nnapi::kMinSdkVersionForNNAPI13);
AddOpModel add_op_model(
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_RELU_N1_TO_1,
&checker_delegate);
EXPECT_EQ(checker_delegate.GetSupportedNodes().size(), 0);
EXPECT_EQ(checker_delegate.GetFailuresByNode().size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_compatibility_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02ed9897-f442-4266-a42c-c0acb98922cf | cpp | tensorflow/tensorflow | nnapi_delegate_compatibility_checker | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.cc | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker_test.cc | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.h"
#include <cctype>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/delegate_compatibility_checker_util.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/online_helper_delegate.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
namespace {
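// Maps a runtime feature level in [1, 8] to the corresponding
// ANEURALNETWORKS_FEATURE_LEVEL_* constant; out-of-range values fall back to
// feature level 8.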
void getCanonicalFeatureLevel(int runtime_feature_level,
int& canonical_feature_level) {
switch (runtime_feature_level) {
case 1:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_1;
break;
case 2:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_2;
break;
case 3:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_3;
break;
case 4:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_4;
break;
case 5:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_5;
break;
case 6:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_6;
break;
case 7:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_7;
break;
case 8:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_8;
break;
default:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_8;
}
}
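// The runtime feature level flag must be a single digit in [1, 8]; longer
// strings, signs, and non-numeric input are rejected.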
absl::Status IsValidFeatureLevelInt(const std::string& s) {
if (s.size() == 1 && std::isdigit(s[0]) && s[0] > '0' && s[0] < '9') {
return absl::OkStatus();
}
return absl::InvalidArgumentError("Invalid runtime feature level.");
}
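// Reads "nnapi-runtime_feature_level" from the DCC configurations. A missing
// key falls back to the default level, but any other key containing "nnapi"
// is treated as a misspelled flag and rejected.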
absl::Status extractRuntimeFeatureLevel(
const std::unordered_map<std::string, std::string>& dcc_configs,
int& runtime_feature_level) {
std::string str_runtime_feature_level;
if (dcc_configs.find("nnapi-runtime_feature_level") == dcc_configs.end()) {
for (const auto& dcc_config : dcc_configs) {
if (absl::StrContains(dcc_config.first, "nnapi")) {
return absl::InvalidArgumentError(
"The correct flag name is 'nnapi-runtime_feature_level");
}
}
str_runtime_feature_level =
std::to_string(tools::kDefaultRuntimeFeatureLevel);
} else {
str_runtime_feature_level = dcc_configs.at("nnapi-runtime_feature_level");
RETURN_IF_ERROR(IsValidFeatureLevelInt(str_runtime_feature_level));
}
runtime_feature_level = std::stoi(str_runtime_feature_level);
return absl::OkStatus();
}
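// Maps each NNAPIValidationFailureType to its proto CompatibilityFailureType
// counterpart and appends the failures (with their messages) to `op_result`;
// unrecognized types are reported as DCC_INTERNAL_ERROR.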
absl::Status convertToCompatibilityFailureType(
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures,
proto::OpCompatibilityResult* op_result) {
for (const auto& status : map_failures) {
auto compatibility_failure = op_result->add_compatibility_failures();
compatibility_failure->set_description(status.message);
switch (status.type) {
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperator:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedAndroidVersion:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_VERSION);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperatorVersion:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR_VERSION);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedInputType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_INPUT_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kNotRestrictedScaleCompliant:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_NOT_RESTRICTED_SCALE_COMPLIANT);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOutputType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OUTPUT_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperandSize:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_SIZE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperandValue:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_VALUE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedHybridOperator:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_HYBRID_OPERATOR);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedQuantizationType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_QUANTIZATION_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::kMissingRequiredOperand:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_MISSING_REQUIRED_OPERAND);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperandRank:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_RANK);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kInputTensorShouldHaveConstantShape:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_INPUT_TENSOR_SHOULD_HAVE_CONSTANT_SHAPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperatorVariant:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR_VARIANT);
break;
case delegate::nnapi::NNAPIValidationFailureType::kNoActivationExpected:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_NO_ACTIVATION_EXPECTED);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedQuantizationParameters:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_UNSUPPORTED_QUANTIZATION_PARAMETERS);
break;
default:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
compatibility_failure->set_description(
"Unknown validation failure type.");
}
}
return absl::OkStatus();
}
}
absl::Status
tools::NnapiDelegateCompatibilityChecker::checkOpCompatibilityOnline(
TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration,
std::unordered_map<std::string, std::string> dcc_configs,
tflite::proto::OpCompatibilityResult* op_result) {
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
int runtime_feature_level;
RETURN_IF_ERROR(
extractRuntimeFeatureLevel(dcc_configs, runtime_feature_level));
getCanonicalFeatureLevel(runtime_feature_level, runtime_feature_level);
if (NNAPIDelegateKernel::Validate(
context, registration, runtime_feature_level, node,
true,
nullptr, &map_failures)) {
op_result->set_is_supported(true);
} else {
RETURN_IF_ERROR(convertToCompatibilityFailureType(map_failures, op_result));
op_result->set_is_supported(false);
}
return absl::OkStatus();
}
std::unordered_map<std::string, std::string>
tools::NnapiDelegateCompatibilityChecker::getDccConfigurations() {
std::unordered_map<std::string, std::string> dcc_configs;
dcc_configs["nnapi-runtime_feature_level"] =
std::to_string(runtime_feature_level_);
return dcc_configs;
}
absl::Status tools::NnapiDelegateCompatibilityChecker::setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) {
RETURN_IF_ERROR(
extractRuntimeFeatureLevel(dcc_configs, runtime_feature_level_));
return absl::OkStatus();
}
absl::Status
tools::NnapiDelegateCompatibilityChecker::checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) {
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder interpreter_builder(*model_buffer, resolver);
auto dcc_configs = getDccConfigurations();
std::function<absl::Status(TfLiteContext*, const TfLiteNode*,
const TfLiteRegistration*,
std::unordered_map<std::string, std::string>,
proto::OpCompatibilityResult*)>
check_op_func_ptr = &checkOpCompatibilityOnline;
OnlineHelperDelegate delegate(dcc_configs, check_op_func_ptr, result);
interpreter_builder.AddDelegate(&delegate);
interpreter_builder(&interpreter);
return absl::OkStatus();
}
absl::Status tools::NnapiDelegateCompatibilityChecker::checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) {
return absl::UnimplementedError(
"Offline mode is not yet supported on NNAPI delegate compatibility "
"checker.");
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.h"
#include <cstdint>
#include <limits>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
namespace tflite {
namespace tools {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
namespace {
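// Single-ADD-op model used below to generate in-memory flatbuffers for the
// incompatibility tests.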
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
class NnapiDccTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override { compatibility_result_.Clear(); }
NnapiDelegateCompatibilityChecker nnapi_dcc_;
proto::CompatibilityResult compatibility_result_;
};
}
TEST_F(NnapiDccTest, ValidRuntimeFeatureLevel) {
std::unordered_map dcc_configs = nnapi_dcc_.getDccConfigurations();
EXPECT_EQ(dcc_configs["nnapi-runtime_feature_level"], "8");
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs["nnapi-runtime_feature_level"] = "1";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs["nnapi-runtime_feature_level"] = "8";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs.clear();
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
EXPECT_EQ(nnapi_dcc_.getDccConfigurations()["nnapi-runtime_feature_level"],
"8");
}
TEST_F(NnapiDccTest, InvalidRuntimeFeatureLevel) {
std::unordered_map dcc_configs = nnapi_dcc_.getDccConfigurations();
dcc_configs["nnapi-runtime_feature_level"] = "03";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "a";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "28123497123489123841212344516";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "30.0";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "-30";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "9";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs.clear();
dcc_configs["nnapi-runtim_feature_level"] = "8";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(NnapiDccTest, CompatibleModelOnlineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
auto model = fb_model->GetModel();
EXPECT_EQ(model->subgraphs()->size(), 1);
EXPECT_EQ(model->subgraphs()->Get(0)->operators()->size(), 2);
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
for (auto op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_TRUE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 2);
}
TEST_F(NnapiDccTest, IncompatibleModelOperation) {
AddOpModel add_op_model(
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_RELU_N1_TO_1);
auto fb_model = tflite::FlatBufferModel::BuildFromModel(
tflite::GetModel(add_op_model.GetModelBuffer()));
ASSERT_TRUE(fb_model);
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
for (auto op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 1);
}
TEST_F(NnapiDccTest, IncompatibleModelFeatureLevel) {
AddOpModel add_op_model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_NONE);
auto fb_model = tflite::FlatBufferModel::BuildFromModel(
tflite::GetModel(add_op_model.GetModelBuffer()));
ASSERT_TRUE(fb_model);
auto nnapi_configs = nnapi_dcc_.getDccConfigurations();
nnapi_configs["nnapi-runtime_feature_level"] = "2";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(nnapi_configs));
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
for (auto op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90c6fcba-9e56-454e-aa32-12dd171fd942 | cpp | tensorflow/tensorflow | stable_delegate_provider | tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider.cc | tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider_test.cc | #include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
#if !defined(_WIN32)
#include "tensorflow/lite/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#endif
namespace tflite {
namespace tools {
#if !defined(_WIN32)
namespace {
TfLiteDelegatePtr CreateStableDelegate(
const std::string& json_settings_file_path);
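// Process-wide singleton that parses stable-delegate JSON settings and loads
// the delegate shared library once per settings file, caching both so
// repeated delegate creations reuse the same plugin.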
class StableDelegatePluginLoader {
public:
static StableDelegatePluginLoader& GetInstance() {
static StableDelegatePluginLoader* const instance =
new StableDelegatePluginLoader;
return *instance;
}
TfLiteDelegatePtr CreateStableDelegate(
const std::string& json_settings_file_path);
private:
struct CacheEntry {
const TfLiteStableDelegate* stable_delegate = nullptr;
delegates::utils::TfLiteSettingsJsonParser parser;
const TFLiteSettings* parsed_settings = nullptr;
};
StableDelegatePluginLoader() = default;
const CacheEntry* LoadStableDelegatePlugin(
const std::string& json_settings_file_path);
  std::map<std::string, CacheEntry> cache_;
};
const StableDelegatePluginLoader::CacheEntry*
StableDelegatePluginLoader::LoadStableDelegatePlugin(
const std::string& json_settings_file_path) {
auto it = cache_.find(json_settings_file_path);
if (it != cache_.end()) {
return &it->second;
}
CacheEntry result;
const TFLiteSettings* tflite_settings =
result.parser.Parse(json_settings_file_path);
result.parsed_settings = tflite_settings;
if (!tflite_settings || !tflite_settings->stable_delegate_loader_settings() ||
!tflite_settings->stable_delegate_loader_settings()->delegate_path()) {
TFLITE_LOG(ERROR) << "Invalid TFLiteSettings for the stable delegate.";
result.stable_delegate = nullptr;
} else {
std::string delegate_path =
tflite_settings->stable_delegate_loader_settings()
->delegate_path()
->str();
result.stable_delegate =
delegates::utils::LoadDelegateFromSharedLibrary(delegate_path);
if (!result.stable_delegate || !result.stable_delegate->delegate_plugin) {
TFLITE_LOG(ERROR) << "Failed to load stable ABI delegate from stable ABI "
"delegate binary ("
<< delegate_path << ").";
}
}
auto it2 = cache_.emplace(json_settings_file_path, std::move(result)).first;
return &it2->second;
}
TfLiteDelegatePtr CreateStableDelegate(
const std::string& json_settings_file_path) {
return StableDelegatePluginLoader::GetInstance().CreateStableDelegate(
json_settings_file_path);
}
TfLiteDelegatePtr StableDelegatePluginLoader::CreateStableDelegate(
const std::string& json_settings_file_path) {
if (json_settings_file_path.empty()) {
return CreateNullDelegate();
}
const CacheEntry* entry =
StableDelegatePluginLoader::GetInstance().LoadStableDelegatePlugin(
json_settings_file_path);
if (!entry || !entry->stable_delegate ||
!entry->stable_delegate->delegate_plugin) {
return CreateNullDelegate();
}
const TfLiteOpaqueDelegatePlugin* delegate_plugin =
entry->stable_delegate->delegate_plugin;
return TfLiteDelegatePtr(delegate_plugin->create(entry->parsed_settings),
delegate_plugin->destroy);
}
}
#endif
class StableAbiDelegateProvider : public DelegateProvider {
public:
StableAbiDelegateProvider() {
default_params_.AddParam("stable_delegate_settings_file",
ToolParam::Create<std::string>(""));
}
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const ToolParams& params, bool verbose) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
const ToolParams& params) const final;
std::string GetName() const final { return "STABLE_DELEGATE"; }
};
REGISTER_DELEGATE_PROVIDER(StableAbiDelegateProvider);
std::vector<Flag> StableAbiDelegateProvider::CreateFlags(
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<std::string>("stable_delegate_settings_file", params,
"The path to the delegate settings JSON file.")};
return flags;
}
void StableAbiDelegateProvider::LogParams(const ToolParams& params,
bool verbose) const {
if (params.Get<std::string>("stable_delegate_settings_file").empty()) return;
LOG_TOOL_PARAM(params, std::string, "stable_delegate_settings_file",
"Delegate settings file path", verbose);
}
TfLiteDelegatePtr StableAbiDelegateProvider::CreateTfLiteDelegate(
const ToolParams& params) const {
#if !defined(_WIN32)
std::string stable_delegate_settings_file =
params.Get<std::string>("stable_delegate_settings_file");
return CreateStableDelegate(stable_delegate_settings_file);
#else
return CreateNullDelegate();
#endif
}
std::pair<TfLiteDelegatePtr, int>
StableAbiDelegateProvider::CreateRankedTfLiteDelegate(
const ToolParams& params) const {
auto ptr = CreateTfLiteDelegate(params);
return std::make_pair(std::move(ptr), params.GetPosition<std::string>(
"stable_delegate_settings_file"));
}
}
} | #include <string>
#include <vector>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
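// JSON settings fixtures: two valid configurations (the sample stable
// delegate and a stable XNNPack build) plus four malformed variants used by
// the failure tests.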
static constexpr char kTestSettingsSrcDir[] =
"tensorflow/lite/tools/delegates/experimental/stable_delegate/";
static constexpr char kGoodStableDelegateSettings[] =
"test_sample_stable_delegate_settings.json";
static constexpr char kGoodXNNPackDelegateSettings[] =
"test_stable_xnnpack_settings.json";
static constexpr char kBadMissingFile[] = "missing.json";
static constexpr char kBadInvalidSettings[] = "test_invalid_settings.json";
static constexpr char kBadMissingStableDelegateSettings[] =
"test_missing_stable_delegate_settings.json";
static constexpr char kBadMissingDelegatePathSettings[] =
"test_missing_delegate_path_settings.json";
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateDelegates(
const std::string& settings_file_path) {
ToolParams params;
ProvidedDelegateList providers(¶ms);
providers.AddAllDelegateParams();
params.Set<std::string>("stable_delegate_settings_file", settings_file_path,
1);
return providers.CreateAllRankedDelegates();
}
TEST(StableAbiDelegateProviderTest, CreateDelegate) {
auto delegates = CreateDelegates(std::string(kTestSettingsSrcDir) +
kGoodStableDelegateSettings);
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("STABLE_DELEGATE", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
}
TEST(StableAbiDelegateProviderTest, CreateDelegateWithStableXNNPack) {
auto delegates = CreateDelegates(std::string(kTestSettingsSrcDir) +
kGoodXNNPackDelegateSettings);
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("STABLE_DELEGATE", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
pthreadpool_t threadpool = static_cast<pthreadpool_t>(
TfLiteXNNPackDelegateGetThreadPool(delegates.front().delegate.get()));
EXPECT_EQ(5, pthreadpool_get_threads_count(threadpool));
}
TEST(StableAbiDelegateProviderTest, CreateDelegateFailedWithInvalidSettings) {
std::vector<std::string> invalid_settings_names = {
kBadMissingFile, kBadInvalidSettings, kBadMissingStableDelegateSettings,
kBadMissingDelegatePathSettings};
for (const std::string& name : invalid_settings_names) {
auto delegates = CreateDelegates(std::string(kTestSettingsSrcDir) + name);
EXPECT_EQ(0, delegates.size());
}
}
TEST(StableAbiDelegateProviderTest, CreateDelegateFailedWithBlankSettingsPath) {
auto delegates = CreateDelegates("");
EXPECT_EQ(0, delegates.size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f219ac4f-dc25-438c-ac93-f9d42e2ee63b | cpp | tensorflow/tensorflow | op_signature | tensorflow/lite/tools/versioning/op_signature.cc | tensorflow/lite/tools/versioning/op_signature_test.cc | #include "tensorflow/lite/tools/versioning/op_signature.h"
#include <cstdlib>
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/stderr_reporter.h"
namespace tflite {
namespace {
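// BuiltinDataAllocator backed by plain malloc/free; the alignment hint is
// ignored.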
class MallocDataAllocator : public BuiltinDataAllocator {
public:
void* Allocate(size_t size, size_t alignment_hint) override {
return malloc(size);
}
void Deallocate(void* data) override { free(data); }
};
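// Returns the rank of the op's idx-th input tensor, or 0 when the shape
// vector is absent.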
inline int GetNumDims(const SubGraph* subgraph, const Operator* op, int idx) {
const flatbuffers::Vector<int32_t>* ret =
subgraph->tensors()->Get(op->inputs()->Get(idx))->shape();
if (ret) {
return ret->size();
} else {
return 0;
}
}
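// Collects type, constness (non-empty backing buffer), dimensions, and a
// dynamic-shape flag (any -1 in shape_signature) for each listed tensor; a
// negative tensor index yields a kTfLiteNoType placeholder spec.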
std::vector<OpSignatureTensorSpec> GetOpSignatureTensorSpecs(
const flatbuffers::Vector<int32_t>* tensors, const SubGraph* subgraph,
const Model* model) {
std::vector<OpSignatureTensorSpec> tensor_specs;
if (!tensors) {
return tensor_specs;
}
StderrReporter error_reporter;
for (int32_t i = 0; i < tensors->Length(); ++i) {
int32_t tensor_no = tensors->Get(i);
OpSignatureTensorSpec tensor_spec = {kTfLiteNoType};
if (tensor_no >= 0) {
if (subgraph->tensors() && tensor_no < subgraph->tensors()->Length()) {
auto* fb_tensor = subgraph->tensors()->Get(tensor_no);
ConvertTensorType(fb_tensor->type(), &tensor_spec.type,
&error_reporter);
auto buffer_idx = fb_tensor->buffer();
if (buffer_idx != 0 && buffer_idx < model->buffers()->Length()) {
auto* buffer = model->buffers()->Get(buffer_idx);
if (buffer->data() && buffer->data()->size() != 0) {
tensor_spec.is_const = true;
}
}
const flatbuffers::Vector<int32_t>* shape_vec = fb_tensor->shape();
if (shape_vec) {
for (int32_t j = 0; j < shape_vec->Length(); ++j) {
tensor_spec.dims.push_back(shape_vec->Get(j));
}
}
const flatbuffers::Vector<int32_t>* shape_signature_vec =
fb_tensor->shape_signature();
tensor_spec.is_shape_dynamic = false;
if (shape_signature_vec) {
for (int32_t j = 0; j < shape_signature_vec->Length(); ++j) {
if (shape_signature_vec->Get(j) == -1) {
tensor_spec.is_shape_dynamic = true;
break;
}
}
}
}
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
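// Same as above, but reads tensor metadata from a live TfLiteContext;
// constness is inferred from the kTfLiteMmapRo allocation type.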
std::vector<OpSignatureTensorSpec> GetOpSignatureTensorSpecs(
TfLiteIntArray* tensors, const TfLiteContext* context,
const TfLiteNode* tflite_node) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (int32_t i = 0; i < tensors->size; ++i) {
int32_t tensor_no = tensors->data[i];
OpSignatureTensorSpec tensor_spec = {kTfLiteNoType};
if (tensor_no >= 0) {
const TfLiteTensor* tfl_tensor;
if (context->tensors != nullptr) {
tfl_tensor = &context->tensors[tensor_no];
} else {
tfl_tensor = context->GetTensor(context, tensor_no);
}
if (tfl_tensor != nullptr) {
tensor_spec.type = tfl_tensor->type;
tensor_spec.is_const = (tfl_tensor->allocation_type == kTfLiteMmapRo);
if (tfl_tensor->dims) {
for (int32_t j = 0; j < tfl_tensor->dims->size; ++j) {
tensor_spec.dims.push_back(tfl_tensor->dims->data[j]);
}
}
tensor_spec.is_shape_dynamic = HasUnspecifiedDimension(tfl_tensor);
}
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
}
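// Builds an OpSignature from a flatbuffer operator: builtin options are
// parsed with ParseOpData (callers own and must free `builtin_data`), and
// op-specific ext_options such as per-channel quantization flags are filled
// in by the switch below.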
OpSignature GetOpSignature(const OperatorCode* op_code, const Operator* op,
const SubGraph* subgraph, const Model* model) {
auto builtin_code = GetBuiltinCode(op_code);
OpSignature op_sig = {builtin_code};
std::memset(&op_sig.ext_options, 0, sizeof(op_sig.ext_options));
if (builtin_code != BuiltinOperator_CUSTOM) {
StderrReporter error_reporter;
MallocDataAllocator allocator;
ParseOpData(op, builtin_code, &error_reporter, &allocator,
&op_sig.builtin_data);
} else {
op_sig.custom_name = op_code->custom_code()->str();
}
switch (builtin_code) {
case BuiltinOperator_DEPTHWISE_CONV_2D: {
const Tensor* filter_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const QuantizationParameters* filter_quant =
filter_tensor->quantization();
int num_channels = filter_tensor->shape()->Get(3);
if (filter_quant && filter_quant->scale() &&
filter_quant->scale()->Length() &&
filter_quant->scale()->Length() == num_channels) {
op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized = true;
}
} break;
case BuiltinOperator_FULLY_CONNECTED: {
const Tensor* weight_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
op_sig.ext_options.fully_connected.sparse_weight =
(weight_tensor->sparsity() != nullptr);
const QuantizationParameters* weight_quant =
weight_tensor->quantization();
if (weight_quant && weight_quant->scale() &&
weight_quant->scale()->size() && weight_tensor->shape() &&
weight_tensor->shape()->size()) {
op_sig.ext_options.fully_connected.is_per_channel_quantized =
weight_quant->scale()->size() > 1 &&
weight_quant->scale()->size() == weight_tensor->shape()->Get(0);
}
} break;
case BuiltinOperator_MUL: {
if (op->inputs()->Length() < 2 || op->outputs()->Length() < 1) {
break;
}
const Tensor* input1_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const Tensor* input2_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const Tensor* output_tensor =
subgraph->tensors()->Get(op->outputs()->Get(0));
const QuantizationParameters* input1_quant =
input1_tensor->quantization();
      const QuantizationParameters* input2_quant =
          input2_tensor->quantization();
      const QuantizationParameters* output_quant =
          output_tensor->quantization();
      if (input1_quant && input1_quant->scale() &&
          input1_quant->scale()->Length() && input2_quant &&
          input2_quant->scale() && input2_quant->scale()->Length() &&
          output_quant && output_quant->scale() &&
          output_quant->scale()->Length()) {
        op_sig.ext_options.mul.input1_scale = input1_quant->scale()->Get(0);
        op_sig.ext_options.mul.input2_scale = input2_quant->scale()->Get(0);
        op_sig.ext_options.mul.output_scale = output_quant->scale()->Get(0);
      }
      if (input1_quant || input2_quant) {
op_sig.ext_options.mul.input_quantized = true;
}
} break;
case BuiltinOperator_CONV_2D: {
const Tensor* input_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const Tensor* filter_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const QuantizationParameters* filter_quant =
filter_tensor->quantization();
int num_filters = filter_tensor->shape()->Get(0);
if (filter_quant && filter_quant->scale() &&
filter_quant->scale()->Length() &&
filter_quant->scale()->Length() == num_filters) {
op_sig.ext_options.conv_2d.is_per_channel_quantized = true;
}
if (input_tensor->shape() && input_tensor->shape()->size()) {
int num_input_channels = input_tensor->shape()->Get(3);
int num_filter_input_channels = filter_tensor->shape()->Get(3);
op_sig.ext_options.conv_2d.is_grouped_convolution =
num_input_channels != num_filter_input_channels;
} else {
op_sig.ext_options.conv_2d.is_grouped_convolution = false;
}
} break;
case BuiltinOperator_STRIDED_SLICE: {
op_sig.ext_options.strided_slice.num_dims = GetNumDims(subgraph, op, 0);
} break;
case BuiltinOperator_ABS: {
if (subgraph->tensors()->Get(op->inputs()->Get(0))->quantization()) {
op_sig.ext_options.abs.input_quantized = true;
}
} break;
case BuiltinOperator_DEQUANTIZE: {
const Tensor* input_tensor =
subgraph->tensors()->Get(op->inputs()->Get(0));
const QuantizationParameters* input_quant = input_tensor->quantization();
if (input_quant && input_quant->scale() &&
input_quant->scale()->Length() > 1 &&
input_quant->scale()->Length() ==
input_tensor->shape()->Get(input_quant->quantized_dimension())) {
op_sig.ext_options.dequantize.is_per_channel_quantized = true;
}
} break;
case BuiltinOperator_QUANTIZE: {
const Tensor* output_tensor =
subgraph->tensors()->Get(op->outputs()->Get(0));
const QuantizationParameters* output_quant =
output_tensor->quantization();
if (output_quant && output_quant->scale() &&
output_quant->scale()->Length() > 1 &&
output_quant->scale()->Length() ==
output_tensor->shape()->Get(
output_quant->quantized_dimension())) {
op_sig.ext_options.quantize.is_per_channel_quantized = true;
}
} break;
case BuiltinOperator_ADD: {
if (subgraph->tensors()->Get(op->inputs()->Get(0))->quantization()) {
op_sig.ext_options.add.input_quantized = true;
}
} break;
case BuiltinOperator_EMBEDDING_LOOKUP: {
const Tensor* table_tensor =
subgraph->tensors()->Get(op->inputs()->Get(1));
const QuantizationParameters* table_quant = table_tensor->quantization();
if (table_quant && table_quant->scale() && table_quant->scale()->size() &&
table_tensor->shape() && table_tensor->shape()->size()) {
op_sig.ext_options.embedding_lookup.is_per_channel_quantized =
table_quant->scale()->size() > 1 &&
table_quant->scale()->size() == table_tensor->shape()->Get(0);
}
} break;
default:
break;
}
op_sig.inputs = GetOpSignatureTensorSpecs(op->inputs(), subgraph, model);
op_sig.outputs = GetOpSignatureTensorSpecs(op->outputs(), subgraph, model);
op_sig.version = op_code->version();
return op_sig;
}
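// Runtime variant used at delegation time: `builtin_data` is borrowed from
// the node rather than parsed, and custom ops carry their initial data
// through.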
OpSignature GetOpSignature(const TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
OpSignature op_sig = {
static_cast<BuiltinOperator>(registration->builtin_code)};
op_sig.builtin_data = node->builtin_data;
if (op_sig.op == BuiltinOperator_CUSTOM) {
op_sig.custom_name = registration->custom_name;
op_sig.custom_initial_data = node->custom_initial_data;
}
std::memset(&op_sig.ext_options, 0, sizeof(op_sig.ext_options));
op_sig.inputs = GetOpSignatureTensorSpecs(node->inputs, context, node);
op_sig.outputs = GetOpSignatureTensorSpecs(node->outputs, context, node);
op_sig.version = registration->version;
return op_sig;
}
} | #include "tensorflow/lite/tools/versioning/op_signature.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/model_builder.h"
namespace tflite {
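// Stub TfLiteContext that fakes a three-node execution plan (ADD, the op
// under test, ADD) over float32 tensors of shape {1, 1, 1, 1}, so op
// signatures can be derived without building a real interpreter.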
class StubTfLiteContext : public TfLiteContext {
public:
StubTfLiteContext(const int builtin_code, const int op_version,
const int num_inputs)
: TfLiteContext({0}) {
exec_plan_ = TfLiteIntArrayCreate(3);
for (int i = 0; i < 3; ++i) exec_plan_->data[i] = i;
int tensor_no = 0;
std::memset(nodes_, 0, sizeof(nodes_));
std::memset(registrations_, 0, sizeof(registrations_));
nodes_[0].inputs = TfLiteIntArrayCreate(1);
nodes_[0].inputs->data[0] = tensor_no++;
nodes_[0].outputs = TfLiteIntArrayCreate(1);
nodes_[0].outputs->data[0] = tensor_no;
nodes_[0].builtin_data = nullptr;
nodes_[1].inputs = TfLiteIntArrayCreate(num_inputs);
for (int i = 0; i < num_inputs; i++) {
nodes_[1].inputs->data[i] = tensor_no++;
}
nodes_[1].outputs = TfLiteIntArrayCreate(1);
nodes_[1].outputs->data[0] = tensor_no;
nodes_[1].builtin_data = malloc(1024);
std::memset(nodes_[1].builtin_data, 0, 1024);
nodes_[2].inputs = TfLiteIntArrayCreate(1);
nodes_[2].inputs->data[0] = tensor_no++;
nodes_[2].outputs = TfLiteIntArrayCreate(1);
nodes_[2].outputs->data[0] = tensor_no++;
nodes_[2].builtin_data = nullptr;
tensors_.resize(tensor_no);
for (size_t i = 0; i < tensors_.size(); i++) {
std::memset(&tensors_[i], 0, sizeof(tensors_[i]));
tensors_[i].buffer_handle = kTfLiteNullBufferHandle;
tensors_[i].type = kTfLiteFloat32;
tensors_[i].dims = TfLiteIntArrayCreate(4);
for (int d = 0; d < 4; d++) {
tensors_[i].dims->data[d] = 1;
}
}
tensors = tensors_.data();
tensors_size = tensors_.size();
registrations_[0].builtin_code = kTfLiteBuiltinAdd;
registrations_[1].builtin_code = builtin_code;
registrations_[1].version = op_version;
registrations_[2].builtin_code = kTfLiteBuiltinAdd;
this->GetExecutionPlan = StubGetExecutionPlan;
this->GetNodeAndRegistration = StubGetNodeAndRegistration;
}
~StubTfLiteContext() {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
if (node.builtin_data) {
free(node.builtin_data);
}
}
for (auto& tensor : tensors_) {
TfLiteIntArrayFree(tensor.dims);
}
TfLiteIntArrayFree(exec_plan_);
}
TfLiteIntArray* exec_plan() const { return exec_plan_; }
TfLiteNode* node() { return &nodes_[1]; }
TfLiteRegistration* registration() { return ®istrations_[1]; }
TfLiteNode* node(int node_index) { return &nodes_[node_index]; }
TfLiteRegistration* registration(int reg_index) {
return ®istrations_[reg_index];
}
TfLiteTensor* tensor(int tensor_index) { return &tensors_[tensor_index]; }
private:
static TfLiteStatus StubGetExecutionPlan(TfLiteContext* context,
TfLiteIntArray** execution_plan) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*execution_plan = stub->exec_plan();
return kTfLiteOk;
}
static TfLiteStatus StubGetNodeAndRegistration(
TfLiteContext* context, int node_index, TfLiteNode** node,
TfLiteRegistration** registration) {
StubTfLiteContext* stub = reinterpret_cast<StubTfLiteContext*>(context);
*node = stub->node(node_index);
*registration = stub->registration(node_index);
return kTfLiteOk;
}
TfLiteIntArray* exec_plan_;
TfLiteNode nodes_[3];
TfLiteRegistration registrations_[3];
std::vector<TfLiteTensor> tensors_;
};
TEST(GetOpSignature, FlatBufferModel) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
auto model = fb_model->GetModel();
auto subgraphs = model->subgraphs();
const SubGraph* subgraph = subgraphs->Get(0);
const Operator* op1 = subgraph->operators()->Get(0);
const OperatorCode* op_code1 =
model->operator_codes()->Get(op1->opcode_index());
OpSignature op_sig = GetOpSignature(op_code1, op1, subgraph, model);
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_FALSE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
free(op_sig.builtin_data);
const Operator* op2 = subgraph->operators()->Get(1);
const OperatorCode* op_code2 =
model->operator_codes()->Get(op2->opcode_index());
op_sig = GetOpSignature(op_code2, op2, subgraph, model);
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_FALSE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
free(op_sig.builtin_data);
const std::string& full_path3 = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/multi_signatures.bin");
auto fb_model3 = FlatBufferModel::BuildFromFile(full_path3.data());
ASSERT_TRUE(fb_model3);
auto model3 = fb_model3->GetModel();
auto subgraphs3 = model3->subgraphs();
const SubGraph* subgraph3 = subgraphs3->Get(0);
const Operator* op3 = subgraph3->operators()->Get(0);
const OperatorCode* op_code3 =
model3->operator_codes()->Get(op3->opcode_index());
op_sig = GetOpSignature(op_code3, op3, subgraph3, model3);
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 1);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_TRUE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 1);
EXPECT_TRUE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
free(op_sig.builtin_data);
}
TEST(GetOpSignature, TfLiteContext) {
auto context = std::make_unique<StubTfLiteContext>(kTfLiteBuiltinAdd,
1,
4);
OpSignature op_sig =
GetOpSignature(context.get(), context->node(), context->registration());
EXPECT_EQ(op_sig.op, BuiltinOperator_ADD);
EXPECT_EQ(op_sig.inputs[0].type, kTfLiteFloat32);
EXPECT_EQ(op_sig.inputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.inputs[0].is_const);
EXPECT_FALSE(op_sig.inputs[0].is_shape_dynamic);
EXPECT_EQ(op_sig.outputs[0].type, kTfLiteFloat32);
EXPECT_FALSE(op_sig.outputs[0].is_const);
EXPECT_EQ(op_sig.outputs[0].dims.size(), 4);
EXPECT_FALSE(op_sig.outputs[0].is_shape_dynamic);
EXPECT_NE(op_sig.builtin_data, nullptr);
EXPECT_EQ(op_sig.version, 1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/op_signature.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/op_signature_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe3e861d-aa7d-4a4b-9a82-7d51d95dcf7a | cpp | tensorflow/tensorflow | runtime_version | tensorflow/lite/tools/versioning/runtime_version.cc | tensorflow/lite/tools/versioning/runtime_version_test.cc | #include "tensorflow/lite/tools/versioning/runtime_version.h"
#include <cstring>
#include <map>
#include <string>
#include <utility>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
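// Returns true if runtime version `v1` precedes `v2`, comparing the
// dot-separated components numerically; when one version is a prefix of the
// other, the shorter one is considered older.
//
// Illustrative examples (not part of the original source):
//   CompareRuntimeVersion("1.14.0", "2.3.0");  // true
//   CompareRuntimeVersion("2.3", "2.3.0");     // true (prefix is older)
//   CompareRuntimeVersion("2.3.0", "2.3.0");   // false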
bool CompareRuntimeVersion(const std::string& v1, const std::string& v2) {
const std::vector<std::string> vec1 = absl::StrSplit(v1, '.');
const std::vector<std::string> vec2 = absl::StrSplit(v2, '.');
int i = 0;
while (i < vec1.size() && i < vec2.size()) {
int v1_val, v2_val;
if (absl::SimpleAtoi(vec1[i], &v1_val) &&
absl::SimpleAtoi(vec2[i], &v2_val)) {
if (v1_val != v2_val) return v1_val < v2_val;
}
++i;
}
return i < vec2.size();
}
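// Lazily-built lookup table from (builtin operator, op version) to the
// earliest TensorFlow release whose runtime supports that version.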
std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
int op_version) {
static const std::map<std::pair<BuiltinOperator, int>,
std::string>* op_version_map =
new std::map<std::pair<BuiltinOperator, int>, std::string>(
{{{BuiltinOperator_AVERAGE_POOL_2D, 1}, "1.5.0"},
{{BuiltinOperator_AVERAGE_POOL_2D, 2}, "1.14.0"},
{{BuiltinOperator_AVERAGE_POOL_2D, 3}, "2.3.0"},
{{BuiltinOperator_BATCH_MATMUL, 1}, "2.3.0"},
{{BuiltinOperator_BATCH_MATMUL, 2}, "2.3.0"},
{{BuiltinOperator_BATCH_MATMUL, 3}, "2.4.0"},
{{BuiltinOperator_BATCH_MATMUL, 4}, "2.5.0"},
{{BuiltinOperator_BROADCAST_TO, 2}, "2.5.0"},
{{BuiltinOperator_BROADCAST_TO, 3}, "2.5.0"},
{{BuiltinOperator_CONV_2D, 1}, "1.5.0"},
{{BuiltinOperator_CONV_2D, 2}, "1.14.0"},
{{BuiltinOperator_CONV_2D, 3}, "1.14.0"},
{{BuiltinOperator_CONV_2D, 4}, "2.3.0"},
{{BuiltinOperator_CONV_2D, 5}, "2.4.0"},
{{BuiltinOperator_CONV_2D, 6}, "2.9.0"},
{{BuiltinOperator_CONV_2D, 7}, "2.11.0"},
{{BuiltinOperator_CONV_2D, 8}, "2.15.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 1}, "1.5.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 2}, "1.12.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 3}, "1.14.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 4}, "2.2.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 5}, "2.3.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 6}, "2.3.0"},
{{BuiltinOperator_DEPTHWISE_CONV_2D, 7}, "2.11.0"},
{{BuiltinOperator_ADD, 1}, "1.5.0"},
{{BuiltinOperator_ADD, 2}, "1.14.0"},
{{BuiltinOperator_ADD, 3}, "2.4.0"},
{{BuiltinOperator_ADD, 4}, "2.6.0"},
{{BuiltinOperator_ADD, 5}, "2.13.0"},
{{BuiltinOperator_ADD_N, 1}, "1.14.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 1}, "1.6.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 2}, "1.14.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 3}, "2.3.0"},
{{BuiltinOperator_SPACE_TO_BATCH_ND, 4}, "2.12.0"},
{{BuiltinOperator_SUB, 1}, "1.6.0"},
{{BuiltinOperator_SUB, 2}, "1.14.0"},
{{BuiltinOperator_SUB, 3}, "2.3.0"},
{{BuiltinOperator_SUB, 4}, "2.4.0"},
{{BuiltinOperator_SUB, 5}, "2.4.0"},
{{BuiltinOperator_DENSIFY, 1}, "2.2.0"},
{{BuiltinOperator_DIV, 1}, "1.6.0"},
{{BuiltinOperator_DIV, 2}, "2.3.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 1}, "1.6.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 2}, "1.14.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 3}, "2.3.0"},
{{BuiltinOperator_BATCH_TO_SPACE_ND, 4}, "2.12.0"},
{{BuiltinOperator_CAST, 1}, "1.5.0"},
{{BuiltinOperator_CAST, 2}, "2.7.0"},
{{BuiltinOperator_CAST, 3}, "2.8.0"},
{{BuiltinOperator_CAST, 4}, "2.9.0"},
{{BuiltinOperator_CAST, 5}, "2.12.0"},
{{BuiltinOperator_CAST, 6}, "2.15.0"},
{{BuiltinOperator_CONCATENATION, 1}, "1.5.0"},
{{BuiltinOperator_CONCATENATION, 2}, "1.14.0"},
{{BuiltinOperator_CONCATENATION, 3}, "2.3.0"},
{{BuiltinOperator_CONCATENATION, 4}, "2.14.0"},
{{BuiltinOperator_DEPTH_TO_SPACE, 1}, "2.1.0"},
{{BuiltinOperator_DEPTH_TO_SPACE, 2}, "2.5.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 1}, "1.13.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 2}, "1.14.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 3}, "1.14.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP, 4}, "2.18.0"},
{{BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, 1}, "1.5.0"},
{{BuiltinOperator_FAKE_QUANT, 1}, "1.5.0"},
{{BuiltinOperator_FAKE_QUANT, 2}, "1.10.0"},
{{BuiltinOperator_FULLY_CONNECTED, 1}, "1.5.0"},
{{BuiltinOperator_FULLY_CONNECTED, 2}, "1.10.0"},
{{BuiltinOperator_FULLY_CONNECTED, 3}, "1.14.0"},
{{BuiltinOperator_FULLY_CONNECTED, 4}, "1.14.0"},
{{BuiltinOperator_FULLY_CONNECTED, 5}, "2.0.0"},
{{BuiltinOperator_FULLY_CONNECTED, 6}, "2.1.0"},
{{BuiltinOperator_FULLY_CONNECTED, 7}, "2.3.0"},
{{BuiltinOperator_FULLY_CONNECTED, 8}, "2.3.0"},
{{BuiltinOperator_FULLY_CONNECTED, 9}, "2.3.0"},
{{BuiltinOperator_FULLY_CONNECTED, 10}, "2.11.0"},
{{BuiltinOperator_FULLY_CONNECTED, 11}, "2.15.0"},
{{BuiltinOperator_FULLY_CONNECTED, 12}, "2.17.0"},
{{BuiltinOperator_FULLY_CONNECTED, 13}, "2.18.0"},
{{BuiltinOperator_GATHER, 1}, "1.6.0"},
{{BuiltinOperator_GATHER, 2}, "1.14.0"},
{{BuiltinOperator_GATHER, 3}, "1.15.0"},
{{BuiltinOperator_GATHER, 4}, "2.4.0"},
{{BuiltinOperator_GATHER, 5}, "2.5.0"},
{{BuiltinOperator_GATHER, 6}, "2.13.0"},
{{BuiltinOperator_GATHER, 7}, "2.15.0"},
{{BuiltinOperator_GATHER_ND, 1}, "1.14.0"},
{{BuiltinOperator_GATHER_ND, 2}, "2.3.0"},
{{BuiltinOperator_GATHER_ND, 3}, "2.5.0"},
{{BuiltinOperator_GATHER_ND, 4}, "2.13.0"},
{{BuiltinOperator_GATHER_ND, 5}, "2.16.0"},
{{BuiltinOperator_HASHTABLE_LOOKUP, 1}, "1.5.0"},
{{BuiltinOperator_SVDF, 1}, "1.5.0"},
{{BuiltinOperator_SVDF, 2}, "1.14.0"},
{{BuiltinOperator_SVDF, 3}, "2.2.0"},
{{BuiltinOperator_SVDF, 4}, "2.3.0"},
{{BuiltinOperator_L2_NORMALIZATION, 1}, "1.5.0"},
{{BuiltinOperator_L2_NORMALIZATION, 2}, "1.14.0"},
{{BuiltinOperator_L2_POOL_2D, 1}, "1.5.0"},
{{BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, 1}, "1.5.0"},
{{BuiltinOperator_MAX_POOL_2D, 1}, "1.5.0"},
{{BuiltinOperator_MAX_POOL_2D, 2}, "1.14.0"},
{{BuiltinOperator_MAX_POOL_2D, 3}, "2.3.0"},
{{BuiltinOperator_MAXIMUM, 1}, "1.14.0"},
{{BuiltinOperator_MAXIMUM, 2}, "1.14.0"},
{{BuiltinOperator_MAXIMUM, 3}, "2.3.0"},
{{BuiltinOperator_MAXIMUM, 4}, "2.3.0"},
{{BuiltinOperator_MINIMUM, 1}, "1.14.0"},
{{BuiltinOperator_MINIMUM, 2}, "1.14.0"},
{{BuiltinOperator_MINIMUM, 3}, "2.3.0"},
{{BuiltinOperator_MINIMUM, 4}, "2.3.0"},
{{BuiltinOperator_MUL, 1}, "1.5.0"},
{{BuiltinOperator_MUL, 2}, "1.14.0"},
{{BuiltinOperator_MUL, 3}, "1.15.0"},
{{BuiltinOperator_MUL, 4}, "2.3.0"},
{{BuiltinOperator_MUL, 5}, "2.6.0"},
{{BuiltinOperator_MUL, 6}, "2.11.0"},
{{BuiltinOperator_MUL, 7}, "2.13.0"},
{{BuiltinOperator_NON_MAX_SUPPRESSION_V4, 1}, "2.1.0"},
{{BuiltinOperator_NON_MAX_SUPPRESSION_V5, 1}, "2.1.0"},
{{BuiltinOperator_PAD, 1}, "1.5.0"},
{{BuiltinOperator_PAD, 2}, "1.14.0"},
{{BuiltinOperator_PAD, 3}, "2.4.0"},
{{BuiltinOperator_PAD, 4}, "2.6.0"},
{{BuiltinOperator_TILE, 1}, "1.10.1"},
{{BuiltinOperator_TILE, 2}, "2.2.0"},
{{BuiltinOperator_TILE, 3}, "2.8.0"},
{{BuiltinOperator_PADV2, 1}, "1.9.0"},
{{BuiltinOperator_PADV2, 2}, "1.14.0"},
{{BuiltinOperator_PADV2, 3}, "2.4.0"},
{{BuiltinOperator_PADV2, 4}, "2.6.0"},
{{BuiltinOperator_RESHAPE, 1}, "1.5.0"},
{{BuiltinOperator_SOFTMAX, 1}, "1.5.0"},
{{BuiltinOperator_SOFTMAX, 2}, "1.14.0"},
{{BuiltinOperator_SOFTMAX, 3}, "2.3.0"},
{{BuiltinOperator_SPACE_TO_DEPTH, 1}, "1.5.0"},
{{BuiltinOperator_SPACE_TO_DEPTH, 2}, "1.14.0"},
{{BuiltinOperator_TRANSPOSE, 1}, "1.6.0"},
{{BuiltinOperator_TRANSPOSE, 2}, "1.14.0"},
{{BuiltinOperator_TRANSPOSE, 3}, "1.15.0"},
{{BuiltinOperator_TRANSPOSE, 4}, "2.3.0"},
{{BuiltinOperator_TRANSPOSE, 5}, "2.4.0"},
{{BuiltinOperator_TRANSPOSE, 6}, "2.12.0"},
{{BuiltinOperator_LSTM, 1}, "1.7.0"},
{{BuiltinOperator_LSTM, 2}, "1.10.0"},
{{BuiltinOperator_LSTM, 3}, "1.14.0"},
{{BuiltinOperator_LSTM, 4}, "2.3.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 1}, "1.13.1"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 2}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 3}, "2.3.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, 4}, "2.12.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, 1}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, 2}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, 3}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 1}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 2}, "1.14.0"},
{{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 3}, "2.3.0"},
{{BuiltinOperator_MEAN, 1}, "1.6.0"},
{{BuiltinOperator_MEAN, 2}, "1.14.0"},
{{BuiltinOperator_MEAN, 3}, "2.4.0"},
{{BuiltinOperator_SUM, 1}, "1.10.0"},
{{BuiltinOperator_SUM, 2}, "1.15.0"},
{{BuiltinOperator_REDUCE_MAX, 1}, "1.11.0"},
{{BuiltinOperator_REDUCE_MAX, 2}, "1.14.0"},
{{BuiltinOperator_REDUCE_MAX, 3}, "2.5.0"},
{{BuiltinOperator_REDUCE_MIN, 1}, "1.11.0"},
{{BuiltinOperator_REDUCE_MIN, 2}, "1.14.0"},
{{BuiltinOperator_REDUCE_MIN, 3}, "2.5.0"},
{{BuiltinOperator_REDUCE_PROD, 1}, "1.11.0"},
{{BuiltinOperator_REDUCE_PROD, 2}, "2.6.0"},
{{BuiltinOperator_REDUCE_ANY, 1}, "1.11.0"},
{{BuiltinOperator_RELU6, 1}, "1.5.0"},
{{BuiltinOperator_RELU6, 2}, "1.14.0"},
{{BuiltinOperator_RELU6, 3}, "2.5.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 1}, "1.7.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 2}, "1.14.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 3}, "2.2.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 4}, "2.5.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 1}, "1.13.1"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 2}, "1.14.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 3}, "2.3.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 4}, "2.4.0"},
{{BuiltinOperator_RNN, 1}, "1.5.0"},
{{BuiltinOperator_RNN, 2}, "1.14.0"},
{{BuiltinOperator_RNN, 3}, "2.3.0"},
{{BuiltinOperator_SKIP_GRAM, 1}, "1.5.0"},
{{BuiltinOperator_SQUEEZE, 1}, "1.6.0"},
{{BuiltinOperator_SQUEEZE, 2}, "2.5.0"},
{{BuiltinOperator_SPLIT, 1}, "1.5.0"},
{{BuiltinOperator_SPLIT, 2}, "1.14.0"},
{{BuiltinOperator_SPLIT, 3}, "1.14.0"},
{{BuiltinOperator_SPLIT, 4}, "2.3.0"},
{{BuiltinOperator_SPLIT_V, 1}, "1.13.1"},
{{BuiltinOperator_SPLIT_V, 2}, "2.3.0"},
{{BuiltinOperator_STRIDED_SLICE, 1}, "1.6.0"},
{{BuiltinOperator_STRIDED_SLICE, 2}, "1.14.0"},
{{BuiltinOperator_STRIDED_SLICE, 3}, "2.1.0"},
{{BuiltinOperator_STRIDED_SLICE, 4}, "2.2.0"},
{{BuiltinOperator_STRIDED_SLICE, 5}, "2.5.0"},
{{BuiltinOperator_STRIDED_SLICE, 6}, "2.6.0"},
{{BuiltinOperator_STRIDED_SLICE, 7}, "2.14.0"},
{{BuiltinOperator_STRIDED_SLICE, 8}, "2.14.0"},
{{BuiltinOperator_TOPK_V2, 1}, "1.7.0"},
{{BuiltinOperator_TOPK_V2, 2}, "1.14.0"},
{{BuiltinOperator_TOPK_V2, 3}, "2.13.0"},
{{BuiltinOperator_ARG_MAX, 1}, "1.9.0"},
{{BuiltinOperator_ARG_MAX, 2}, "1.14.0"},
{{BuiltinOperator_ARG_MAX, 3}, "2.9.0"},
{{BuiltinOperator_ARG_MIN, 1}, "1.9.0"},
{{BuiltinOperator_ARG_MIN, 2}, "1.14.0"},
{{BuiltinOperator_ARG_MIN, 3}, "2.9.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 1}, "1.9.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 2}, "2.2.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 3}, "2.3.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 4}, "2.13.0"},
{{BuiltinOperator_TRANSPOSE_CONV, 5}, "2.15.0"},
{{BuiltinOperator_SPARSE_TO_DENSE, 1}, "1.9.0"},
{{BuiltinOperator_SPARSE_TO_DENSE, 2}, "1.14.0"},
{{BuiltinOperator_SPARSE_TO_DENSE, 3}, "1.15.0"},
{{BuiltinOperator_EXPAND_DIMS, 1}, "1.10.0"},
{{BuiltinOperator_PACK, 1}, "1.11.0"},
{{BuiltinOperator_PACK, 2}, "1.14.0"},
{{BuiltinOperator_PACK, 3}, "2.3.0"},
{{BuiltinOperator_PACK, 4}, "2.13.0"},
{{BuiltinOperator_SHAPE, 1}, "1.10.0"},
{{BuiltinOperator_SLICE, 1}, "1.14.0"},
{{BuiltinOperator_SLICE, 2}, "1.14.0"},
{{BuiltinOperator_SLICE, 3}, "1.14.0"},
{{BuiltinOperator_SLICE, 4}, "2.4.0"},
{{BuiltinOperator_SLICE, 5}, "2.5.0"},
{{BuiltinOperator_SLICE, 6}, "2.14.0"},
{{BuiltinOperator_TANH, 1}, "1.14.0"},
{{BuiltinOperator_TANH, 2}, "1.14.0"},
{{BuiltinOperator_TANH, 3}, "2.3.0"},
{{BuiltinOperator_ONE_HOT, 1}, "1.11.0"},
{{BuiltinOperator_UNPACK, 1}, "1.11.0"},
{{BuiltinOperator_UNPACK, 2}, "1.14.0"},
{{BuiltinOperator_UNPACK, 3}, "2.2.0"},
{{BuiltinOperator_UNPACK, 4}, "2.3.0"},
{{BuiltinOperator_LEAKY_RELU, 1}, "1.13.1"},
{{BuiltinOperator_LEAKY_RELU, 2}, "2.3.0"},
{{BuiltinOperator_LOGISTIC, 1}, "1.14.0"},
{{BuiltinOperator_LOGISTIC, 2}, "1.14.0"},
{{BuiltinOperator_LOGISTIC, 3}, "2.3.0"},
{{BuiltinOperator_LOG_SOFTMAX, 1}, "1.14.0"},
{{BuiltinOperator_LOG_SOFTMAX, 2}, "1.14.0"},
{{BuiltinOperator_LSH_PROJECTION, 1}, "1.5.0"},
{{BuiltinOperator_SQUARED_DIFFERENCE, 1}, "1.13.1"},
{{BuiltinOperator_SQUARED_DIFFERENCE, 2}, "2.5.0"},
{{BuiltinOperator_MIRROR_PAD, 1}, "1.13.1"},
{{BuiltinOperator_MIRROR_PAD, 2}, "2.3.0"},
{{BuiltinOperator_MIRROR_PAD, 3}, "2.12.0"},
{{BuiltinOperator_UNIQUE, 1}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 1}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 2}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 3}, "2.3.0"},
{{BuiltinOperator_WHERE, 1}, "1.14.0"},
{{BuiltinOperator_DEQUANTIZE, 1}, "1.13.1"},
{{BuiltinOperator_DEQUANTIZE, 2}, "1.14.0"},
{{BuiltinOperator_DEQUANTIZE, 3}, "1.15.0"},
{{BuiltinOperator_DEQUANTIZE, 4}, "2.2.0"},
{{BuiltinOperator_DEQUANTIZE, 5}, "2.7.0"},
{{BuiltinOperator_DEQUANTIZE, 6}, "2.18.0"},
{{BuiltinOperator_REVERSE_SEQUENCE, 1}, "1.14.0"},
{{BuiltinOperator_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_EQUAL, 3}, "2.3.0"},
{{BuiltinOperator_EQUAL, 4}, "2.13.0"},
{{BuiltinOperator_NOT_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_NOT_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_NOT_EQUAL, 3}, "2.3.0"},
{{BuiltinOperator_GREATER, 1}, "1.14.0"},
{{BuiltinOperator_GREATER, 2}, "1.14.0"},
{{BuiltinOperator_GREATER_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_GREATER_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_GREATER_EQUAL, 3}, "2.13.0"},
{{BuiltinOperator_LESS, 1}, "1.14.0"},
{{BuiltinOperator_LESS, 2}, "1.14.0"},
{{BuiltinOperator_LESS, 3}, "2.13.0"},
{{BuiltinOperator_LESS_EQUAL, 1}, "1.14.0"},
{{BuiltinOperator_LESS_EQUAL, 2}, "1.14.0"},
{{BuiltinOperator_SCATTER_ND, 1}, "2.1.0"},
{{BuiltinOperator_SEGMENT_SUM, 1}, "2.2.0"},
{{BuiltinOperator_SELECT, 1}, "1.14.0"},
{{BuiltinOperator_SELECT, 2}, "1.14.0"},
{{BuiltinOperator_SELECT, 3}, "2.12.0"},
{{BuiltinOperator_SELECT, 4}, "2.12.0"},
{{BuiltinOperator_SELECT_V2, 1}, "2.2.0"},
{{BuiltinOperator_SELECT_V2, 2}, "2.12.0"},
{{BuiltinOperator_IF, 1}, "1.15.0"},
{{BuiltinOperator_FLOOR_DIV, 1}, "1.14.0"},
{{BuiltinOperator_FLOOR_DIV, 2}, "1.14.0"},
{{BuiltinOperator_FLOOR_DIV, 3}, "2.13.0"},
{{BuiltinOperator_FLOOR, 1}, "1.9.0"},
{{BuiltinOperator_CEIL, 1}, "1.14.0"},
{{BuiltinOperator_MATRIX_DIAG, 1}, "1.14.0"},
{{BuiltinOperator_MATRIX_SET_DIAG, 1}, "1.14.0"},
{{BuiltinOperator_ELU, 1}, "1.14.0"},
{{BuiltinOperator_QUANTIZE, 1}, "1.14.0"},
{{BuiltinOperator_QUANTIZE, 2}, "1.15.0"},
{{BuiltinOperator_QUANTIZE, 3}, "2.7.0"},
{{BuiltinOperator_ROUND, 1}, "1.14.0"},
{{BuiltinOperator_RELU, 1}, "1.5.0"},
{{BuiltinOperator_RELU, 2}, "2.1.0"},
{{BuiltinOperator_RELU, 3}, "2.5.0"},
{{BuiltinOperator_RELU_N1_TO_1, 1}, "1.5.0"},
{{BuiltinOperator_RELU_0_TO_1, 1}, "2.10.0"},
{{BuiltinOperator_PRELU, 1}, "1.8.0"},
{{BuiltinOperator_EXP, 1}, "1.7.0"},
{{BuiltinOperator_EXP, 2}, "2.12.0"},
{{BuiltinOperator_COS, 1}, "1.14.0"},
{{BuiltinOperator_NEG, 1}, "1.9.0"},
{{BuiltinOperator_POW, 1}, "1.10.0"},
{{BuiltinOperator_LOGICAL_OR, 1}, "1.11.0"},
{{BuiltinOperator_LOGICAL_AND, 1}, "1.11.0"},
{{BuiltinOperator_LOGICAL_NOT, 1}, "1.11.0"},
{{BuiltinOperator_FLOOR_MOD, 1}, "1.13.0"},
{{BuiltinOperator_FLOOR_MOD, 2}, "2.13.0"},
{{BuiltinOperator_RANGE, 1}, "1.13.0"},
{{BuiltinOperator_RANGE, 2}, "2.14.0"},
{{BuiltinOperator_SIN, 1}, "1.9.0"},
{{BuiltinOperator_LOG, 1}, "1.14.0"},
{{BuiltinOperator_LOG, 2}, "2.15.0"},
{{BuiltinOperator_SQRT, 1}, "1.10.0"},
{{BuiltinOperator_RSQRT, 1}, "1.10.0"},
{{BuiltinOperator_RSQRT, 2}, "2.5.0"},
{{BuiltinOperator_RSQRT, 3}, "2.15.0"},
{{BuiltinOperator_SQUARE, 1}, "1.12.0"},
{{BuiltinOperator_ZEROS_LIKE, 1}, "1.12.0"},
{{BuiltinOperator_ABS, 1}, "1.13.0"},
{{BuiltinOperator_ABS, 2}, "2.4.0"},
{{BuiltinOperator_ABS, 3}, "2.5.0"},
{{BuiltinOperator_ABS, 4}, "2.6.0"},
{{BuiltinOperator_ABS, 5}, "2.12.0"},
{{BuiltinOperator_HARD_SWISH, 1}, "1.15.0"},
{{BuiltinOperator_FILL, 1}, "1.13.0"},
{{BuiltinOperator_FILL, 2}, "2.3.0"},
{{BuiltinOperator_FILL, 3}, "2.5.0"},
{{BuiltinOperator_FILL, 4}, "2.12.0"},
{{BuiltinOperator_REVERSE_V2, 1}, "1.14.0"},
{{BuiltinOperator_REVERSE_V2, 2}, "2.2.0"},
{{BuiltinOperator_REVERSE_V2, 3}, "2.5.0"},
{{BuiltinOperator_RANK, 1}, "1.14.0"},
{{BuiltinOperator_WHILE, 1}, "1.15.0"},
{{BuiltinOperator_CUMSUM, 1}, "2.4.0"},
{{BuiltinOperator_CALL_ONCE, 1}, "2.5.0"},
{{BuiltinOperator_RFFT2D, 1}, "2.5.0"},
{{BuiltinOperator_CONV_3D, 1}, "2.5.0"},
{{BuiltinOperator_IMAG, 1}, "2.5.0"},
{{BuiltinOperator_REAL, 1}, "2.5.0"},
{{BuiltinOperator_COMPLEX_ABS, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE_FIND, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE_IMPORT, 1}, "2.5.0"},
{{BuiltinOperator_HASHTABLE_SIZE, 1}, "2.5.0"},
{{BuiltinOperator_REDUCE_ALL, 1}, "2.6.0"},
{{BuiltinOperator_CONV_3D_TRANSPOSE, 1}, "2.6.0"},
{{BuiltinOperator_VAR_HANDLE, 1}, "2.6.0"},
{{BuiltinOperator_READ_VARIABLE, 1}, "2.6.0"},
{{BuiltinOperator_ASSIGN_VARIABLE, 1}, "2.6.0"},
{{BuiltinOperator_BROADCAST_ARGS, 1}, "2.6.0"},
{{BuiltinOperator_RANDOM_STANDARD_NORMAL, 1}, "2.8.0"},
{{BuiltinOperator_BUCKETIZE, 1}, "2.8.0"},
{{BuiltinOperator_WHERE, 2}, "2.8.0"},
{{BuiltinOperator_RANDOM_UNIFORM, 1}, "2.8.0"},
{{BuiltinOperator_MULTINOMIAL, 1}, "2.8.0"},
{{BuiltinOperator_GELU, 1}, "2.9.0"},
{{BuiltinOperator_GELU, 2}, "2.9.0"},
{{BuiltinOperator_DYNAMIC_UPDATE_SLICE, 1}, "2.9.0"},
{{BuiltinOperator_DYNAMIC_UPDATE_SLICE, 2}, "2.17.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_PROD, 1}, "2.10.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_MAX, 1}, "2.10.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_MIN, 1}, "2.11.0"},
{{BuiltinOperator_UNSORTED_SEGMENT_SUM, 1}, "2.10.0"},
{{BuiltinOperator_ATAN2, 1}, "2.10.0"},
{{BuiltinOperator_SIGN, 1}, "2.11.0"},
{{BuiltinOperator_SIGN, 2}, "2.12.0"},
{{BuiltinOperator_BITCAST, 1}, "2.13.0"},
{{BuiltinOperator_BITWISE_XOR, 1}, "2.13.0"},
{{BuiltinOperator_RIGHT_SHIFT, 1}, "2.13.0"},
{{BuiltinOperator_STABLEHLO_SCATTER, 1}, "2.15.0"},
{{BuiltinOperator_DILATE, 1}, "2.15.0"},
{{BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR, 1}, "2.15.0"},
{{BuiltinOperator_REDUCE_WINDOW, 1}, "2.15.0"},
{{BuiltinOperator_STABLEHLO_GATHER, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_ADD, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_MULTIPLY, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_REDUCE_WINDOW, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_MAXIMUM, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_MINIMUM, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_PAD, 1}, "2.16.0"},
{{BuiltinOperator_STABLEHLO_COMPOSITE, 1}, "2.17.0"},
{{BuiltinOperator_STABLEHLO_AND, 1}, "2.17.0"},
{{BuiltinOperator_STABLEHLO_SHIFT_LEFT, 1}, "2.17.0"},
{{BuiltinOperator_STABLEHLO_CBRT, 1}, "2.17.0"}});
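  // Look up the {op_code, op_version} pair; an empty result means no minimum
  // runtime version has been registered for that pairing.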
std::pair<BuiltinOperator, int> version_key = {op_code, op_version};
auto it = op_version_map->find(version_key);
if (it == op_version_map->end()) {
return std::string();
}
return it->second;
}
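// Scans every operator in every subgraph and records, in the model's
// "min_runtime_version" metadata entry, the highest runtime version required
// by any single op, i.e. the oldest TFLite runtime that can run the model.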
void UpdateMinimumRuntimeVersionForModel(uint8_t* model_buffer_pointer) {
auto model = GetMutableModel(model_buffer_pointer);
std::string model_min_version;
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
const OperatorCode* op_code =
model->operator_codes()->Get(op->opcode_index());
std::string runtime_version = FindMinimumRuntimeVersionForOp(
GetBuiltinCode(op_code), op_code->version());
if (runtime_version.empty()) {
continue;
}
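      // CompareRuntimeVersion(a, b) is true when a precedes b, so this keeps
      // the highest version requirement seen so far.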
if (CompareRuntimeVersion(model_min_version, runtime_version)) {
model_min_version = runtime_version;
}
}
}
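  // The metadata buffer reserved for this string is 16 bytes, so longer
  // version strings cannot be recorded.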
if (model_min_version.size() >= 16) {
TFLITE_LOG(TFLITE_LOG_WARNING,
"Skip writing minimum runtime version string since it's "
"longer than 16 bytes.");
return;
}
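  // Overwrite the "min_runtime_version" metadata buffer in place; note that
  // the loop only updates an entry that already exists in the model.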
for (int i = 0; i < model->metadata()->size(); ++i) {
if (model->metadata()->Get(i)->name()->str() == "min_runtime_version") {
auto buffer = model->metadata()->Get(i)->buffer();
auto buffer_data =
model->mutable_buffers()->GetMutableObject(buffer)->mutable_data();
memset(buffer_data->data(), 0, buffer_data->size());
memcpy(buffer_data->data(), model_min_version.data(),
model_min_version.size());
break;
}
}
}
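// Illustrative usage sketch (hypothetical caller): the pass mutates the
// flatbuffer in place, so the serialized model must live in a writable buffer:
//   std::string model_content = /* serialized .tflite flatbuffer */;
//   UpdateMinimumRuntimeVersionForModel(
//       reinterpret_cast<uint8_t*>(&model_content[0]));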
} | #include "tensorflow/lite/tools/versioning/runtime_version.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
TEST(OpVersionTest, CompareRuntimeVersion) {
EXPECT_TRUE(CompareRuntimeVersion("1.9", "1.13"));
EXPECT_FALSE(CompareRuntimeVersion("1.13", "1.13"));
EXPECT_TRUE(CompareRuntimeVersion("1.14", "1.14.1"));
EXPECT_FALSE(CompareRuntimeVersion("1.14.1", "1.14"));
EXPECT_FALSE(CompareRuntimeVersion("1.14.1", "1.9"));
EXPECT_FALSE(CompareRuntimeVersion("1.0.9", "1.0.8"));
EXPECT_FALSE(CompareRuntimeVersion("2.1.0", "1.2.0"));
EXPECT_TRUE(CompareRuntimeVersion("", "1.13"));
EXPECT_FALSE(CompareRuntimeVersion("", ""));
}
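// Walks every op version registered in the builtin op resolver and checks
// that each one has a corresponding minimum runtime version entry.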
TEST(OpVersionTest, OpversionMissing) {
tflite::ops::builtin::BuiltinOpResolver resolver;
for (int id = BuiltinOperator_MIN; id <= BuiltinOperator_MAX; ++id) {
for (int version = 1;; ++version) {
auto op_code = static_cast<tflite::BuiltinOperator>(id);
if (resolver.FindOp(op_code, version) == nullptr) break;
std::string runtime_version =
FindMinimumRuntimeVersionForOp(op_code, version);
EXPECT_NE(runtime_version, "")
<< "Please add the version " << version << " of "
<< tflite::EnumNamesBuiltinOperator()[op_code]
<< " to runtime_version.cc";
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/runtime_version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/runtime_version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35f936ca-0998-4bd1-b503-610007d31953 | cpp | tensorflow/tensorflow | op_version | tensorflow/lite/tools/versioning/op_version.cc | tensorflow/lite/tools/versioning/op_version_test.cc | #include "tensorflow/lite/tools/versioning/op_version.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace {
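// Returns true when a binary op's two inputs have different shapes, which
// forces the kernel to broadcast one input to match the other.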
bool NeedBroadcastForBinaryInputs(const OpSignature& op_sig) {
if (op_sig.inputs.size() < 2) {
return false;
}
return (op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims);
}
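// Returns the largest rank among all of the op's inputs.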
int GetInputMaxDims(const OpSignature& op_sig) {
int max_dims = 0;
for (auto& input : op_sig.inputs) {
if (input.dims.size() > max_dims) {
max_dims = input.dims.size();
}
}
return max_dims;
}
}
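// Maps an op signature (tensor types, shapes, and builtin parameters) to the
// minimum operator version whose kernel supports that configuration.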
int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
switch (op_sig.op) {
case BuiltinOperator_CONV_2D: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto conv_params =
reinterpret_cast<TfLiteConvParams*>(op_sig.builtin_data);
TFLITE_DCHECK(conv_params != nullptr);
if (conv_params->quantized_bias_type) {
return 8;
}
}
if (op_sig.ext_options.conv_2d.is_grouped_convolution) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 7;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (op_sig.ext_options.conv_2d.is_per_channel_quantized) {
return 5;
}
return 2;
}
return 1;
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(1).type == kTfLiteInt16) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized) {
return 6;
}
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 7;
}
auto depthwise_conv_params =
reinterpret_cast<TfLiteDepthwiseConvParams*>(op_sig.builtin_data);
TFLITE_DCHECK(depthwise_conv_params != nullptr);
if (depthwise_conv_params->dilation_width_factor != 1 ||
depthwise_conv_params->dilation_height_factor != 1) {
return 2;
}
return 1;
}
case BuiltinOperator_EMBEDDING_LOOKUP: {
if (op_sig.inputs.at(1).type == kTfLiteInt4 ||
op_sig.ext_options.embedding_lookup.is_per_channel_quantized) {
return 4;
}
return 1;
}
case BuiltinOperator_FAKE_QUANT: {
auto fake_quant_params =
reinterpret_cast<TfLiteFakeQuantParams*>(op_sig.builtin_data);
TFLITE_DCHECK(fake_quant_params != nullptr);
if (fake_quant_params->narrow_range) {
return 2;
}
return 1;
}
case BuiltinOperator_FULLY_CONNECTED: {
auto fully_connected_params =
reinterpret_cast<TfLiteFullyConnectedParams*>(op_sig.builtin_data);
TFLITE_DCHECK(fully_connected_params != nullptr);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 13;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32 &&
op_sig.ext_options.fully_connected.is_per_channel_quantized) {
return 12;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
if (fully_connected_params->quantized_bias_type) {
return 11;
}
}
if (op_sig.ext_options.fully_connected.sparse_weight) {
return 8;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 7;
}
if (op_sig.inputs.size() == 2) {
return 6;
}
if (fully_connected_params->keep_num_dims) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
op_sig.inputs.at(1).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 10;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (fully_connected_params->asymmetric_quantize_inputs) {
return 9;
}
return 3;
}
if (fully_connected_params->weights_format ==
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
return 2;
}
return 1;
}
case BuiltinOperator_GATHER: {
if (op_sig.inputs.at(0).type == kTfLiteInt4) {
return 7;
}
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 6;
}
auto gather_params =
reinterpret_cast<TfLiteGatherParams*>(op_sig.builtin_data);
if (gather_params && gather_params->batch_dims != 0) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SVDF: {
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto svdf_params =
reinterpret_cast<TfLiteSVDFParams*>(op_sig.builtin_data);
if (svdf_params && svdf_params->asymmetric_quantize_inputs) {
return 4;
}
return 2;
}
return 1;
}
case BuiltinOperator_SIGN:
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return 2;
}
return 1;
case BuiltinOperator_MUL:
if ((op_sig.inputs.at(0).type == kTfLiteInt16 &&
!op_sig.ext_options.mul.input_quantized) ||
op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 7;
}
if (op_sig.inputs.at(0).type == kTfLiteComplex64) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt64) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
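      // Version 3 is needed when the effective rescale factor
      // (input1_scale * input2_scale / output_scale) is >= 1.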
if (op_sig.ext_options.mul.input1_scale != 0 &&
op_sig.ext_options.mul.input2_scale != 0 &&
op_sig.ext_options.mul.output_scale != 0 &&
(op_sig.ext_options.mul.input1_scale *
op_sig.ext_options.mul.input2_scale /
op_sig.ext_options.mul.output_scale) >= 1.0) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_MAX_POOL_2D:
case BuiltinOperator_AVERAGE_POOL_2D:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TRANSPOSE:
if (op_sig.inputs.at(0).dims.size() > 5) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (op_sig.inputs.at(0).dims.size() > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TRANSPOSE_CONV: {
auto transpose_conv_params =
reinterpret_cast<TfLiteTransposeConvParams*>(op_sig.builtin_data);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
TFLITE_DCHECK(transpose_conv_params != nullptr);
if (transpose_conv_params->quantized_bias_type) {
return 5;
}
}
if (transpose_conv_params != nullptr &&
transpose_conv_params->activation) {
return 4;
}
if (op_sig.inputs.size() == 4 &&
op_sig.inputs.at(3).type != kTfLiteNoType) {
return 3;
}
if (op_sig.inputs.at(1).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_LSTM: {
      auto lstm_params =
          reinterpret_cast<TfLiteLSTMParams*>(op_sig.builtin_data);
      // Validate the pointer before its first dereference below.
      TFLITE_DCHECK(lstm_params != nullptr);
      if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
          op_sig.inputs.at(0).type == kTfLiteInt16 &&
          op_sig.inputs.at(2).type == kTfLiteInt8 &&
          op_sig.outputs.at(0).type == kTfLiteInt16) {
        return 5;
      }
if (lstm_params->kernel_type == kTfLiteLSTMFullKernel &&
op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (lstm_params->asymmetric_quantize_inputs) {
return 4;
}
return 3;
}
if (lstm_params->kernel_type == kTfLiteLSTMBasicKernel) {
return 2;
}
return 1;
}
case BuiltinOperator_SPLIT:
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(1).type == kTfLiteInt32) {
return 3;
}
if (op_sig.inputs.at(1).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SPARSE_TO_DENSE:
if (op_sig.inputs.at(2).type == kTfLiteInt8 ||
op_sig.inputs.at(2).type == kTfLiteUInt8) {
return 3;
}
if (op_sig.inputs.at(2).type == kTfLiteInt64) {
return 2;
}
return 1;
case BuiltinOperator_SLICE:
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 6;
}
if (op_sig.inputs.at(0).dims.size() > 4) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_UNPACK:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
return 1;
case BuiltinOperator_DEQUANTIZE:
if (op_sig.inputs.at(0).type == kTfLiteInt4) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteFloat16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
if (op_sig.ext_options.dequantize.is_per_channel_quantized) {
return 5;
}
return 2;
}
return 1;
case BuiltinOperator_QUANTIZE:
if (op_sig.inputs.at(0).type == kTfLiteInt4 ||
op_sig.outputs.at(0).type == kTfLiteInt4) {
return 4;
}
if (op_sig.ext_options.quantize.is_per_channel_quantized) {
return 3;
}
if (op_sig.outputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_FLOOR_DIV:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32) {
return 2;
}
return 1;
case BuiltinOperator_FLOOR_MOD:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_L2_NORMALIZATION:
if (op_sig.outputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_ABS:
if (op_sig.inputs.at(0).type == kTfLiteInt32) {
return 5;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return op_sig.ext_options.abs.input_quantized ? 3 : 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
case BuiltinOperator_RELU:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
case BuiltinOperator_STRIDED_SLICE: {
auto strided_slice_params =
reinterpret_cast<TfLiteStridedSliceParams*>(op_sig.builtin_data);
TFLITE_DCHECK(strided_slice_params != nullptr);
if (strided_slice_params->offset == true) {
return 8;
}
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 7;
}
if (strided_slice_params->ellipsis_mask != 0 ||
strided_slice_params->new_axis_mask != 0) {
return 6;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 5;
}
if (op_sig.ext_options.strided_slice.num_dims > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_REVERSE_V2:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 2;
}
return 1;
case BuiltinOperator_RESIZE_BILINEAR: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
auto resize_bilinear_params =
reinterpret_cast<TfLiteResizeBilinearParams*>(op_sig.builtin_data);
TFLITE_DCHECK(resize_bilinear_params != nullptr);
if (resize_bilinear_params->half_pixel_centers) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
auto resize_nearest_neighbor_params =
reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
op_sig.builtin_data);
TFLITE_DCHECK(resize_nearest_neighbor_params != nullptr);
if (resize_nearest_neighbor_params->half_pixel_centers ||
resize_nearest_neighbor_params->align_corners) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_MAXIMUM:
case BuiltinOperator_MINIMUM:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_PACK:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
return 1;
case BuiltinOperator_TILE:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_SQUEEZE:
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_SPACE_TO_BATCH_ND:
case BuiltinOperator_BATCH_TO_SPACE_ND:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).dims.size() != 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_ADD: {
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt16 &&
!op_sig.ext_options.add.input_quantized) {
return 5;
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto add_params =
reinterpret_cast<TfLiteAddParams*>(op_sig.builtin_data);
if (add_params && !add_params->pot_scale_int16) {
return 3;
}
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SUB: {
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
auto sub_params =
reinterpret_cast<TfLiteSubParams*>(op_sig.builtin_data);
if (sub_params && !sub_params->pot_scale_int16) {
return 5;
}
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteInt64) {
return 4;
}
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_GATHER_ND:
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 5;
}
if (op_sig.inputs.at(1).type == kTfLiteInt16) {
return 4;
}
if (!op_sig.inputs.empty() &&
(op_sig.inputs.at(0).type == kTfLiteInt16)) {
return 3;
}
if (!op_sig.inputs.empty() && op_sig.inputs.at(0).type == kTfLiteString) {
return 2;
}
return 1;
case BuiltinOperator_DIV:
if (NeedBroadcastForBinaryInputs(op_sig) && GetInputMaxDims(op_sig) > 4) {
return 2;
}
return 1;
case BuiltinOperator_TANH:
case BuiltinOperator_LOGISTIC:
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_FILL:
if (op_sig.inputs.size() >= 2) {
if (op_sig.inputs.at(1).type == kTfLiteFloat16) return 4;
if (op_sig.inputs.at(1).type == kTfLiteInt8 ||
op_sig.inputs.at(1).type == kTfLiteInt16) {
return 3;
} else if ((op_sig.inputs.at(1).type == kTfLiteBool ||
op_sig.inputs.at(1).type == kTfLiteString)) {
return 2;
}
}
return 1;
case BuiltinOperator_EQUAL:
if (!op_sig.inputs.empty()) {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
}
return 1;
case BuiltinOperator_NOT_EQUAL:
if (!op_sig.inputs.empty()) {
if (op_sig.inputs.at(0).type == kTfLiteString) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
}
return 1;
case BuiltinOperator_LEAKY_RELU:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_RANGE:
if (op_sig.inputs.at(0).type == kTfLiteInt64) {
return 2;
}
return 1;
case BuiltinOperator_BATCH_MATMUL: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto batch_mat_mul_params =
reinterpret_cast<TfLiteBatchMatMulParams*>(op_sig.builtin_data);
if (batch_mat_mul_params &&
batch_mat_mul_params->asymmetric_quantize_inputs) {
return 4;
}
}
return 1;
}
case BuiltinOperator_PAD:
case BuiltinOperator_PADV2:
if (op_sig.inputs.at(0).dims.size() > 4) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_CONCATENATION:
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SOFTMAX:
case BuiltinOperator_MEAN:
case BuiltinOperator_MIRROR_PAD:
case BuiltinOperator_REDUCE_MAX:
case BuiltinOperator_REDUCE_MIN:
case BuiltinOperator_RELU6:
case BuiltinOperator_RSQRT:
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto rnn_params =
reinterpret_cast<TfLiteRNNParams*>(op_sig.builtin_data);
if (rnn_params && rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto sequence_rnn_params =
reinterpret_cast<TfLiteSequenceRNNParams*>(op_sig.builtin_data);
if (sequence_rnn_params &&
sequence_rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto bidirectional_sequence_rnn_params =
reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
op_sig.builtin_data);
if (bidirectional_sequence_rnn_params &&
bidirectional_sequence_rnn_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
if (op_sig.inputs.at(1).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
auto bidirectional_sequence_lstm_params =
reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
op_sig.builtin_data);
if (bidirectional_sequence_lstm_params &&
bidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
return 3;
} else {
return 2;
}
}
return 1;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
auto unidirectional_sequence_lstm_params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
op_sig.builtin_data);
if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteInt16) {
return 5;
}
if (unidirectional_sequence_lstm_params &&
unidirectional_sequence_lstm_params->diagonal_recurrent_tensors) {
return 4;
}
if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&
op_sig.inputs.at(2).type == kTfLiteInt8 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
if (unidirectional_sequence_lstm_params &&
unidirectional_sequence_lstm_params->asymmetric_quantize_inputs) {
return 3;
}
return 2;
}
return 1;
}
case BuiltinOperator_ARG_MAX:
case BuiltinOperator_ARG_MIN:
if (op_sig.inputs.at(0).type == kTfLiteBool) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_SELECT: {
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 4;
}
if (op_sig.inputs.at(0).dims.size() == 5 ||
op_sig.inputs.at(1).dims.size() == 5 ||
op_sig.inputs.at(2).dims.size() == 5)
return 3;
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_LESS:
case BuiltinOperator_GREATER_EQUAL: {
if (op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
}
case BuiltinOperator_SELECT_V2: {
if (op_sig.inputs.at(0).type == kTfLiteUInt32) {
return 2;
}
return 1;
}
case BuiltinOperator_SPACE_TO_DEPTH:
case BuiltinOperator_SPLIT_V:
case BuiltinOperator_SUM:
case BuiltinOperator_LOG_SOFTMAX:
case BuiltinOperator_GREATER:
case BuiltinOperator_LESS_EQUAL:
case BuiltinOperator_SQUARED_DIFFERENCE:
case BuiltinOperator_DEPTH_TO_SPACE:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_TOPK_V2:
if (op_sig.inputs.at(0).type == kTfLiteInt16 ||
op_sig.inputs.at(1).type == kTfLiteInt16 ||
op_sig.outputs.at(1).type == kTfLiteInt16) {
return 3;
}
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
return 1;
case BuiltinOperator_EXP:
case BuiltinOperator_LOG:
case BuiltinOperator_REDUCE_PROD:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteInt16) {
return 2;
}
return 1;
case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
if (op_sig.inputs.at(2).type == kTfLiteInt64) return 2;
return 1;
case BuiltinOperator_BROADCAST_TO:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteInt16) {
return 3;
}
return 2;
case BuiltinOperator_CAST:
if (op_sig.inputs.at(0).type == kTfLiteBFloat16 ||
op_sig.outputs.at(0).type == kTfLiteBFloat16) {
return 7;
} else if (op_sig.inputs.at(0).type == kTfLiteInt4 &&
op_sig.outputs.at(0).type == kTfLiteFloat32) {
return 6;
} else if (op_sig.inputs.at(0).type == kTfLiteFloat64 ||
op_sig.outputs.at(0).type == kTfLiteFloat64 ||
op_sig.inputs.at(0).type == kTfLiteFloat16 ||
op_sig.outputs.at(0).type == kTfLiteFloat16) {
return 5;
} else if (op_sig.inputs.at(0).type == kTfLiteUInt16 ||
op_sig.outputs.at(0).type == kTfLiteUInt16) {
return 4;
} else if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.outputs.at(0).type == kTfLiteInt8) {
return 3;
} else if (op_sig.inputs.at(0).type == kTfLiteUInt32 ||
op_sig.outputs.at(0).type == kTfLiteUInt32) {
return 2;
}
return 1;
case BuiltinOperator_WHERE:
if (op_sig.inputs.at(0).type == kTfLiteBool) return 1;
return 2;
case BuiltinOperator_GELU:
if (op_sig.inputs.at(0).type == kTfLiteInt8 ||
op_sig.inputs.at(0).type == kTfLiteUInt8) {
return 2;
}
return 1;
default:
return 1;
}
}
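// Raises each builtin operator code's version field to the minimum version
// its signature requires; versions that are already high enough are left
// untouched.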
void UpdateOpVersion(uint8_t* model_buffer_pointer) {
auto model = GetMutableModel(model_buffer_pointer);
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
OperatorCode* op_code =
model->mutable_operator_codes()->GetMutableObject(op->opcode_index());
auto builtin_code = GetBuiltinCode(op_code);
if (builtin_code != BuiltinOperator_CUSTOM) {
OpSignature op_sig = GetOpSignature(op_code, op, subgraph, model);
int32_t op_ver = GetBuiltinOperatorVersion(op_sig);
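        // GetOpSignature heap-allocates builtin_data; release it now that the
        // version has been computed.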
if (op_sig.builtin_data) {
free(op_sig.builtin_data);
}
if (op_ver <= op_code->version()) {
continue;
}
if (!op_code->mutate_version(op_ver)) {
LOG(ERROR) << "Can't set operator "
<< EnumNameBuiltinOperator(builtin_code) << " to version "
<< op_ver;
}
}
}
}
}
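// Illustrative usage sketch (hypothetical caller):
//   std::string model_content = /* serialized .tflite flatbuffer */;
//   UpdateOpVersion(reinterpret_cast<uint8_t*>(&model_content[0]));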
} | #include "tensorflow/lite/tools/versioning/op_version.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
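// Overloads that build the fake tensor specs used by the tests below: one
// spec per type, optionally with a fixed rank (every dimension is 4), or two
// specs of the same type with different ranks for broadcast cases.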
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const std::vector<TfLiteType>& types) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (auto type : types) {
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const std::vector<TfLiteType>& types, int rank) {
std::vector<OpSignatureTensorSpec> tensor_specs;
for (auto type : types) {
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
for (int i = 0; i < rank; i++) {
tensor_spec.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec);
}
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
tensor_specs.push_back(tensor_spec);
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type, const int dim) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec = {};
tensor_spec.type = type;
for (int i = 0; i < dim; i++) {
tensor_spec.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec);
return tensor_specs;
}
std::vector<OpSignatureTensorSpec> CreateOpSignatureTensorSpecs(
const TfLiteType type, const int dim1, const int dim2) {
std::vector<OpSignatureTensorSpec> tensor_specs;
OpSignatureTensorSpec tensor_spec1 = {};
tensor_spec1.type = type;
for (int i = 0; i < dim1; i++) {
tensor_spec1.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec1);
OpSignatureTensorSpec tensor_spec2 = {};
tensor_spec2.type = type;
for (int i = 0; i < dim2; i++) {
tensor_spec2.dims.push_back(4);
}
tensor_specs.push_back(tensor_spec2);
return tensor_specs;
}
}
TEST(OpVersionTest, VersioningSpareToDense) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8, kTfLiteInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8, kTfLiteUInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteInt64, kTfLiteInt64}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_SPARSE_TO_DENSE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
void SimpleVersioningTest(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
void SimpleVersioningTestExtended(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
SimpleVersioningTest(op);
}
void SimpleOutputVersioningTest(BuiltinOperator op) {
OpSignature fake_op_sig = {
.op = op,
.inputs = std::vector<OpSignatureTensorSpec>{},
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = op,
.inputs = std::vector<OpSignatureTensorSpec>{},
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningEqualTest) {
SimpleVersioningTest(BuiltinOperator_EQUAL);
OpSignature fake_op_sig = {
.op = BuiltinOperator_EQUAL,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningNotEqualTest) {
SimpleVersioningTest(BuiltinOperator_NOT_EQUAL);
OpSignature fake_op_sig = {
.op = BuiltinOperator_NOT_EQUAL,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningLessTest) {
SimpleVersioningTest(BuiltinOperator_LESS);
}
TEST(OpVersionTest, VersioningLessEqualTest) {
SimpleVersioningTest(BuiltinOperator_LESS_EQUAL);
}
TEST(OpVersionTest, VersioningGreaterTest) {
SimpleVersioningTest(BuiltinOperator_GREATER);
}
TEST(OpVersionTest, VersioningGreaterEqualTest) {
SimpleVersioningTest(BuiltinOperator_GREATER_EQUAL);
}
TEST(OpVersionTest, VersioningSpaceToBatchNDTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SPACE_TO_BATCH_ND,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningLogSoftmaxTest) {
SimpleVersioningTest(BuiltinOperator_LOG_SOFTMAX);
}
TEST(OpVersionTest, VersioningPackTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_PACK;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningUnpackTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_UNPACK,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningRangeTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_RANGE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningReluTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_RELU,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningBatchToSpaceNDTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_BATCH_TO_SPACE_ND,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 3);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningTanhTest) {
SimpleVersioningTest(BuiltinOperator_TANH);
}
TEST(OpVersionTest, VersioningStridedSliceTest) {
TfLiteStridedSliceParams strided_slice_params = {};
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_STRIDED_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
fake_op_sig.builtin_data = reinterpret_cast<void*>(&strided_slice_params);
strided_slice_params.ellipsis_mask = 0;
strided_slice_params.new_axis_mask = 2;
fake_op_sig.ext_options.strided_slice.num_dims = 5;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
strided_slice_params.new_axis_mask = 0;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.ext_options.strided_slice.num_dims = 4;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
strided_slice_params.offset = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
}
TEST(OpVersionTest, VersioningSpaceToDepthTest) {
SimpleVersioningTest(BuiltinOperator_SPACE_TO_DEPTH);
}
TEST(OpVersionTest, VersioningSliceTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
  fake_op_sig = {
      .op = BuiltinOperator_SLICE,
  };
  fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteString, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
  fake_op_sig = {
      .op = BuiltinOperator_SLICE,
  };
  fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_SLICE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
}
TEST(OpVersionTest, VersioningLogisticTest) {
  SimpleVersioningTest(BuiltinOperator_LOGISTIC);
}
TEST(OpVersionTest, VersioningL2NormTest) {
SimpleOutputVersioningTest(BuiltinOperator_L2_NORMALIZATION);
}
TEST(OpVersionTest, VersioningMaxTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MAXIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_MAXIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningMinTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MINIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_MINIMUM,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningMeanTest) {
SimpleVersioningTestExtended(BuiltinOperator_MEAN);
}
TEST(OpVersionTest, VersioningSumTest) {
SimpleVersioningTest(BuiltinOperator_SUM);
}
TEST(OpVersionTest, VersioningReduceMinTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MIN);
}
TEST(OpVersionTest, VersioningReduceMaxTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MAX);
}
TEST(OpVersionTest, VersioningMirrorPadTest) {
SimpleVersioningTestExtended(BuiltinOperator_MIRROR_PAD);
}
TEST(OpVersionTest, VersioningReduceProdTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_REDUCE_PROD;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningAddTest) {
TfLiteAddParams add_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_ADD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&add_params)};
add_params.pot_scale_int16 = false;
fake_op_sig.ext_options.add.input_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.ext_options.add.input_quantized = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
SimpleVersioningTest(BuiltinOperator_ADD);
}
TEST(OpVersionTest, VersioningSubTest) {
TfLiteSubParams sub_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_SUB,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&sub_params)};
sub_params.pot_scale_int16 = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8, 4, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
SimpleVersioningTest(BuiltinOperator_SUB);
}
TEST(OpVersionTest, VersioningMUL7TestInt16) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
fake_op_sig.ext_options.mul.input_quantized = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
}
TEST(OpVersionTest, VersioningMUL7TestUInt32) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 7);
}
TEST(OpVersionTest, VersioningMUL6Test) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteComplex64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
}
TEST(OpVersionTest, VersioningMUL5Test) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_MUL;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningSub4Test) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SUB,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt64),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
void SimpleMulVersioningTest(TfLiteType data_type, float multiplier,
int version) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_MUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{data_type, data_type}),
.outputs = CreateOpSignatureTensorSpecs(data_type),
};
fake_op_sig.ext_options.mul = {1.0f, 1.0f, 1.0f / multiplier};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), version);
}
TEST(OpVersionTest, VersioningMulTest) {
SimpleMulVersioningTest(kTfLiteUInt8, 0.5f, 1);
SimpleMulVersioningTest(kTfLiteInt8, 0.5f, 2);
SimpleMulVersioningTest(kTfLiteInt8, 2.0f, 3);
}
TEST(OpVersionTest, VersioningPadTest) {
SimpleVersioningTest(BuiltinOperator_PAD);
}
TEST(OpVersionTest, VersioningPadV2Test) {
SimpleVersioningTest(BuiltinOperator_PADV2);
}
TEST(OpVersionTest, VersioningConcatenationTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_CONCATENATION;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningSelectTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt32, kTfLiteUInt32, kTfLiteUInt32}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8, kTfLiteUInt8}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8, kTfLiteInt8}, 4);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32},
4);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningSelectV2Test) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT_V2;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt32, kTfLiteUInt32, kTfLiteUInt32}, 5);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SELECT_V2;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32, kTfLiteInt32}, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningRelu6Test) {
SimpleVersioningTestExtended(BuiltinOperator_RELU6);
}
TEST(OpVersionTest, VersioningFullyConnectedTest) {
TfLiteFullyConnectedParams fully_connected_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.weights_format =
kTfLiteFullyConnectedWeightsFormatDefault;
fake_op_sig.ext_options.fully_connected.sparse_weight = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.asymmetric_quantize_inputs = false;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fully_connected_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 9);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fully_connected_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 11);
fake_op_sig = {
.op = BuiltinOperator_FULLY_CONNECTED,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&fully_connected_params),
};
fake_op_sig.ext_options.fully_connected.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 12);
}
TEST(OpVersionTest, VersioningDequantizeTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.ext_options.dequantize.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig = {
.op = BuiltinOperator_DEQUANTIZE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningQuantizeTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_QUANTIZE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.ext_options.quantize.is_per_channel_quantized = false;
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.ext_options.quantize.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningConv2DTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteUInt8, kTfLiteUInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
fake_op_sig.ext_options.conv_2d.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig.op = BuiltinOperator_CONV_2D;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8});
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.ext_options.conv_2d.is_grouped_convolution = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
TfLiteConvParams conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&conv_params),
};
conv_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 8);
}
TEST(OpVersionTest, VersioningFloorDivOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_DIV,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningFloorModOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_FLOOR_MOD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_FLOOR_MOD,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningTransposeConvOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteUInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt8, kTfLiteInt8}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
const auto none_type = kTfLiteNoType;
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, none_type}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
TfLiteTransposeConvParams transpose_conv_params = {};
transpose_conv_params.activation = kTfLiteActRelu;
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt32, kTfLiteInt8, kTfLiteInt8, none_type}),
.builtin_data = reinterpret_cast<void*>(&transpose_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
transpose_conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE_CONV,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&transpose_conv_params),
};
transpose_conv_params.quantized_bias_type = kTfLiteInt32;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningSVDFOperatorTest) {
TfLiteSVDFParams svdf_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_SVDF,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32,
kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&svdf_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_SVDF,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8, kTfLiteFloat32,
kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&svdf_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
svdf_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
svdf_params = {};
fake_op_sig = {
.op = BuiltinOperator_SVDF,
.inputs = CreateOpSignatureTensorSpecs(std::vector<TfLiteType>{
kTfLiteInt8, kTfLiteInt8, kTfLiteInt32, kTfLiteInt32, kTfLiteInt16}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&svdf_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningDepthwiseConv2DTest) {
TfLiteDepthwiseConvParams depthwise_conv_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.ext_options.depthwise_conv_2d.is_per_channel_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 6);
depthwise_conv_params = {};
fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
depthwise_conv_params.dilation_width_factor = 2;
depthwise_conv_params.dilation_height_factor = 2;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_DEPTHWISE_CONV_2D,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params),
};
depthwise_conv_params.dilation_width_factor = 1;
depthwise_conv_params.dilation_height_factor = 1;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningTileOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TILE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_TILE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteString),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningTransposeTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteBool, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteBool, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_TRANSPOSE,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningGatherNdOperatorTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteString, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt32}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt16}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {
.op = BuiltinOperator_GATHER_ND,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteBool, kTfLiteInt16}),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningDivTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_DIV,
};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 5, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 5, 5);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8, 4, 4);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningFillTest) {
OpSignature fake_op_sig = {BuiltinOperator_FILL};
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteFloat16});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteFloat16});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt8});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt64, kTfLiteInt16});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteBool});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteString});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt32, kTfLiteInt32});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
}
TEST(OpVersionTest, VersioningResizeBilinearTest) {
TfLiteResizeBilinearParams resize_bilinear_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
resize_bilinear_params.align_corners = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
resize_bilinear_params.align_corners = false;
resize_bilinear_params.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_bilinear_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
resize_bilinear_params.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_bilinear_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningResizeNearestNeighborTest) {
TfLiteResizeNearestNeighborParams resize_nearest_neighbor_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&resize_nearest_neighbor_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
resize_nearest_neighbor_params.align_corners = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_nearest_neighbor_params.align_corners = false;
resize_nearest_neighbor_params.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_nearest_neighbor_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&resize_nearest_neighbor_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
resize_nearest_neighbor_params.align_corners = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
resize_nearest_neighbor_params = {};
fake_op_sig = {
.op = BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&resize_nearest_neighbor_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningAbsTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
fake_op_sig.ext_options.abs.input_quantized = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_ABS,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_ABS;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 5);
}
TEST(OpVersionTest, VersioningSignTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_SIGN;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_SIGN;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningBatchMatMulTest) {
TfLiteBatchMatMulParams batch_mat_mul_params = {};
OpSignature fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
batch_mat_mul_params = {};
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt16, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_BATCH_MATMUL,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.builtin_data = reinterpret_cast<void*>(&batch_mat_mul_params),
};
batch_mat_mul_params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningSquaredDifferenceTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_SQUARED_DIFFERENCE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_SQUARED_DIFFERENCE,
.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteInt8, kTfLiteInt8}),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningRsqrtTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_RSQRT;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningBroadcastToTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_BROADCAST_TO,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_BROADCAST_TO,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
fake_op_sig = {
.op = BuiltinOperator_BROADCAST_TO,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
.outputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
}
TEST(OpVersionTest, VersioningGeluTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_GELU;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.op = BuiltinOperator_GELU;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.op = BuiltinOperator_GELU;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningUnidirectionalLstmTest) {
TfLiteUnidirectionalSequenceLSTMParams params = {};
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteFloat32});
fake_op_sig.outputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
fake_op_sig.builtin_data = reinterpret_cast<void*>(¶ms);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteInt8});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
params.asymmetric_quantize_inputs = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
params.diagonal_recurrent_tensors = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningExpTest) {
OpSignature fake_op_sig = {
.op = BuiltinOperator_EXP,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig = {
.op = BuiltinOperator_EXP,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig = {
.op = BuiltinOperator_EXP,
.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningLogTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_LOG;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteFloat32);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16);
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
TEST(OpVersionTest, VersioningDynamicUpdateSliceTest) {
OpSignature fake_op_sig = {};
fake_op_sig.op = BuiltinOperator_DYNAMIC_UPDATE_SLICE;
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteInt32});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
fake_op_sig.inputs = CreateOpSignatureTensorSpecs(
std::vector<TfLiteType>{kTfLiteFloat32, kTfLiteFloat32, kTfLiteInt64});
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/op_version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/op_version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
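The expectations in the test above pin each operator's version to the tensor types and builtin params carried in its OpSignature. As a minimal sketch of that pattern, grounded only in what the FLOOR_DIV cases above assert (FloorDivVersion is a hypothetical helper for illustration, not the real dispatch inside GetBuiltinOperatorVersion):
#include "tensorflow/lite/core/c/c_api_types.h"
// Hypothetical helper mirroring the FLOOR_DIV expectations asserted above;
// the real per-op dispatch lives in op_version.cc.
int FloorDivVersion(TfLiteType input_type) {
  if (input_type == kTfLiteInt16) return 3;    // int16 inputs -> version 3
  if (input_type == kTfLiteFloat32) return 2;  // float inputs -> version 2
  return 1;                                    // original int32 behavior
}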
60745d33-3a8b-4264-9983-da7112c95926 | cpp | tensorflow/tensorflow | signature_runner | tensorflow/lite/core/signature_runner.cc | tensorflow/lite/core/signature_runner_test.cc | #include "tensorflow/lite/core/signature_runner.h"
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/internal/signature_def.h"
namespace tflite {
namespace impl {
SignatureRunner::SignatureRunner(const internal::SignatureDef* signature_def,
Subgraph* subgraph)
: signature_def_(signature_def), subgraph_(subgraph) {
for (const auto& it : signature_def_->inputs) {
input_names_.push_back(it.first.c_str());
}
for (const auto& it : signature_def_->outputs) {
output_names_.push_back(it.first.c_str());
}
}
TfLiteTensor* SignatureRunner::input_tensor(const char* input_name) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return nullptr;
}
return subgraph_->tensor(it->second);
}
const TfLiteTensor* SignatureRunner::output_tensor(
const char* output_name) const {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return nullptr;
}
return subgraph_->tensor(it->second);
}
TfLiteStatus SignatureRunner::SetInputBufferHandle(
const char* input_name, TfLiteBufferHandle buffer_handle,
TfLiteDelegate* delegate, bool release_existing_buffer_handle) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->SetBufferHandle(it->second, buffer_handle, delegate,
release_existing_buffer_handle);
}
TfLiteStatus SignatureRunner::SetOutputBufferHandle(
const char* output_name, TfLiteBufferHandle buffer_handle,
TfLiteDelegate* delegate, bool release_existing_buffer_handle) {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return kTfLiteError;
}
return subgraph_->SetBufferHandle(it->second, buffer_handle, delegate,
release_existing_buffer_handle);
}
TfLiteStatus SignatureRunner::ResizeInputTensor(
const char* input_name, const std::vector<int>& new_size) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->ResizeInputTensor(it->second, new_size);
}
TfLiteStatus SignatureRunner::ResizeInputTensorStrict(
const char* input_name, const std::vector<int>& new_size) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->ResizeInputTensorStrict(it->second, new_size);
}
TfLiteStatus SignatureRunner::Invoke() {
if (subgraph_->continue_invocation_)
(void)subgraph_->continue_invocation_->test_and_set();
TF_LITE_ENSURE_STATUS(subgraph_->Invoke());
if (!allow_buffer_handle_output_) {
for (int tensor_index : subgraph_->outputs()) {
TF_LITE_ENSURE_STATUS(
subgraph_->EnsureTensorDataIsReadable(tensor_index));
}
}
return kTfLiteOk;
}
TfLiteStatus SignatureRunner::SetCustomAllocationForInputTensor(
const char* input_name, const TfLiteCustomAllocation& allocation,
int64_t flags) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->SetCustomAllocationForTensor(it->second, allocation, flags);
}
TfLiteStatus SignatureRunner::SetCustomAllocationForOutputTensor(
const char* output_name, const TfLiteCustomAllocation& allocation,
int64_t flags) {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return kTfLiteError;
}
return subgraph_->SetCustomAllocationForTensor(it->second, allocation, flags);
}
}
} | #include "tensorflow/lite/core/signature_runner.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace impl {
namespace {
TEST(SignatureRunnerTest, TestMultiSignatures) {
TestErrorReporter reporter;
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/multi_signatures.bin", &reporter);
ASSERT_TRUE(model);
ops::builtin::BuiltinOpResolver resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
std::vector<const std::string*> signature_defs =
interpreter->signature_keys();
ASSERT_EQ(signature_defs.size(), 2);
ASSERT_EQ(*(signature_defs[0]), "add");
ASSERT_EQ(*(signature_defs[1]), "sub");
ASSERT_EQ(interpreter->GetSignatureRunner("dummy"), nullptr);
SignatureRunner* add_runner =
interpreter->GetSignatureRunner(signature_defs[0]->c_str());
ASSERT_NE(add_runner, nullptr);
ASSERT_EQ(add_runner->signature_key(), "add");
const std::vector<const char*>& input_names = add_runner->input_names();
const std::vector<const char*>& output_names = add_runner->output_names();
ASSERT_EQ(input_names.size(), 1);
ASSERT_EQ(std::string(input_names[0]), "x");
ASSERT_EQ(output_names.size(), 1);
ASSERT_EQ(std::string(output_names[0]), "output_0");
ASSERT_EQ(add_runner->ResizeInputTensor("x", {2}), kTfLiteOk);
ASSERT_EQ(add_runner->AllocateTensors(), kTfLiteOk);
TfLiteTensor* add_input = add_runner->input_tensor("x");
ASSERT_EQ(add_runner->input_tensor("dummy"), nullptr);
const TfLiteTensor* add_output = add_runner->output_tensor("output_0");
ASSERT_EQ(add_runner->output_tensor("dummy"), nullptr);
ASSERT_NE(add_input, nullptr);
ASSERT_NE(add_output, nullptr);
add_input->data.f[0] = 2;
add_input->data.f[1] = 4;
ASSERT_EQ(add_runner->Invoke(), kTfLiteOk);
ASSERT_EQ(add_output->data.f[0], 4);
ASSERT_EQ(add_output->data.f[1], 6);
SignatureRunner* sub_runner = interpreter->GetSignatureRunner("sub");
ASSERT_NE(sub_runner, nullptr);
ASSERT_EQ(sub_runner->signature_key(), "sub");
const std::vector<const char*>& input_names2 = sub_runner->input_names();
const std::vector<const char*>& output_names2 = sub_runner->output_names();
ASSERT_EQ(input_names2.size(), 1);
ASSERT_EQ(std::string(input_names2[0]), "x");
ASSERT_EQ(output_names2.size(), 1);
ASSERT_EQ(std::string(output_names2[0]), "output_0");
ASSERT_EQ(sub_runner->ResizeInputTensor("x", {3}), kTfLiteOk);
ASSERT_EQ(sub_runner->AllocateTensors(), kTfLiteOk);
TfLiteTensor* sub_input = sub_runner->input_tensor("x");
const TfLiteTensor* sub_output = sub_runner->output_tensor("output_0");
ASSERT_NE(sub_input, nullptr);
ASSERT_NE(sub_output, nullptr);
sub_input->data.f[0] = 2;
sub_input->data.f[1] = 4;
sub_input->data.f[2] = 6;
ASSERT_EQ(sub_runner->Invoke(), kTfLiteOk);
ASSERT_EQ(sub_output->data.f[0], -1);
ASSERT_EQ(sub_output->data.f[1], 1);
ASSERT_EQ(sub_output->data.f[2], 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/signature_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/signature_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
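Both the implementation and the test above rely on name lookup that can fail: input_tensor/output_tensor return nullptr and the setters return kTfLiteError for unknown names. A caller-side sketch of that contract (runner is assumed to be a SignatureRunner* already obtained from an Interpreter, as in the test):
// Sketch only: always check lookups before dereferencing, since an unknown
// name reports an error through the subgraph and yields nullptr.
if (TfLiteTensor* x = runner->input_tensor("x")) {
  x->data.f[0] = 1.0f;
} else {
  // "x" is not an input of this signature; handle the error here.
}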
66e616d3-0a41-4d1a-8c15-b0138948b0b7 | cpp | tensorflow/tensorflow | xnnpack_plugin | tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin_test.cc | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
auto options(TfLiteXNNPackDelegateOptionsDefault());
const auto* xnnpack_settings = tflite_settings->xnnpack_settings();
if (xnnpack_settings) {
options.num_threads = xnnpack_settings->num_threads();
if (xnnpack_settings->flags()) {
options.flags = xnnpack_settings->flags();
}
if (xnnpack_settings->weight_cache_file_path()) {
options.weight_cache_file_path =
xnnpack_settings->weight_cache_file_path()->c_str();
}
}
return TfLiteXNNPackDelegateCreate(&options);
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
TfLiteXNNPackDelegateDelete(delegate);
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
class XnnpackTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
void SetUp() override {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~XnnpackTest() override = default;
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
constexpr int XnnpackTest::kNumThreadsForTest;
TEST_F(XnnpackTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteXnnpackDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, SetsCorrectThreadCount) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
pthreadpool_t threadpool =
static_cast<pthreadpool_t>(TfLiteXNNPackDelegateGetThreadPool(delegate));
int thread_count = pthreadpool_get_threads_count(threadpool);
EXPECT_EQ(thread_count, kNumThreadsForTest);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsByDefault) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesSpecifiedFlagsWhenNonzero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsWhenZero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
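The tests above always pair create with destroy through the plugin's function table. A short sketch of that generic usage, assuming settings points at a finished tflite::TFLiteSettings flatbuffer built as in the fixture:
// Sketch: any TfLiteDelegatePlugin is driven through the same three function
// pointers, so callers need not know it is XNNPACK underneath.
const TfLiteDelegatePlugin* plugin = TfLiteXnnpackDelegatePluginCApi();
TfLiteDelegate* delegate = plugin->create(settings);
if (delegate != nullptr) {
  // ... hand the delegate to an interpreter here ...
  plugin->destroy(delegate);
}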
e737928b-b99f-4fe5-8078-5b380e7871df | cpp | tensorflow/tensorflow | stable_delegate_registry | tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.cc | tensorflow/lite/core/acceleration/configuration/stable_delegate_registry_test.cc | #include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include <string>
#include "absl/synchronization/mutex.h"
#include "tensorflow/lite/core/acceleration/configuration/c/stable_delegate.h"
namespace tflite {
namespace delegates {
void StableDelegateRegistry::RegisterStableDelegate(
const TfLiteStableDelegate* delegate) {
auto* const instance = StableDelegateRegistry::GetSingleton();
instance->RegisterStableDelegateImpl(delegate);
}
const TfLiteStableDelegate* StableDelegateRegistry::RetrieveStableDelegate(
const std::string& name) {
auto* const instance = StableDelegateRegistry::GetSingleton();
return instance->RetrieveStableDelegateImpl(name);
}
void StableDelegateRegistry::RegisterStableDelegateImpl(
const TfLiteStableDelegate* delegate) {
absl::MutexLock lock(&mutex_);
registry_[delegate->delegate_name] = delegate;
}
const TfLiteStableDelegate* StableDelegateRegistry::RetrieveStableDelegateImpl(
const std::string& name) {
absl::MutexLock lock(&mutex_);
if (registry_.find(name) == registry_.end()) {
return nullptr;
} else {
return registry_[name];
}
}
StableDelegateRegistry* StableDelegateRegistry::GetSingleton() {
static auto* instance = new StableDelegateRegistry();
return instance;
}
}
} | #include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/acceleration/configuration/c/stable_delegate.h"
namespace {
using tflite::delegates::StableDelegateRegistry;
TfLiteStableDelegate CreateTestStableDelegate() {
TfLiteStableDelegate stable_delegate = {TFL_STABLE_DELEGATE_ABI_VERSION,
"test_delegate", "V1.0.0", nullptr};
return stable_delegate;
}
class StableDelegateRegistryTest : public testing::Test {
public:
void SetUp() override {
stable_delegate_ = CreateTestStableDelegate();
StableDelegateRegistry::RegisterStableDelegate(&stable_delegate_);
}
protected:
TfLiteStableDelegate stable_delegate_;
};
TEST_F(StableDelegateRegistryTest, TestRetrieval) {
EXPECT_EQ(StableDelegateRegistry::RetrieveStableDelegate("test_delegate"),
&stable_delegate_);
}
TEST_F(StableDelegateRegistryTest, NoRegistrationFound) {
EXPECT_EQ(
StableDelegateRegistry::RetrieveStableDelegate("not_valid_delegate"),
nullptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/stable_delegate_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
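Because the registry is a process-wide singleton guarded by a mutex, registration is typically done once at load time. A hedged sketch of that idiom (kMyDelegate and kRegistered are illustrative names, not part of the API shown above):
// Hypothetical load-time registration using a lambda-initialized static.
static TfLiteStableDelegate kMyDelegate = {
    TFL_STABLE_DELEGATE_ABI_VERSION, "my_delegate", "V1.0.0", nullptr};
static const bool kRegistered = [] {
  tflite::delegates::StableDelegateRegistry::RegisterStableDelegate(
      &kMyDelegate);
  return true;
}();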
61891652-f14f-40b2-a1d0-94d7c26d4bf6 | cpp | tensorflow/tensorflow | nnapi_plugin | tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin_test.cc | #include "tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/core/acceleration/configuration/nnapi_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
tflite::delegates::NnapiPlugin nnapi_plugin(*tflite_settings);
auto support_library_handle = nnapi_plugin.GetSupportLibraryHandle();
if (support_library_handle) {
auto nnapi_support_library_driver =
reinterpret_cast<const NnApiSLDriverImplFL5*>(support_library_handle);
return new tflite::StatefulNnApiDelegate(nnapi_support_library_driver,
nnapi_plugin.Options());
}
return new tflite::StatefulNnApiDelegate(nnapi_plugin.Options());
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
}
static int DelegateErrno(TfLiteDelegate* from_delegate) {
auto nnapi_delegate =
static_cast<tflite::StatefulNnApiDelegate*>(from_delegate);
return nnapi_delegate->GetNnApiErrno();
}
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.h"
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
class NnapiTest : public testing::Test {
public:
void SetUp() override {
NNAPISettingsBuilder nnapi_settings_builder(flatbuffer_builder_);
flatbuffers::Offset<NNAPISettings> nnapi_settings =
nnapi_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_nnapi_settings(nnapi_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~NnapiTest() override {}
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
TEST_F(NnapiTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate = TfLiteNnapiDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteNnapiDelegatePluginCApi()->destroy(delegate);
}
TEST_F(NnapiTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate = TfLiteNnapiDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteNnapiDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteNnapiDelegatePluginCApi()->destroy(delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
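A hedged sketch, not part of the plugin API: pairing the plugin's create/destroy in a std::unique_ptr keeps the heap-allocated StatefulNnApiDelegate from leaking on any path; settings is assumed to be a valid TFLiteSettings root, as in the fixture above.
#include <memory>
// Custom deleter that routes destruction back through the plugin vtable.
auto nnapi_deleter = [](TfLiteDelegate* d) {
  if (d != nullptr) TfLiteNnapiDelegatePluginCApi()->destroy(d);
};
std::unique_ptr<TfLiteDelegate, decltype(nnapi_deleter)> delegate(
    TfLiteNnapiDelegatePluginCApi()->create(settings), nnapi_deleter);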
e8ad88dc-6c07-443c-8df2-ecbfd02997c0 | cpp | tensorflow/tensorflow | register | tensorflow/lite/core/kernels/register.cc | tensorflow/lite/core/kernels/register_test.cc | #include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tflite_with_xnnpack_optional.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_NUMERIC_VERIFY();
TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
TfLiteRegistration* Register_MFCC();
TfLiteRegistration* Register_DETECTION_POSTPROCESS();
}
namespace builtin {
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_ABS, Register_ABS(), 1,
5);
AddBuiltin(BuiltinOperator_HARD_SWISH, Register_HARD_SWISH());
AddBuiltin(BuiltinOperator_RELU, Register_RELU(), 1,
3);
AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
AddBuiltin(BuiltinOperator_RELU_0_TO_1, Register_RELU_0_TO_1());
AddBuiltin(BuiltinOperator_RELU6, Register_RELU6(), 1,
3);
AddBuiltin(BuiltinOperator_TANH, Register_TANH(), 1,
3);
AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC(),
1,
3);
AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(),
1,
8);
AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(),
1,
7);
AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(),
1,
4);
AddBuiltin(BuiltinOperator_RNN, Register_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
Register_BIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
Register_UNIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP(),
1,
4);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
Register_EMBEDDING_LOOKUP_SPARSE());
AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
1,
13);
AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(),
1,
3);
AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(),
1,
4);
AddBuiltin(BuiltinOperator_ADD, Register_ADD(),
1,
5);
AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND(),
1,
4);
AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND(),
1,
4);
AddBuiltin(BuiltinOperator_MUL, Register_MUL(), 1,
7);
AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION(),
1,
2);
AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
Register_LOCAL_RESPONSE_NORMALIZATION());
AddBuiltin(BuiltinOperator_LSTM, Register_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
Register_BIDIRECTIONAL_SEQUENCE_LSTM(), 1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
Register_UNIDIRECTIONAL_SEQUENCE_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_PAD, Register_PAD(), 1,
4);
AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), 1,
4);
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR(),
1,
4);
AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
Register_RESIZE_NEAREST_NEIGHBOR(),
1,
4);
AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(),
1,
2);
AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, Register_DEPTH_TO_SPACE(),
1,
2);
AddBuiltin(BuiltinOperator_GATHER, Register_GATHER(),
1,
7);
AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(),
1,
6);
AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(),
1,
3);
AddBuiltin(BuiltinOperator_DIV, Register_DIV(),
1,
2);
AddBuiltin(BuiltinOperator_SUB, Register_SUB(),
1,
5);
AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(),
1,
4);
AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V(),
1,
2);
AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(),
1,
2);
AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE(),
1,
8);
AddBuiltin(BuiltinOperator_EXP, Register_EXP(),
1,
2);
AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2(),
1,
3);
AddBuiltin(BuiltinOperator_LOG, Register_LOG(),
1,
2);
AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX(),
1,
2);
AddBuiltin(BuiltinOperator_CAST, Register_CAST(),
1,
6);
AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(),
1,
6);
AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_GREATER, Register_GREATER(),
1,
2);
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_LESS, Register_LESS(),
1,
3);
AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL(),
1,
2);
AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
AddBuiltin(BuiltinOperator_CEIL, Register_CEIL());
AddBuiltin(BuiltinOperator_ROUND, Register_ROUND());
AddBuiltin(BuiltinOperator_NEG, Register_NEG());
AddBuiltin(BuiltinOperator_SELECT, Register_SELECT(),
1,
4);
AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(),
1,
2);
AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(),
1,
6);
AddBuiltin(BuiltinOperator_SIN, Register_SIN());
AddBuiltin(BuiltinOperator_COS, Register_COS());
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV(),
1,
5);
AddBuiltin(BuiltinOperator_TILE, Register_TILE(),
1,
3);
AddBuiltin(BuiltinOperator_SUM, Register_SUM(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY());
AddBuiltin(BuiltinOperator_REDUCE_ALL, Register_REDUCE_ALL());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE(),
1,
3);
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL(),
1,
4);
AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT(),
1,
3);
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
AddBuiltin(BuiltinOperator_RANK, Register_RANK());
AddBuiltin(BuiltinOperator_POW, Register_POW());
AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
AddBuiltin(BuiltinOperator_PACK, Register_PACK(),
1,
4);
AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK(),
1,
4);
AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV(),
1,
3);
AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD(),
1,
2);
AddBuiltin(BuiltinOperator_RANGE, Register_RANGE(),
1,
2);
AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU(),
1,
2);
AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE(),
1,
2);
AddBuiltin(BuiltinOperator_FILL, Register_FILL(),
1,
4);
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD(),
1,
3);
AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2(),
1,
3);
AddBuiltin(BuiltinOperator_ADD_N, Register_ADD_N());
AddBuiltin(BuiltinOperator_GATHER_ND, Register_GATHER_ND(),
1,
5);
AddBuiltin(BuiltinOperator_WHERE, Register_WHERE(),
1,
2);
AddBuiltin(BuiltinOperator_ELU, Register_ELU());
AddBuiltin(BuiltinOperator_REVERSE_SEQUENCE, Register_REVERSE_SEQUENCE());
AddBuiltin(BuiltinOperator_MATRIX_DIAG, Register_MATRIX_DIAG());
AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(),
1,
3);
AddBuiltin(BuiltinOperator_MATRIX_SET_DIAG, Register_MATRIX_SET_DIAG());
AddBuiltin(BuiltinOperator_IF, tflite::ops::builtin::Register_IF());
AddBuiltin(BuiltinOperator_WHILE, tflite::ops::builtin::Register_WHILE());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V4,
Register_NON_MAX_SUPPRESSION_V4());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V5,
Register_NON_MAX_SUPPRESSION_V5());
AddBuiltin(BuiltinOperator_SCATTER_ND, Register_SCATTER_ND());
AddBuiltin(BuiltinOperator_DENSIFY, Register_DENSIFY());
AddBuiltin(BuiltinOperator_SEGMENT_SUM, Register_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_BATCH_MATMUL, Register_BATCH_MATMUL(),
1,
4);
AddBuiltin(BuiltinOperator_CUMSUM, Register_CUMSUM());
AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(),
2,
3);
AddBuiltin(BuiltinOperator_CALL_ONCE,
tflite::ops::builtin::Register_CALL_ONCE());
AddBuiltin(BuiltinOperator_RFFT2D, Register_RFFT2D());
AddBuiltin(BuiltinOperator_CONV_3D, Register_CONV_3D());
AddBuiltin(BuiltinOperator_IMAG, Register_IMAG());
AddBuiltin(BuiltinOperator_REAL, Register_REAL());
AddBuiltin(BuiltinOperator_COMPLEX_ABS, Register_COMPLEX_ABS());
AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS());
AddBuiltin(BuiltinOperator_HASHTABLE, Register_HASHTABLE());
AddBuiltin(BuiltinOperator_HASHTABLE_FIND, Register_HASHTABLE_FIND());
AddBuiltin(BuiltinOperator_HASHTABLE_IMPORT, Register_HASHTABLE_IMPORT());
AddBuiltin(BuiltinOperator_HASHTABLE_SIZE, Register_HASHTABLE_SIZE());
AddBuiltin(BuiltinOperator_CONV_3D_TRANSPOSE, Register_CONV_3D_TRANSPOSE());
AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE());
AddBuiltin(BuiltinOperator_READ_VARIABLE, Register_READ_VARIABLE());
AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, Register_ASSIGN_VARIABLE());
AddBuiltin(BuiltinOperator_MULTINOMIAL, Register_MULTINOMIAL());
AddBuiltin(BuiltinOperator_RANDOM_STANDARD_NORMAL,
Register_RANDOM_STANDARD_NORMAL());
AddBuiltin(BuiltinOperator_BUCKETIZE, Register_BUCKETIZE());
AddBuiltin(BuiltinOperator_RANDOM_UNIFORM, Register_RANDOM_UNIFORM());
AddBuiltin(BuiltinOperator_GELU, Register_GELU(),
1,
2);
AddBuiltin(BuiltinOperator_DYNAMIC_UPDATE_SLICE,
Register_DYNAMIC_UPDATE_SLICE(),
1,
2);
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_PROD,
Register_UNSORTED_SEGMENT_PROD());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MAX,
Register_UNSORTED_SEGMENT_MAX());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MIN,
Register_UNSORTED_SEGMENT_MIN());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_SUM,
Register_UNSORTED_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_ATAN2, Register_ATAN2());
AddBuiltin(BuiltinOperator_SIGN, Register_SIGN(),
           /* min_version = */ 1,
           /* max_version = */ 2);
AddBuiltin(BuiltinOperator_BITCAST, Register_BITCAST());
AddBuiltin(BuiltinOperator_BITWISE_XOR, Register_BITWISE_XOR());
AddBuiltin(BuiltinOperator_RIGHT_SHIFT, Register_RIGHT_SHIFT());
AddBuiltin(BuiltinOperator_STABLEHLO_SCATTER, Register_STABLEHLO_SCATTER());
AddBuiltin(BuiltinOperator_DILATE, Register_DILATE());
AddBuiltin(BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR,
Register_STABLEHLO_RNG_BIT_GENERATOR());
AddBuiltin(BuiltinOperator_REDUCE_WINDOW, Register_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
Register_STABLEHLO_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_GATHER, Register_STABLEHLO_GATHER());
AddBuiltin(BuiltinOperator_STABLEHLO_ADD, Register_STABLEHLO_ADD());
AddBuiltin(BuiltinOperator_STABLEHLO_AND, Register_STABLEHLO_AND());
AddBuiltin(BuiltinOperator_STABLEHLO_MULTIPLY, Register_STABLEHLO_MULTIPLY());
AddBuiltin(BuiltinOperator_STABLEHLO_MAXIMUM, Register_STABLEHLO_MAXIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_MINIMUM, Register_STABLEHLO_MINIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_SHIFT_LEFT,
Register_STABLEHLO_SHIFT_LEFT());
AddBuiltin(BuiltinOperator_STABLEHLO_PAD, Register_STABLEHLO_PAD());
AddBuiltin(BuiltinOperator_STABLEHLO_COMPOSITE,
Register_STABLEHLO_COMPOSITE());
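// Custom (non-builtin) ops bundled with the default resolver; these are
// looked up by registration name rather than by builtin opcode.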
AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY());
AddCustom("Mfcc", tflite::ops::custom::Register_MFCC());
AddCustom("AudioSpectrogram",
tflite::ops::custom::Register_AUDIO_SPECTROGRAM());
AddCustom("TFLite_Detection_PostProcess",
tflite::ops::custom::Register_DETECTION_POSTPROCESS());
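// Every op registered above is known to TFLite, so models resolved through
// this resolver cannot directly contain user-defined ops.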
may_directly_contain_user_defined_ops_ = false;
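// Register a creator so the XNNPACK delegate is instantiated lazily, once per
// TfLiteContext, with the default QS8 handling.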
delegate_creators_.push_back([](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context,
XNNPackQS8Options::default_value);
});
}
BuiltinOpResolverWithXNNPACK::BuiltinOpResolverWithXNNPACK(
bool enable_xnnpack_unsigned_quantized) {
delegate_creators_.clear();
XNNPackQS8Options xnnpack_qs8_options = enable_xnnpack_unsigned_quantized
? XNNPackQS8Options::enabled
: XNNPackQS8Options::disabled;
delegate_creators_.push_back([xnnpack_qs8_options](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context, xnnpack_qs8_options);
});
}
}
}
} | #include "tensorflow/lite/core/kernels/register.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite::ops::builtin {
namespace {
TEST(BuiltinOpResolverTest, SupportsAdd) {
BuiltinOpResolver builtin_op_resolver;
const TfLiteRegistration *add =
builtin_op_resolver.FindOp(::tflite::BuiltinOperator_ADD, 1);
ASSERT_NE(add, nullptr);
ASSERT_NE(add->init, nullptr);
ASSERT_NE(add->free, nullptr);
ASSERT_NE(add->prepare, nullptr);
ASSERT_NE(add->invoke, nullptr);
}
TEST(BuiltinOpResolverTest, CopySupportsAdd) {
BuiltinOpResolver builtin_op_resolver;
MutableOpResolver copy = builtin_op_resolver;
const TfLiteRegistration *add = copy.FindOp(::tflite::BuiltinOperator_ADD, 1);
ASSERT_NE(add, nullptr);
ASSERT_NE(add->init, nullptr);
ASSERT_NE(add->free, nullptr);
ASSERT_NE(add->prepare, nullptr);
ASSERT_NE(add->invoke, nullptr);
}
#if !defined(TFLITE_WITHOUT_XNNPACK)
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8_QU8) {
  BuiltinOpResolverWithXNNPACK builtin_op_resolver(true);
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, Disable_QU8) {
BuiltinOpResolverWithXNNPACK builtin_op_resolver(false);
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, 0);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/kernels/register.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/kernels/register_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0239edc1-2484-43d0-8cc0-3407d549341e | cpp | tensorflow/tensorflow | c_api | tensorflow/c/eager/c_api.cc | tensorflow/c/eager/c_api_test.cc | #include "tensorflow/c/eager/c_api.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "xla/tsl/c/tsl_status_internal.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/custom_device.h"
#include "tensorflow/core/common_runtime/eager/custom_device_op_handler.h"
#include "tensorflow/core/common_runtime/eager/execute.h"
#include "tensorflow/core/common_runtime/eager/placement_utils.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/version.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/common_runtime/eager/context_distributed_manager.h"
#endif
using tensorflow::string;
namespace {
string DeviceName(const tensorflow::Device* d) {
return (d == nullptr) ? "cpu:0" : d->name();
}
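// Stamps a FunctionDef with an attribute recording that it was constructed
// through the eager runtime, so later consumers can tell how it was built.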
void AnnotateEagerRuntimeConstructionContext(
tensorflow::FunctionDef& function_def) {
tensorflow::AttrValue value;
SetAttrValue("kEagerRuntime", &value);
(*function_def.mutable_attr())["_construction_context"] = value;
}
}
extern "C" {
TFE_ContextOptions* TFE_NewContextOptions() { return new TFE_ContextOptions; }
void TFE_ContextOptionsSetConfig(TFE_ContextOptions* options, const void* proto,
size_t proto_len, TF_Status* status) {
TF_SetConfig(&options->session_options, proto, proto_len, status);
}
void TFE_ContextOptionsSetAsync(TFE_ContextOptions* options,
unsigned char enable) {
options->async = enable;
}
void TFE_ContextOptionsSetDevicePlacementPolicy(
TFE_ContextOptions* options, TFE_ContextDevicePlacementPolicy policy) {
options->device_placement_policy = policy;
}
void TFE_DeleteContextOptions(TFE_ContextOptions* options) { delete options; }
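// Creates an eager context over all locally registered devices under
// /job:localhost/replica:0/task:0; TFRT-backed contexts are not supported.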
TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
if (opts->use_tfrt) {
status->status = tensorflow::errors::Unimplemented("TFRT is not supported");
return nullptr;
}
std::vector<std::unique_ptr<tensorflow::Device>> devices;
status->status = tensorflow::DeviceFactory::AddDevices(
opts->session_options.options, "/job:localhost/replica:0/task:0",
&devices);
if (!status->status.ok()) return nullptr;
std::unique_ptr<tensorflow::DeviceMgr> device_mgr(
new tensorflow::DynamicDeviceMgr(std::move(devices)));
auto r = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
opts->session_options.options,
static_cast<tensorflow::ContextDevicePlacementPolicy>(
opts->device_placement_policy),
opts->async, device_mgr.release(),
      /* device_mgr_owned = */ true, std::move(r),
      /* cluster_flr = */ nullptr,
      /* collective_executor_mgr = */ nullptr,
opts->run_eager_op_as_function,
opts->jit_compile_rewrite);
#if !defined(IS_MOBILE_PLATFORM)
eager_context->SetDistributedManager(
std::make_unique<tensorflow::EagerContextDistributedManager>(
eager_context));
#endif
return tensorflow::wrap(eager_context);
}
void TFE_DeleteContext(TFE_Context* ctx) {
if (ctx == nullptr) {
return;
}
tensorflow::unwrap(ctx)->Release();
}
TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) {
TF_DeviceList* l = new TF_DeviceList;
tensorflow::unwrap(ctx)->ListDevices(&l->response);
return l;
}
void TFE_ContextClearCaches(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->ClearCachesAndThreadExecutors();
}
TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status) {
TFE_ContextSetServerDefWithTimeoutAndRetries(
      ctx, keep_alive_secs, proto, proto_len, /* init_timeout_in_ms = */ 0,
      /* retries = */ 0, status, /* clear_existing_contexts = */ false);
}
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status,
bool clear_existing_contexts) {
TFE_ContextSetServerDefWithTimeoutAndRetries(
ctx, keep_alive_secs, proto, proto_len, init_timeout_in_ms,
      /* retries = */ 0, status, clear_existing_contexts);
}
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeoutAndRetries(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, int retries, TF_Status* status,
bool clear_existing_contexts) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::errors::Unimplemented(
"TFE_ContextSetServerDef not supported on mobile");
#else
tensorflow::ServerDef server_def;
if (!server_def.ParseFromArray(proto, proto_len)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid tensorflow.ServerDef protocol buffer");
return;
}
status->status =
tensorflow::unwrap(ctx)->GetDistributedManager()->SetOrUpdateServerDef(
          server_def, /* reset_context = */ true, keep_alive_secs,
init_timeout_in_ms, retries, clear_existing_contexts);
#endif
}
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status) {
TFE_ContextUpdateServerDefWithTimeout(ctx, keep_alive_secs, proto, proto_len,
0, status);
}
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::errors::Unimplemented(
"TFE_ContextUpdateServerDef not supported on mobile");
#else
tensorflow::ServerDef server_def;
tensorflow::EagerContext* context =
tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
if (!server_def.ParseFromArray(proto, proto_len)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid tensorflow.ServerDef protocol buffer");
return;
} else if (context->GetContextId() ==
tensorflow::EagerContext::kInvalidContextId) {
status->status = tensorflow::errors::InvalidArgument(
"Trying to update a context with invalid context id.");
}
status->status =
tensorflow::unwrap(ctx)->GetDistributedManager()->SetOrUpdateServerDef(
          server_def, /* reset_context = */ false, keep_alive_secs,
          init_timeout_in_ms, /* retries = */ 0);
#endif
}
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::errors::Unimplemented(
"TFE_ContextSetServerDef not supported on mobile");
return false;
#else
bool is_alive;
status->status =
tensorflow::unwrap(ctx)->GetDistributedManager()->CheckRemoteAlive(
worker_name, &is_alive);
return is_alive;
#endif
}
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::OkStatus();
#else
status->status = tensorflow::unwrap(ctx)->AsyncWait();
#endif
}
void TFE_ContextSetThreadLocalDevicePlacementPolicy(
TFE_Context* ctx, TFE_ContextDevicePlacementPolicy policy) {
tensorflow::unwrap(ctx)->SetThreadLocalDevicePlacementPolicy(
static_cast<tensorflow::ContextDevicePlacementPolicy>(policy));
}
extern TFE_ContextDevicePlacementPolicy TFE_ContextGetDevicePlacementPolicy(
TFE_Context* ctx) {
return static_cast<TFE_ContextDevicePlacementPolicy>(
tensorflow::unwrap(ctx)->GetDevicePlacementPolicy());
}
TFE_TensorHandle* TFE_NewTensorHandle(const TF_Tensor* t, TF_Status* status) {
tensorflow::Tensor tensor;
status->status = tensorflow::TF_TensorToTensor(t, &tensor);
if (!status->status.ok()) return nullptr;
return tensorflow::wrap(tensorflow::TensorHandle::CreateLocalHandle(tensor));
}
void TFE_DeleteTensorHandle(TFE_TensorHandle* h) {
  if (h == nullptr) return;
  tsl::profiler::TraceMe activity("TFE_DeleteTensorHandle",
                                  tsl::profiler::TraceMeLevel::kInfo);
  tensorflow::unwrap(h)->Unref();
}
TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h) {
return static_cast<TF_DataType>(tensorflow::unwrap(h)->DataType());
}
int TFE_TensorHandleNumDims(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
int num_dims = -1;
status->status = tensorflow::unwrap(h)->NumDims(&num_dims);
return num_dims;
}
int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
int64_t num_elements = -1;
status->status = tensorflow::unwrap(h)->NumElements(&num_elements);
return num_elements;
}
int64_t TFE_TensorHandleDim(TFE_TensorHandle* h, int dim_index,
TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
int64_t dim = -1;
status->status = tensorflow::unwrap(h)->Dim(dim_index, &dim);
return dim;
}
const char* TFE_TensorHandleDeviceName(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
return tensorflow::unwrap(h)->DeviceName(&status->status);
}
const char* TFE_TensorHandleBackingDeviceName(TFE_TensorHandle* h,
TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
return tensorflow::unwrap(h)->BackingDeviceName(&status->status);
}
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::unwrap(h)->Ref();
return h;
}
TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::AbstractTensorInterface* t =
tensorflow::unwrap(h)->Resolve(&status->status);
if (t == nullptr) {
return nullptr;
}
return new TF_Tensor{t};
}
void* TFE_TensorHandleDevicePointer(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::ImmediateExecutionTensorHandle* unwrapped_handle =
tensorflow::unwrap(h);
if (tensorflow::CustomDeviceTensorHandle::classof(unwrapped_handle)) {
return tensorflow::down_cast<tensorflow::CustomDeviceTensorHandle*>(
unwrapped_handle)
->DevicePointer();
}
if (!tensorflow::TensorHandle::classof(unwrapped_handle)) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::TensorHandle* handle =
tensorflow::TensorHandleFromInterface(unwrapped_handle);
if (handle->Type() != tensorflow::TensorHandle::LOCAL) {
status->status = tensorflow::errors::InvalidArgument(
"TFE_TensorHandleDevicePointer may not be called on a ",
handle->TypeString(), " tensor handle.");
return nullptr;
}
tensorflow::Device* device(handle->device());
if (device != nullptr) {
status->status = device->Sync();
if (!status->status.ok()) {
return nullptr;
}
}
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return nullptr;
}
return const_cast<void*>(
static_cast<const void*>(tensor->tensor_data().data()));
}
namespace tensorflow {
namespace {
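// Bridges the C TFE_CustomDevice callback table to the C++ CustomDevice
// interface, forwarding copies, execution, and packing to the user callbacks.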
class CustomDeviceAPI : public tensorflow::CustomDevice {
public:
CustomDeviceAPI(TFE_Context* context, TFE_CustomDevice device, void* info,
string name)
: context_(context), device_(device), info_(info), name_(name) {}
~CustomDeviceAPI() override { device_.delete_device(info_); }
const string& name() override { return name_; }
tensorflow::Status CopyTensorToDevice(
ImmediateExecutionTensorHandle* handle,
ImmediateExecutionTensorHandle** result) override {
handle->Ref();
TF_Status status;
TFE_TensorHandle* result_handle = device_.copy_tensor_to_device(
context_, tensorflow::wrap(handle), &status, info_);
handle->Unref();
if (!status.status.ok()) return status.status;
*result = tensorflow::unwrap(result_handle);
(*result)->Ref();
TFE_DeleteTensorHandle(result_handle);
return status.status;
}
tensorflow::Status CopyTensorFromDevice(
ImmediateExecutionTensorHandle* handle,
const tensorflow::string& target_device_name,
ImmediateExecutionTensorHandle** result) override {
TF_Status status;
handle->Ref();
TFE_TensorHandle* result_handle = device_.copy_tensor_from_device(
context_, tensorflow::wrap(handle), target_device_name.c_str(), &status,
info_);
handle->Unref();
if (!status.status.ok()) return status.status;
*result = tensorflow::unwrap(result_handle);
(*result)->Ref();
TFE_DeleteTensorHandle(result_handle);
return status.status;
}
tensorflow::Status Execute(const ImmediateExecutionOperation* op,
ImmediateExecutionTensorHandle** retvals,
int* num_retvals) override {
std::vector<TFE_TensorHandle*> outputs(*num_retvals);
TF_Status status;
device_.execute(tensorflow::wrap(op), num_retvals, outputs.data(), &status,
info_);
if (status.status.ok()) {
for (int i = 0; i < *num_retvals; ++i) {
retvals[i] = tensorflow::unwrap(outputs[i]);
retvals[i]->Ref();
TFE_DeleteTensorHandle(outputs[i]);
}
}
return status.status;
}
tensorflow::Status Pack(absl::Span<ImmediateExecutionTensorHandle*> handles,
ImmediateExecutionTensorHandle** result) override {
TF_Status status;
*result = tensorflow::unwrap(device_.pack(context_,
tensorflow::wrap(handles.data()),
handles.size(), &status, info_));
return status.status;
}
absl::StatusOr<bool> ShallPinToThisDevice(
const ImmediateExecutionOperation* op) override {
TF_Status status;
if (device_.shall_pin_to_this_device != nullptr) {
return device_.shall_pin_to_this_device(tensorflow::wrap(op), &status);
}
return errors::Unimplemented("No custom device pinning implementation.");
}
private:
TFE_Context* context_;
TFE_CustomDevice device_;
void* info_;
string name_;
};
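// A tensor handle whose dimension and summary queries are answered by
// user-supplied C callbacks; the raw data pointer is owned by the handle and
// released through the supplied deallocator.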
class CAPICustomDeviceTensorHandle
: public tensorflow::CustomDeviceTensorHandle {
public:
CAPICustomDeviceTensorHandle(tensorflow::ImmediateExecutionContext* context,
tensorflow::CustomDevice* device,
tensorflow::DataType dtype, void* data,
TFE_CustomDeviceTensorHandleMethods methods)
: tensorflow::CustomDeviceTensorHandle(context, device, dtype),
data_(data),
methods_(methods) {}
~CAPICustomDeviceTensorHandle() override { methods_.deallocator(data_); }
void* DevicePointer() const override { return data_; }
Status NumDims(int* num_dims) const override {
TF_Status s;
*num_dims = methods_.num_dims(data_, &s);
return s.status;
}
Status Dim(int dim_index, int64_t* dim) const override {
TF_Status s;
*dim = methods_.dim(data_, dim_index, &s);
return s.status;
}
bool PreferCustomSummarizer() const override {
return methods_.summarize != nullptr;
}
Status SummarizeValue(std::string& summary) const override {
if (methods_.summarize == nullptr) {
return tensorflow::CustomDeviceTensorHandle::SummarizeValue(summary);
}
TF_Status c_status;
std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> summary_buffer(
methods_.summarize(data_, &c_status), TF_DeleteBuffer);
if (!c_status.status.ok()) {
return c_status.status;
}
summary = std::string(reinterpret_cast<const char*>(summary_buffer->data),
summary_buffer->length);
return absl::OkStatus();
}
private:
void* const data_;
const TFE_CustomDeviceTensorHandleMethods methods_;
};
}
}
TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
TFE_Context* ctx, const char* device_name, TF_DataType dtype, void* data,
TFE_CustomDeviceTensorHandleMethods methods, TF_Status* status) {
tensorflow::ImmediateExecutionContext* context = tensorflow::unwrap(ctx);
tensorflow::CustomDevice* device = nullptr;
if (!context->GetCustomDeviceOpHandler().FindCustomDeviceFromName(device_name,
&device)) {
methods.deallocator(data);
status->status =
tensorflow::errors::InvalidArgument(device_name, " unknown device.");
return nullptr;
}
  return tensorflow::wrap(new tensorflow::CAPICustomDeviceTensorHandle(
      context, device, static_cast<tensorflow::DataType>(dtype), data,
      methods));
}
TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
TFE_Context* ctx, const char* device_name, TF_DataType dtype,
const int64_t* dims, int num_dims, void* data, size_t len,
void (*deallocator)(void* data, size_t len, void* arg),
void* deallocator_arg, TF_Status* status) {
tensorflow::Device* device = nullptr;
tensorflow::EagerContext* context =
tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
status->status = context->FindDeviceFromName(device_name, &device);
if (!status->status.ok()) {
deallocator(data, len, deallocator_arg);
status->status =
tensorflow::errors::InvalidArgument(device_name, " unknown device.");
return nullptr;
}
std::vector<int64_t> dimvec(num_dims);
for (int i = 0; i < num_dims; ++i) {
dimvec[i] = static_cast<int64_t>(dims[i]);
}
  TF_ManagedBuffer* buf =
      new TF_ManagedBuffer(data, len, deallocator, deallocator_arg,
                           /* owns_memory = */ false);
tensorflow::Tensor t(static_cast<tensorflow::DataType>(dtype),
tensorflow::TensorShape(dimvec), buf);
buf->Unref();
return tensorflow::wrap(tensorflow::TensorHandle::CreateLocalHandle(
std::move(t), device, device, context));
}
size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle* h,
TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return 0;
}
tensorflow::TensorHandle* handle =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(h));
if (handle->Type() != tensorflow::TensorHandle::LOCAL) {
status->status = tensorflow::errors::InvalidArgument(
"TFE_TensorHandleDeviceMemorySize may not be called on a ",
handle->TypeString(), " tensor handle.");
return 0;
}
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return 0;
}
return tensor->TotalBytes();
}
TFE_Op* TFE_NewOp(TFE_Context* ctx, const char* op_or_function_name,
TF_Status* status) {
tensorflow::ImmediateExecutionOperation* new_op =
tensorflow::unwrap(ctx)->CreateOperation();
status->status = new_op->Reset(op_or_function_name, nullptr);
if (!status->status.ok()) {
new_op->Release();
new_op = nullptr;
}
return tensorflow::wrap(new_op);
}
void TFE_DeleteOp(TFE_Op* op) {
if (op == nullptr) {
return;
}
tensorflow::unwrap(op)->Release();
}
const char* TFE_OpGetName(const TFE_Op* op, TF_Status* status) {
return tensorflow::unwrap(op)->Name().c_str();
}
TFE_Context* TFE_OpGetContext(const TFE_Op* op, TF_Status* status) {
return tensorflow::wrap(tensorflow::unwrap(op)->GetContext());
}
void TFE_OpSetDevice(TFE_Op* op, const char* device_name, TF_Status* status) {
status->status = tensorflow::unwrap(op)->SetDeviceName(device_name);
}
const char* TFE_OpGetDevice(const TFE_Op* op, TF_Status* status) {
return tensorflow::unwrap(op)->DeviceName().c_str();
}
void TFE_OpAddInput(TFE_Op* op, TFE_TensorHandle* input, TF_Status* status) {
status->status = tensorflow::unwrap(op)->AddInput(tensorflow::unwrap(input));
}
void TFE_OpAddInputList(TFE_Op* op, TFE_TensorHandle** inputs, int num_inputs,
TF_Status* status) {
status->status = tensorflow::unwrap(op)->AddInputList(
{reinterpret_cast<tensorflow::AbstractTensorHandle**>(
tensorflow::unwrap(inputs)),
static_cast<size_t>(num_inputs)});
}
extern int TFE_OpGetFlatInputCount(const TFE_Op* op, TF_Status* status) {
return tensorflow::unwrap(op)->GetInputs().size();
}
extern TFE_TensorHandle* TFE_OpGetFlatInput(const TFE_Op* op, int index,
TF_Status* status) {
return tensorflow::wrap(tensorflow::unwrap(op)->GetInputs()[index]);
}
TF_AttrType TFE_OpGetAttrType(TFE_Op* op, const char* attr_name,
unsigned char* is_list, TF_Status* status) {
TF_AttrType ret = TF_ATTR_INT;
const tensorflow::AttrTypeMap* attr_types_;
bool is_function;
status->status = tensorflow::AttrTypeMapForOp(
tensorflow::unwrap(op)->Name().c_str(), &attr_types_, &is_function);
if (!status->status.ok()) {
return ret;
}
status->status =
tensorflow::AttrTypeByName(*attr_types_, attr_name, &ret, is_list);
return ret;
}
TF_AttrType TFE_OpNameGetAttrType(TFE_Context* ctx,
const char* op_or_function_name,
const char* attr_name, unsigned char* is_list,
TF_Status* status) {
TF_AttrType ret;
TFE_Op* op = TFE_NewOp(ctx, op_or_function_name, status);
if (status->status.ok()) {
ret = TFE_OpGetAttrType(op, attr_name, is_list, status);
} else {
ret = TF_ATTR_INT;
}
TFE_DeleteOp(op);
return ret;
}
void TFE_OpSetAttrString(TFE_Op* op, const char* attr_name, const void* value,
size_t length) {
auto s = tensorflow::unwrap(op)->SetAttrString(
attr_name, static_cast<const char*>(value), length);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name, int64_t value) {
auto s = tensorflow::unwrap(op)->SetAttrInt(attr_name, value);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrFloat(TFE_Op* op, const char* attr_name, float value) {
auto s = tensorflow::unwrap(op)->SetAttrFloat(attr_name, value);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrBool(TFE_Op* op, const char* attr_name, unsigned char value) {
  auto s = tensorflow::unwrap(op)->SetAttrBool(attr_name, value != 0);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name, TF_DataType value) {
auto s = tensorflow::unwrap(op)->SetAttrType(
attr_name, static_cast<tensorflow::DataType>(value));
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name, const int64_t* dims,
const int num_dims, TF_Status* out_status) {
out_status->status =
tensorflow::unwrap(op)->SetAttrShape(attr_name, dims, num_dims);
}
void TFE_OpSetAttrFunction(TFE_Op* op, const char* attr_name,
const TFE_Op* value) {
auto s = tensorflow::unwrap(op)->SetAttrFunction(
attr_name, tensorflow::unwrap(const_cast<TFE_Op*>(value)));
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrFunctionName(TFE_Op* op, const char* attr_name,
const char* data, size_t length) {
auto s = tensorflow::unwrap(op)->SetAttrFunctionName(attr_name, data, length);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrTensor(TFE_Op* op, const char* attr_name, TF_Tensor* tensor,
TF_Status* status) {
tensorflow::Tensor t;
status->status = TF_TensorToTensor(tensor, &t);
tensorflow::TensorInterface interface(t);
status->status = tensorflow::unwrap(op)->SetAttrTensor(attr_name, &interface);
}
void TFE_OpSetAttrStringList(TFE_Op* op, const char* attr_name,
const void* const* values, const size_t* lengths,
int num_values) {
auto s = tensorflow::unwrap(op)->SetAttrStringList(attr_name, values, lengths,
num_values);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrFloatList(TFE_Op* op, const char* attr_name,
const float* values, int num_values) {
auto s =
tensorflow::unwrap(op)->SetAttrFloatList(attr_name, values, num_values);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrIntList(TFE_Op* op, const char* attr_name,
const int64_t* values, int num_values) {
auto s =
tensorflow::unwrap(op)->SetAttrIntList(attr_name, values, num_values);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrTypeList(TFE_Op* op, const char* attr_name,
const TF_DataType* values, int num_values) {
auto s = tensorflow::unwrap(op)->SetAttrTypeList(
attr_name, reinterpret_cast<const tensorflow::DataType*>(values),
num_values);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrBoolList(TFE_Op* op, const char* attr_name,
const unsigned char* values, int num_values) {
auto s =
tensorflow::unwrap(op)->SetAttrBoolList(attr_name, values, num_values);
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrShapeList(TFE_Op* op, const char* attr_name,
const int64_t** dims, const int* num_dims,
int num_values, TF_Status* out_status) {
out_status->status = tensorflow::unwrap(op)->SetAttrShapeList(
attr_name, dims, num_dims, num_values);
}
void TFE_OpSetAttrFunctionList(TFE_Op* op, const char* attr_name,
const TFE_Op** value, int num_values) {
auto s = tensorflow::unwrap(op)->SetAttrFunctionList(
attr_name, {reinterpret_cast<const tensorflow::AbstractOperation**>(
tensorflow::unwrap(value)),
static_cast<size_t>(num_values)});
if (!s.ok()) {
LOG(WARNING) << "Unable to set attribute: " << attr_name;
}
}
void TFE_OpSetAttrValueProto(const TFE_Op* op, const char* attr_name,
const void* proto, size_t proto_len,
TF_Status* status) {
tensorflow::AttrValue attr_value;
if (!attr_value.ParseFromArray(proto, proto_len)) {
status->status =
tensorflow::errors::InvalidArgument("Unparseable AttrValue proto");
return;
}
if (op == nullptr) {
status->status = tensorflow::errors::InvalidArgument(
"Got a null or uninitialized `op` argument");
return;
}
tensorflow::EagerOperation* operation =
OperationFromInterface(tensorflow::unwrap(const_cast<TFE_Op*>(op)));
operation->MutableAttrs()->Set(attr_name, attr_value);
}
TF_CAPI_EXPORT extern int TFE_OpGetInputLength(TFE_Op* op,
const char* input_name,
TF_Status* status) {
int ret = -1;
status->status = tensorflow::unwrap(op)->InputLength(input_name, &ret);
return ret;
}
TF_CAPI_EXPORT extern int TFE_OpGetOutputLength(TFE_Op* op,
const char* output_name,
TF_Status* status) {
int ret = -1;
status->status = tensorflow::unwrap(op)->OutputLength(output_name, &ret);
return ret;
}
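// Executes the op through the custom device handler, which dispatches either
// to a registered custom device or to the regular eager executor.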
void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals, int* num_retvals,
TF_Status* status) {
tensorflow::ImmediateExecutionOperation* unwrapped_op =
tensorflow::unwrap(op);
status->status =
unwrapped_op->GetContext()->GetCustomDeviceOpHandler().Execute(
unwrapped_op,
reinterpret_cast<tensorflow::ImmediateExecutionTensorHandle**>(
retvals),
num_retvals);
}
TFE_TensorHandle* TFE_TensorHandleCopyToDevice(TFE_TensorHandle* h,
TFE_Context* ctx,
const char* device_name,
TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::ImmediateExecutionContext* unwrapped_ctx =
tensorflow::unwrap(ctx);
auto* result =
unwrapped_ctx->GetCustomDeviceOpHandler().CopyTensorHandleToDevice(
unwrapped_ctx, tensorflow::unwrap(h), device_name, &status->status);
if (status->status.ok()) {
return tensorflow::wrap(result);
}
return nullptr;
}
void TFE_ContextAddFunctionDef(TFE_Context* ctx,
const char* serialized_function_def, size_t size,
TF_Status* status) {
tensorflow::FunctionDef function_def;
if (!function_def.ParseFromArray(serialized_function_def, size)) {
status->status =
tensorflow::errors::InvalidArgument("Invalid FunctionDef proto");
return;
}
AnnotateEagerRuntimeConstructionContext(function_def);
status->status = tensorflow::unwrap(ctx)->AddFunctionDef(function_def);
}
void TFE_ContextAddFunction(TFE_Context* ctx, TF_Function* function,
TF_Status* status) {
auto fdef_or = function->record->mutable_fdef();
if (!fdef_or.ok()) {
status->status = fdef_or.status();
return;
}
AnnotateEagerRuntimeConstructionContext(*fdef_or.value());
status->status = tensorflow::unwrap(ctx)->AddFunctionDefWithStackTraces(
*fdef_or.value(), function->record->stack_traces());
}
TF_Function* TFE_ContextGetFunction(TFE_Context* ctx, const char* name,
TF_Status* status) {
tensorflow::core::RefCountPtr<tensorflow::FunctionRecord> record =
tensorflow::unwrap(ctx)->FindRecord(name);
if (record == nullptr) {
status->status = tensorflow::errors::NotFound(
"Unable to find Function with name: ", name);
return nullptr;
}
TF_Function* result = new TF_Function();
record->Ref();
result->record = record.get();
return result;
}
void TFE_ContextRemoveFunction(TFE_Context* ctx, const char* name,
TF_Status* status) {
status->status = tensorflow::unwrap(ctx)->RemoveFunction(name);
}
unsigned char TFE_ContextHasFunction(TFE_Context* ctx, const char* name) {
return tensorflow::unwrap(ctx)->FindFunctionDef(name) != nullptr;
}
void TFE_ContextEnableRunMetadata(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->SetShouldStoreGraphs(true);
}
void TFE_ContextDisableRunMetadata(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->SetShouldStoreGraphs(false);
}
}
TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t,
TF_Status* status) {
return tensorflow::wrap(tensorflow::TensorHandle::CreateLocalHandle(t));
}
void TFE_ContextExportRunMetadata(TFE_Context* ctx, TF_Buffer* buf,
TF_Status* status) {
auto* context = tensorflow::unwrap(ctx);
status->status = context->AsyncWait();
if (!status->status.ok()) return;
auto run_metadata = context->ExportRunMetadata();
status->status = MessageToBuffer(*run_metadata, buf);
}
namespace {
TFE_Op* GetFunc(TFE_Context* ctx, const tensorflow::NameAttrList& func,
TF_Status* status) {
TFE_Op* func_op = TFE_NewOp(ctx, func.name().data(), status);
for (const auto& attr : func.attr()) {
if (!status->status.ok()) return nullptr;
SetOpAttrValueScalar(ctx, func_op, attr.second, attr.first.data(), status);
if (!status->status.ok()) return nullptr;
}
return func_op;
}
}
void TFE_ContextStartStep(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->StartStep();
}
void TFE_ContextEndStep(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->EndStep();
}
const TFE_OpAttrs* TFE_OpGetAttrs(const TFE_Op* op) {
return tensorflow::wrap(tensorflow::unwrap(op)->GetOpAttrs());
}
void TFE_OpAddAttrs(TFE_Op* op, const TFE_OpAttrs* attrs) {
tensorflow::unwrap(op)->AddAttrs(tensorflow::unwrap(attrs));
}
void TFE_OpAttrsSerialize(const TFE_OpAttrs* attrs, TF_Buffer* buf,
TF_Status* status) {
tensorflow::NameAttrList name_and_attrs;
tensorflow::unwrap(attrs)->GetNameAttrList(&name_and_attrs);
status->status = MessageToBuffer(name_and_attrs, buf);
}
namespace tensorflow {
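// Copies a single AttrValue (typically an attribute default) onto a TFE_Op,
// dispatching on the value case; tensor-valued attributes and lists of
// shapes, functions, or tensors are reported as unimplemented.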
void SetOpAttrValueScalar(TFE_Context* ctx, TFE_Op* op,
const tensorflow::AttrValue& default_value,
const char* attr_name, TF_Status* status) {
switch (default_value.value_case()) {
case tensorflow::AttrValue::kS: {
const string& v = default_value.s();
TFE_OpSetAttrString(op, attr_name, v.data(), v.size());
break;
}
case tensorflow::AttrValue::kI:
TFE_OpSetAttrInt(op, attr_name, static_cast<int64_t>(default_value.i()));
break;
case tensorflow::AttrValue::kF:
TFE_OpSetAttrFloat(op, attr_name, default_value.f());
break;
case tensorflow::AttrValue::kB:
TFE_OpSetAttrBool(op, attr_name, default_value.b());
break;
case tensorflow::AttrValue::kType:
TFE_OpSetAttrType(op, attr_name,
static_cast<TF_DataType>(default_value.type()));
break;
case tensorflow::AttrValue::kShape: {
const auto& tensor_shape = default_value.shape();
if (tensor_shape.unknown_rank()) {
TFE_OpSetAttrShape(op, attr_name, nullptr, -1, status);
} else {
const auto num_dims = tensor_shape.dim_size();
std::unique_ptr<int64_t[]> dims(new int64_t[num_dims]);
for (int i = 0; i < num_dims; ++i) {
dims[i] = tensor_shape.dim(i).size();
}
TFE_OpSetAttrShape(op, attr_name, dims.get(), num_dims, status);
}
} break;
case tensorflow::AttrValue::kFunc: {
const auto func_op = GetFunc(ctx, default_value.func(), status);
if (!status->status.ok()) return;
TFE_OpSetAttrFunction(op, attr_name, func_op);
TFE_DeleteOp(func_op);
} break;
case tensorflow::AttrValue::kList: {
if (const int s_size = default_value.list().s_size()) {
absl::InlinedVector<const void*, 4> values_vector;
values_vector.reserve(s_size);
absl::InlinedVector<size_t, 4> lengths_vector;
lengths_vector.reserve(s_size);
for (int i = 0; i < s_size; ++i) {
const string& v = default_value.list().s(i);
values_vector.push_back(v.data());
lengths_vector.push_back(v.size());
}
TFE_OpSetAttrStringList(op, attr_name, values_vector.data(),
lengths_vector.data(), s_size);
}
if (const int i_size = default_value.list().i_size()) {
absl::InlinedVector<int64_t, 4> i_vector;
i_vector.reserve(i_size);
for (int i = 0; i < i_size; ++i) {
i_vector.push_back(default_value.list().i(i));
}
TFE_OpSetAttrIntList(op, attr_name, i_vector.data(), i_size);
}
if (const int f_size = default_value.list().f_size()) {
absl::InlinedVector<float, 4> f_vector;
f_vector.reserve(f_size);
for (int i = 0; i < f_size; ++i) {
f_vector.push_back(default_value.list().f(i));
}
TFE_OpSetAttrFloatList(op, attr_name, f_vector.data(), f_size);
}
if (const int b_size = default_value.list().b_size()) {
absl::InlinedVector<unsigned char, 4> b_vector;
b_vector.reserve(b_size);
for (int i = 0; i < b_size; i++) {
b_vector.push_back(default_value.list().b(i));
}
TFE_OpSetAttrBoolList(op, attr_name, b_vector.data(), b_size);
}
if (const int type_size = default_value.list().type_size()) {
absl::InlinedVector<unsigned int, 4> type_vector;
type_vector.reserve(type_size);
for (int i = 0; i < type_size; ++i) {
type_vector.push_back(default_value.list().type(i));
}
TFE_OpSetAttrTypeList(
op, attr_name,
reinterpret_cast<const TF_DataType*>(type_vector.data()),
type_size);
}
if (default_value.list().shape_size() > 0 ||
default_value.list().func_size() > 0 ||
default_value.list().tensor_size() > 0) {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
tensorflow::strings::StrCat("Unable to get setfor default value: ",
default_value.DebugString())
.data());
}
} break;
case tensorflow::AttrValue::kTensor:
TF_FALLTHROUGH_INTENDED;
case tensorflow::AttrValue::kPlaceholder:
TF_FALLTHROUGH_INTENDED;
case tensorflow::AttrValue::VALUE_NOT_SET:
TF_SetStatus(
status, TF_UNIMPLEMENTED,
tensorflow::strings::StrCat("Unable to get setfor default value: ",
default_value.DebugString())
.data());
}
}
}
namespace {
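// Fallback used when a custom device does not provide a pack callback.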
TFE_TensorHandle* DefaultCustomDevicePack(TFE_Context* context,
TFE_TensorHandle** handles,
int num_handles, TF_Status* status,
void* device_info) {
TF_SetStatus(status, TF_UNIMPLEMENTED,
"This custom device does not support packing tensors.");
return nullptr;
}
}
extern "C" {
bool TFE_IsCustomDevice(TFE_Context* ctx, const char* device_name) {
return tensorflow::unwrap(ctx)->IsCustomDevice(device_name);
}
void TFE_RegisterCustomDevice(TFE_Context* ctx, TFE_CustomDevice device,
const char* device_name, void* device_info,
TF_Status* status) {
if (device.pack == nullptr) {
device.pack = &DefaultCustomDevicePack;
}
auto custom_device = std::make_unique<tensorflow::CustomDeviceAPI>(
ctx, device, device_info, device_name);
status->status = tensorflow::unwrap(ctx)->RegisterCustomDevice(
device_name, std::move(custom_device));
}
} | #include "tensorflow/c/eager/c_api.h"
#include <string.h>
#include <memory>
#include <string>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/platform.h"
#include "absl/strings/match.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
using tensorflow::string;
namespace {
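// Benchmarks the cost of constructing (not executing) a MatMul op.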
void BM_InitOp(::testing::benchmark::State& state) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
for (auto s : state) {
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_DeleteOp(matmul);
}
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_InitOp);
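// Benchmarks end-to-end MatMul dispatch in both synchronous and async modes;
// in async mode the executor is drained on the final iteration.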
void BM_Execute(::testing::benchmark::State& state) {
const int async = state.range(0);
state.SetLabel(async ? "ExecuteAsync" : "Execute");
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
for (auto s : state) {
TFE_OpReset(matmul, "MatMul", nullptr, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(matmul, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(matmul, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (state.iterations() >= state.max_iterations && async) {
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
}
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_Execute)->Arg(0)->Arg(1);
void BM_Execute_Identity(::testing::benchmark::State& state) {
const int async = state.range(0);
state.SetLabel(async ? "ExecuteIdentityAsync" : "ExecuteIdentity");
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* identity = TFE_NewOp(ctx, "Identity", status);
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
for (auto s : state) {
TFE_OpReset(identity, "Identity", nullptr, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(identity, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(identity, &retvals[0], &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (state.iterations() >= state.max_iterations && async) {
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
}
TFE_DeleteOp(identity);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_Execute_Identity)->Arg(0)->Arg(1);
TEST(CAPI, Context) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
TFE_DeleteContextOptions(opts);
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const int num_devices = TF_DeviceListCount(devices);
EXPECT_GE(num_devices, 1) << "At least one CPU device should exist";
for (int i = 0; i < num_devices; ++i) {
EXPECT_NE("", TF_DeviceListName(devices, i, status)) << i;
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
}
TF_DeleteDeviceList(devices);
TF_DeleteStatus(status);
}
TEST(CAPI, TensorHandle) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
CHECK_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestMatrixTensorHandle(ctx);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(h));
TF_Tensor* t = TFE_TensorHandleResolve(h, status.get());
ASSERT_EQ(16, TF_TensorByteSize(t));
float data[4] = {0};
memcpy(&data[0], TF_TensorData(t), TF_TensorByteSize(t));
EXPECT_EQ(1.0, data[0]);
EXPECT_EQ(2.0, data[1]);
EXPECT_EQ(3.0, data[2]);
EXPECT_EQ(4.0, data[3]);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(h);
TFE_DeleteContext(ctx);
}
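// Round-trips a CPU tensor through every visible device and verifies the
// bytes survive each copy.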
void TensorHandleCopyBetweenDevices(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
const int num_devices = TF_DeviceListCount(devices);
const char* kCPUDevice = "CPU:0";
for (int i = 0; i < num_devices; ++i) {
const string name(TF_DeviceListName(devices, i, status.get()));
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << i << " -- " << TF_Message(status.get());
continue;
}
auto tag = tensorflow::strings::StrCat("Device #", i, " (", name, ")");
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, ctx, name.c_str(), status.get());
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
continue;
}
TFE_TensorHandle* hdevice2 =
TFE_TensorHandleCopyToDevice(hdevice, ctx, name.c_str(), status.get());
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
continue;
}
TFE_DeleteTensorHandle(hdevice);
TFE_TensorHandle* hcopy =
TFE_TensorHandleCopyToDevice(hdevice2, ctx, kCPUDevice, status.get());
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
continue;
}
TFE_DeleteTensorHandle(hdevice2);
TF_Tensor* tcopy = TFE_TensorHandleResolve(hcopy, status.get());
TFE_DeleteTensorHandle(hcopy);
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag;
continue;
}
EXPECT_EQ(TF_TensorByteSize(t), TF_TensorByteSize(tcopy)) << tag;
EXPECT_EQ(
0, memcmp(TF_TensorData(t), TF_TensorData(tcopy), TF_TensorByteSize(t)))
<< tag;
TF_DeleteTensor(tcopy);
}
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenDevices) {
TensorHandleCopyBetweenDevices(false);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesAsync) {
TensorHandleCopyBetweenDevices(true);
}
void TensorHandleCopyBetweenDevicesError(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* kErrorDevice = "NoSuchDevice:0";
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, ctx, kErrorDevice, status.get());
EXPECT_NE(TF_OK, TF_GetCode(status.get()));
const char* msg = "NoSuchDevice:0 unknown device";
EXPECT_TRUE(strstr(TF_Message(status.get()), msg) != nullptr)
<< TF_Message(status.get());
TF_SetStatus(status.get(), TF_OK, "");
const char* kCPUDevice = "CPU:0";
TFE_TensorHandle* hcopy =
TFE_TensorHandleCopyToDevice(hcpu, ctx, kCPUDevice, status.get());
EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteTensorHandle(hcopy);
TFE_DeleteTensorHandle(hcpu);
if (hdevice != nullptr) TFE_DeleteTensorHandle(hdevice);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesError) {
TensorHandleCopyBetweenDevicesError(false);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesErrorAsync) {
TensorHandleCopyBetweenDevicesError(true);
}
void TensorHandleCopyBetweenTwoGPUDevices(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
const int num_devices = TF_DeviceListCount(devices);
bool has_gpu0 = false;
bool has_gpu1 = false;
for (int i = 0; i < num_devices; ++i) {
const char* dev = TF_DeviceListName(devices, i, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
string device_name(dev);
if (device_name.find("GPU:0") != string::npos) {
has_gpu0 = true;
}
if (device_name.find("GPU:1") != string::npos) {
has_gpu1 = true;
}
}
const char* kCPUDevice = "CPU:0";
if (!has_gpu0 || !has_gpu1) {
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteContext(ctx);
return;
}
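  // Device 0 is assumed to be the CPU, so devices 1 and 2 are the two GPUs
  // detected above.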
const string gpu_1_name(TF_DeviceListName(devices, 1, status.get()));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
const string gpu_2_name(TF_DeviceListName(devices, 2, status.get()));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, ctx, gpu_1_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_TensorHandle* hdevice2 = TFE_TensorHandleCopyToDevice(
hdevice, ctx, gpu_2_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_DeleteTensorHandle(hdevice);
TFE_TensorHandle* hcopy =
TFE_TensorHandleCopyToDevice(hdevice2, ctx, kCPUDevice, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_DeleteTensorHandle(hdevice2);
TF_Tensor* tcopy = TFE_TensorHandleResolve(hcopy, status.get());
TFE_DeleteTensorHandle(hcopy);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
EXPECT_EQ(TF_TensorByteSize(t), TF_TensorByteSize(tcopy));
EXPECT_EQ(
0, memcmp(TF_TensorData(t), TF_TensorData(tcopy), TF_TensorByteSize(t)));
TF_DeleteTensor(tcopy);
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenTwoGPUDevices) {
TensorHandleCopyBetweenTwoGPUDevices(false);
}
TEST(CAPI, TensorHandleCopyBetweenTwoGPUDevicesAsync) {
TensorHandleCopyBetweenTwoGPUDevices(true);
}
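// Executes a MatMul whose inputs live on different devices and verifies that
// the implicit host-to-device copy is cached as a local mirror on the input.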
void TensorHandleSilentCopy(bool async,
TFE_ContextDevicePlacementPolicy global_policy,
TFE_ContextDevicePlacementPolicy thread_policy,
bool cpu_op) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_ContextOptionsSetDevicePlacementPolicy(opts, global_policy);
TFE_Context* ctx = TFE_NewContext(opts, status.get());
if (thread_policy != global_policy) {
TFE_ContextSetThreadLocalDevicePlacementPolicy(ctx, thread_policy);
}
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
auto cpu_arg =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(hcpu));
auto gpu_arg =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(hgpu));
auto gpu_device = gpu_arg->device();
ASSERT_FALSE(cpu_arg->HasLocalMirror(gpu_device));
TFE_Op* matmul = MatMulOp(ctx, hcpu, hgpu);
if (cpu_op) {
string cpu_device_name;
ASSERT_TRUE(GetDeviceName(ctx, &cpu_device_name, "CPU"));
TFE_OpSetDevice(matmul, cpu_device_name.c_str(), status.get());
} else {
TFE_OpSetDevice(matmul, gpu_device_name.c_str(), status.get());
}
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ASSERT_TRUE(cpu_arg->HasLocalMirror(gpu_device));
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleSilentCopy) {
TensorHandleSilentCopy(false, TFE_DEVICE_PLACEMENT_SILENT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
TEST(CAPI, TensorHandleSilentCopyAsync) {
TensorHandleSilentCopy(true, TFE_DEVICE_PLACEMENT_SILENT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
TEST(CAPI, TensorHandleSilentCopyLocalPolicy) {
TensorHandleSilentCopy(false, TFE_DEVICE_PLACEMENT_EXPLICIT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
TEST(CAPI, TensorHandleSilentCopyLocalPolicyAsync) {
TensorHandleSilentCopy(true, TFE_DEVICE_PLACEMENT_EXPLICIT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
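// TFE_OpGetDevice must reflect the most recent TFE_OpSetDevice call as the
// op is moved from GPU:0 to CPU:0.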
void SetAndGetOpDevices(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_OpSetDevice(matmul, "GPU:0", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
const char* device_name = TFE_OpGetDevice(matmul, status);
ASSERT_TRUE(strstr(device_name, "GPU:0") != nullptr);
TFE_OpSetDevice(matmul, "CPU:0", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
device_name = TFE_OpGetDevice(matmul, status);
ASSERT_TRUE(strstr(device_name, "CPU:0") != nullptr);
}
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
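// Every TFE_TensorHandle accessor must fail gracefully on a null handle:
// TF_INVALID_ARGUMENT, an "Invalid handle" message, and a sentinel return
// value (nullptr or -1) rather than a crash.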
TEST(CAPI, TensorHandleNullptr) {
TFE_TensorHandle* h = nullptr;
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Tensor* t = TFE_TensorHandleResolve(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(t, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
const char* device_name = TFE_TensorHandleDeviceName(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_name, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
device_name = TFE_TensorHandleBackingDeviceName(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_name, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int num_dims = TFE_TensorHandleNumDims(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(num_dims, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int dim = TFE_TensorHandleDim(h, 0, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(dim, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
}
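// TFE_TensorHandleDeviceName reports where the producing op was placed;
// TFE_TensorHandleBackingDeviceName reports where the memory actually
// lives. A Shape op placed on GPU still produces CPU-backed output.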
TEST(CAPI, TensorHandleDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* device_name = TFE_TensorHandleDeviceName(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_name, "CPU:0")) << device_name;
const char* backing_device_name =
TFE_TensorHandleBackingDeviceName(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(backing_device_name, "CPU:0"))
<< backing_device_name;
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
device_name = TFE_TensorHandleDeviceName(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_name, "GPU:0")) << device_name;
backing_device_name =
TFE_TensorHandleBackingDeviceName(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(backing_device_name, "CPU:0"))
<< backing_device_name;
TFE_DeleteOp(shape_op);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
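// Exercises input buffer forwarding for Add: whether the output may alias
// the input buffer depends on async mode and on whether the caller released
// its input handle before execution (see the orig_ptr checks below).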
void ExecuteAdd(bool async, bool forward_input, bool tfrt) {
#ifdef PLATFORM_WINDOWS
// This test is skipped on Windows.
return;
#else
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, tfrt);
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* n = TestMatrixTensorHandle100x100(ctx);
std::string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* n_gpu =
TFE_TensorHandleCopyToDevice(n, ctx, gpu_device_name.c_str(), status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(n);
n = n_gpu;
}
TFE_TensorHandle* m = TestMatrixTensorHandle100x100(ctx);
TF_Tensor* orig = TFE_TensorHandleResolve(n, status);
void* orig_ptr = TF_TensorData(orig);
TF_DeleteTensor(orig);
TFE_Op* add_op = AddOp(ctx, n, m);
std::string cpu_device_name;
ASSERT_TRUE(GetDeviceName(ctx, &cpu_device_name, "CPU"));
TFE_OpSetDevice(add_op, cpu_device_name.c_str(), status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (forward_input) {
TFE_DeleteTensorHandle(n);
}
int num_retvals = 1;
if (async) {
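// Enqueue many dummy ops first so the async executor is backlogged and the
// add below genuinely executes asynchronously.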
for (int i = 0; i < 100000; ++i) {
TFE_Op* add_op_dummy = AddOp(ctx, m, m);
TFE_OpSetDevice(add_op_dummy, cpu_device_name.c_str(), status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* dummy = nullptr;
TFE_Execute(add_op_dummy, &dummy, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(dummy);
TFE_DeleteOp(add_op_dummy);
}
}
TFE_TensorHandle* retval = nullptr;
TFE_Execute(add_op, &retval, &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (!forward_input) {
TFE_DeleteTensorHandle(n);
}
TFE_DeleteOp(add_op);
TF_Tensor* t = TFE_TensorHandleResolve(retval, status);
if (async) {
// In async mode the output is expected to alias the input buffer whether or
// not the caller released its handle before execution.
EXPECT_EQ(orig_ptr, TF_TensorData(t));
} else {
if (forward_input) {
// The caller released its handle before execution, so the kernel may reuse
// the input buffer for the output.
EXPECT_EQ(orig_ptr, TF_TensorData(t));
} else {
// The caller still holds the input handle; forwarding would be unsafe, so a
// fresh output buffer is expected.
EXPECT_NE(orig_ptr, TF_TensorData(t));
}
}
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(m);
TFE_DeleteTensorHandle(retval);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float result[100 * 100] = {0};
EXPECT_EQ(sizeof(result), TF_TensorByteSize(t));
memcpy(&result[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
for (int i = 0; i < 100 * 100; ++i) {
EXPECT_EQ(2.0f, result[i]);
}
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
#endif
}
TEST(CAPI, ExecuteAdd) {
ExecuteAdd(/*async=*/false, /*forward_input=*/false, /*tfrt=*/false);
}
TEST(CAPI, DISABLED_ExecuteAddAsync) {
ExecuteAdd(/*async=*/true, /*forward_input=*/false, /*tfrt=*/false);
}
TEST(CAPI, ExecuteAddForward) {
ExecuteAdd(/*async=*/false, /*forward_input=*/true, /*tfrt=*/false);
}
TEST(CAPI, ExecuteAddForwardAsync) {
ExecuteAdd(/*async=*/true, /*forward_input=*/true, /*tfrt=*/false);
}
#ifdef PLATFORM_GOOGLE
TEST(CAPI, DISABLED_ExecuteAddTfrt) {
ExecuteAdd(/*async=*/false, /*forward_input=*/false, /*tfrt=*/true);
}
#endif
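// Runs a 2x2 MatMul on CPU and verifies the numeric result.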
void Execute_MatMul_CPU(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
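// Pass an oversized retvals array; TFE_Execute must shrink num_retvals to
// the actual number of outputs (1 for MatMul).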
TFE_TensorHandle* retvals[2] = {nullptr, nullptr};
int num_retvals = 2;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_MatMul_CPU) { Execute_MatMul_CPU(false); }
TEST(CAPI, Execute_MatMul_CPUAsync) { Execute_MatMul_CPU(true); }
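// Forces a shape-incompatibility failure at kernel runtime. Synchronously
// the error is returned directly; asynchronously it surfaces when resolving
// the output or waiting on the executor, which stays poisoned until
// TFE_ExecutorClearError is called.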
void Execute_MatMul_CPU_Runtime_Error(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* m2 = DoubleTestMatrixTensorHandle3X2(ctx);
TFE_Op* matmul = MatMulOp(ctx, m1, m2);
TFE_OpSetDevice(matmul, "/job:localhost/replica:0/task:0/device:CPU:0",
status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Op* matmul2 = MatMulOp(ctx, m1, m1);
TFE_OpSetDevice(matmul2, "/job:localhost/replica:0/task:0/device:CPU:0",
status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
TFE_DeleteOp(matmul);
if (!async) {
EXPECT_NE(TF_OK, TF_GetCode(status));
} else {
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
EXPECT_NE(TF_OK, TF_GetCode(status));
EXPECT_EQ(nullptr, t);
const char* msg = "Matrix size-incompatible: In[0]: [2,2], In[1]: [3,2]";
EXPECT_TRUE(strstr(TF_Message(status), msg) != nullptr)
<< TF_Message(status);
TF_SetStatus(status, TF_OK, "");
TFE_DeleteTensorHandle(retvals[0]);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
EXPECT_NE(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
retvals[0] = nullptr;
TFE_Execute(matmul2, &retvals[0], &num_retvals, status);
EXPECT_NE(TF_OK, TF_GetCode(status));
TFE_ExecutorClearError(executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
TF_SetStatus(status, TF_OK, "");
if (retvals[0] != nullptr) {
TFE_DeleteTensorHandle(retvals[0]);
}
retvals[0] = nullptr;
TFE_Execute(matmul2, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteTensor(t);
TFE_DeleteOp(matmul2);
TFE_DeleteTensorHandle(m1);
TFE_DeleteTensorHandle(m2);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_MatMul_CPU_Runtime_Error) {
Execute_MatMul_CPU_Runtime_Error(false);
}
TEST(CAPI, Execute_MatMul_CPU_Runtime_ErrorAsync) {
Execute_MatMul_CPU_Runtime_Error(true);
}
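// MatMul whose inputs have mismatched dtypes must fail with a non-OK status.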
void Execute_MatMul_CPU_Type_Error(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* m2 = DoubleTestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m1, m2);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_NE(TF_OK, TF_GetCode(status));
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m1);
TFE_DeleteTensorHandle(m2);
if (retvals[0] != nullptr) {
TFE_DeleteTensorHandle(retvals[0]);
}
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_MatMul_CPU_Type_Error) {
Execute_MatMul_CPU_Type_Error(false);
}
TEST(CAPI, Execute_MatMul_CPU_Type_ErrorAsync) {
Execute_MatMul_CPU_Type_Error(true);
}
TEST(CAPI, Execute_Min_CPU) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* input = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* axis = TestAxisTensorHandle(ctx);
TFE_Op* minOp = MinOp(ctx, input, axis);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(minOp, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(minOp);
TFE_DeleteTensorHandle(input);
TFE_DeleteTensorHandle(axis);
ASSERT_EQ(1, num_retvals);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
float output[2] = {0};
EXPECT_EQ(sizeof(output), TF_TensorByteSize(t));
memcpy(&output[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(1, output[0]);
EXPECT_EQ(3, output[1]);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
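// With run metadata enabled, TFE_ContextExportRunMetadata must yield a
// parseable RunMetadata proto without disturbing the op's result.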
void ExecuteWithTracing(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
TFE_ContextEnableRunMetadata(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TF_Buffer* b = TF_NewBuffer();
TFE_ContextExportRunMetadata(ctx, b, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::RunMetadata rm;
EXPECT_TRUE(
rm.ParseFromString({reinterpret_cast<const char*>(b->data), b->length}));
TF_DeleteBuffer(b);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(1, num_retvals);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
TF_DeleteStatus(status);
}
TEST(CAPI, ExecuteWithTracing) { ExecuteWithTracing(false); }
TEST(CAPI, ExecuteWithTracingAsync) { ExecuteWithTracing(true); }
REGISTER_OP("TestNonCommUnavailable")
.Output("out: string")
.Doc(R"doc(Test non-communication op throwing Unavailable error.)doc");
REGISTER_OP("TestCommUnavailable")
.Output("out: string")
.SetIsDistributedCommunication()
.Doc(R"doc(Test communication op throwing Unavailable error.)doc");
class TestUnavailableErrorOp : public tensorflow::OpKernel {
public:
explicit TestUnavailableErrorOp(tensorflow::OpKernelConstruction* ctx)
: tensorflow::OpKernel(ctx) {}
void Compute(tensorflow::OpKernelContext* ctx) override {
ctx->SetStatus(tensorflow::errors::Unavailable("Test error."));
}
};
REGISTER_KERNEL_BUILDER(
Name("TestNonCommUnavailable").Device(tensorflow::DEVICE_DEFAULT),
TestUnavailableErrorOp);
REGISTER_KERNEL_BUILDER(
Name("TestCommUnavailable").Device(tensorflow::DEVICE_DEFAULT),
TestUnavailableErrorOp);
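// Wraps the given error op in a FunctionDef (FunctionWith<op_name>) and
// returns it serialized; the test below checks that Unavailable from a
// non-communication op surfaces as Internal, while an op marked
// SetIsDistributedCommunication keeps Unavailable.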
string FunctionWithErrorOp(const tensorflow::StringPiece op_name) {
const std::string& func_str =
" signature {"
" name: 'FunctionWith__OP_NAME__'"
" output_arg {"
" name: 'out'"
" type: DT_STRING"
" }"
" }"
" node_def {"
" name: 'error_op'"
" op: '__OP_NAME__'"
" }"
" ret {"
" key: 'out'"
" value: 'error_op:out'"
" }";
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
tensorflow::str_util::StringReplace(func_str, "__OP_NAME__", op_name,
true),
&def));
return def.SerializeAsString();
}
TEST(CAPI, ExecuteOpAndFunctionWithError) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(false));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Op* non_comm_op = TFE_NewOp(ctx, "TestNonCommUnavailable", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retval[1] = {};
int num_retvals = 1;
TFE_Execute(non_comm_op, retval, &num_retvals, status);
EXPECT_EQ(TF_INTERNAL, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(non_comm_op);
TFE_Op* comm_op = TFE_NewOp(ctx, "TestCommUnavailable", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(comm_op, retval, &num_retvals, status);
EXPECT_EQ(TF_UNAVAILABLE, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(comm_op);
const string& fdef1 = FunctionWithErrorOp("TestNonCommUnavailable");
TFE_ContextAddFunctionDef(ctx, fdef1.data(), fdef1.size(), status);
TFE_Op* fn1 = TFE_NewOp(ctx, "FunctionWithTestNonCommUnavailable", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(fn1, retval, &num_retvals, status);
EXPECT_EQ(TF_INTERNAL, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(fn1);
const string& fdef2 = FunctionWithErrorOp("TestCommUnavailable");
TFE_ContextAddFunctionDef(ctx, fdef2.data(), fdef2.size(), status);
TFE_Op* fn2 = TFE_NewOp(ctx, "FunctionWithTestCommUnavailable", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(fn2, retval, &num_retvals, status);
EXPECT_EQ(TF_UNAVAILABLE, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(fn2);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
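// Serialized FunctionDef computing m = MatMul(a, a) in float32.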
string MatMulFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'm'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul'"
" op: 'MatMul'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" }"
" ret {"
" key: 'm'"
" value: 'matmul:product'"
" }",
&def));
return def.SerializeAsString();
}
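// Serialized FunctionDef computing o = Add(a, a) in float32.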
string AddFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'AddFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'o'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'output'"
" op: 'Add'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" }"
" ret {"
" key: 'o'"
" value: 'output:z'"
" }",
&def));
return def.SerializeAsString();
}
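// Registers MatMulFunction and executes it three times, clearing the kernel
// caches before the first and third runs, verifying the product each time.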
void FunctionDefAndExecute(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
string function_def = MatMulFunction();
TFE_ContextAddFunctionDef(ctx, function_def.data(), function_def.size(),
status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (bool clear_cache : {true, false, true}) {
if (clear_cache) {
TFE_ContextClearCaches(ctx);
}
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* retval[1] = {nullptr};
int num_retvals = 1;
TFE_Op* op = TFE_NewOp(ctx, "MatMulFunction", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(op, m, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &retval[0], &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(1, num_retvals);
TFE_DeleteOp(op);
TFE_DeleteTensorHandle(m);
TF_Tensor* t = TFE_TensorHandleResolve(retval[0], status);
TFE_DeleteTensorHandle(retval[0]);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
}
TFE_ContextRemoveFunction(ctx, "MatMulFunction", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
TEST(CAPI, FunctionDefAndExecute) { FunctionDefAndExecute(false); }
TEST(CAPI, FunctionDefAndExecuteAsync) { FunctionDefAndExecute(true); }
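// Runs AddFunction, optionally under TFRT and with grappler forced on via a
// serialized ConfigProto (min_graph_nodes=-1); grappler is expected to
// rewrite Add(a, a) into a Mul, which the debug-only executed-op-name check
// verifies.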
void RunAddFunction(bool use_tfrt, bool enable_grappler) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
string function_def = AddFunction();
TFE_ContextAddFunctionDef(ctx, function_def.data(), function_def.size(),
status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* retval[1] = {nullptr};
int num_retvals = 1;
TFE_Op* op = TFE_NewOp(ctx, "AddFunction", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (enable_grappler) {
tensorflow::ConfigProto config;
config.mutable_graph_options()
->mutable_rewrite_options()
->set_min_graph_nodes(-1);
string serialized_config;
ASSERT_TRUE(config.SerializeToString(&serialized_config));
TFE_OpSetAttrString(
op, "config_proto",
reinterpret_cast<const void*>(serialized_config.c_str()),
serialized_config.length());
}
if (use_tfrt) {
TFE_OpSetAttrBool(op, "TFRT_TEST_enable_native_ops", false);
TFE_OpSetAttrBool(op, "TFRT_TEST_enable_grappler", enable_grappler);
}
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(op, m, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &retval[0], &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(1, num_retvals);
TFE_DeleteOp(op);
TFE_DeleteTensorHandle(m);
TF_Tensor* t = TFE_TensorHandleResolve(retval[0], status);
TFE_DeleteTensorHandle(retval[0]);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(2, product[0]);
EXPECT_EQ(4, product[1]);
EXPECT_EQ(6, product[2]);
EXPECT_EQ(8, product[3]);
if (use_tfrt) {
TF_Buffer* buf = TF_NewBuffer();
TFE_GetExecutedOpNames(ctx, buf, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
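// The executed-op-name expectations are compiled only into debug builds
// (NDEBUG disables them).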
#ifndef NDEBUG
if (enable_grappler)
EXPECT_NE(strstr(static_cast<const char*>(buf->data), "tf.Mul"), nullptr);
else
EXPECT_NE(strstr(static_cast<const char*>(buf->data), "tf.Add"), nullptr);
#endif
TF_DeleteBuffer(buf);
}
TFE_ContextRemoveFunction(ctx, "AddFunction", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
TEST(CAPI, RunAddFunctionWithGrappler) {
RunAddFunction(/*use_tfrt=*/false, /*enable_grappler=*/true);
}
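// Benchmarks eager function execution; in async mode the executor is
// drained at the end so queued work is included in the measurement.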
void BM_ExecuteFunction(::testing::benchmark::State& state) {
const int async = state.range(0);
state.SetLabel(async ? "ExecuteFunctionAsync" : "ExecuteFunction");
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
string function_def = MatMulFunction();
TFE_ContextAddFunctionDef(ctx, function_def.data(), function_def.size(),
status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* retval[1] = {nullptr};
int num_retvals = 1;
for (auto s : state) {
TFE_Op* matmul = TFE_NewOp(ctx, "MatMulFunction", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(matmul, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(matmul, &retval[0], &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
// Free this iteration's output so the benchmark loop does not leak handles.
TFE_DeleteTensorHandle(retval[0]);
retval[0] = nullptr;
if (state.iterations() >= state.max_iterations && async) {
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
}
TFE_DeleteTensorHandle(m);
TFE_ContextRemoveFunction(ctx, "MatMulFunction", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_ExecuteFunction)->Arg(0)->Arg(1);
TEST(CAPI, Variables) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* var_handle = TestVariable(ctx, 12.0);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Op* op = TFE_NewOp(ctx, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int num_retvals = 1;
TFE_TensorHandle* value_handle = nullptr;
TFE_Execute(op, &value_handle, &num_retvals, status);
TFE_DeleteOp(op);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(value_handle));
EXPECT_EQ(0, TFE_TensorHandleNumDims(value_handle, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(value_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(12.0, value);
TFE_DeleteTensorHandle(var_handle);
TFE_DeleteTensorHandle(value_handle);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
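// Benchmarks repeated ReadVariableOp executions against a single variable.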
void BM_ReadVariable(::testing::benchmark::State& state) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* var_handle = TestVariable(ctx, 5.0);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int num_retvals = 1;
TFE_TensorHandle* h = nullptr;
for (auto s : state) {
TFE_Op* op = TFE_NewOp(ctx, "ReadVariableOp", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &h, &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
CHECK_EQ(1, num_retvals);
CHECK(h);
CHECK_EQ(TF_FLOAT, TFE_TensorHandleDataType(h));
CHECK_EQ(0, TFE_TensorHandleNumDims(h, status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(h);
h = nullptr;
TFE_DeleteOp(op);
}
TFE_DeleteTensorHandle(var_handle);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_ReadVariable);
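// Sets AvgPool's string attrs (padding, data_format) from char buffers and
// checks that a 1x1x1x1 input pools to a single float.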
TEST(CAPI, StringAttributes) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::vector<int64_t> dims(4, 1);
TFE_Op* op = TFE_NewOp(ctx, "AvgPool", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* tensor =
TF_AllocateTensor(TF_FLOAT, dims.data(), dims.size(), sizeof(float));
float tensor_data[] = {1};
memcpy(TF_TensorData(tensor), tensor_data, TF_TensorByteSize(tensor));
TFE_TensorHandle* tensor_handle = TFE_NewTensorHandle(tensor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(op, tensor_handle, status);
TF_DeleteTensor(tensor);
TFE_DeleteTensorHandle(tensor_handle);
std::vector<int64_t> values(4, 1);
TFE_OpSetAttrIntList(op, "ksize", values.data(), values.size());
TFE_OpSetAttrIntList(op, "strides", values.data(), values.size());
const int BUFFER_SIZE = 10;
char buffer[BUFFER_SIZE];
std::strncpy(buffer, "VALID", BUFFER_SIZE);
TFE_OpSetAttrString(op, "padding", buffer, std::strlen(buffer));
std::strncpy(buffer, "NHWC", BUFFER_SIZE);
TFE_OpSetAttrString(op, "data_format", buffer, std::strlen(buffer));
TFE_OpSetAttrType(op, "T", TF_FLOAT);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(op, &retvals[0], &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(1, num_retvals);
tensor = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(4, TF_TensorByteSize(tensor));
TF_DeleteTensor(tensor);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteOp(op);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
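// Same AvgPool computation, but all attrs are injected from AttrValue
// protos via SetOpAttrValueScalar instead of typed setter calls.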
TEST(CAPI, TestTFE_SetOpAttrs) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
std::vector<int64_t> dims(4, 1);
TFE_Op* op = TFE_NewOp(ctx, "AvgPool", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* tensor =
TF_AllocateTensor(TF_FLOAT, dims.data(), dims.size(), sizeof(float));
float tensor_data[] = {1};
memcpy(TF_TensorData(tensor), tensor_data, TF_TensorByteSize(tensor));
TFE_TensorHandle* tensor_handle = TFE_NewTensorHandle(tensor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(op, tensor_handle, status);
TF_DeleteTensor(tensor);
TFE_DeleteTensorHandle(tensor_handle);
tensorflow::AttrValue i_list_values;
for (int i = 0; i < 4; ++i) {
i_list_values.mutable_list()->add_i(1);
}
SetOpAttrValueScalar(ctx, op, i_list_values, "ksize", status);
SetOpAttrValueScalar(ctx, op, i_list_values, "strides", status);
tensorflow::AttrValue padding_value;
*padding_value.mutable_s() = "VALID";
tensorflow::SetOpAttrValueScalar(ctx, op, padding_value, "padding", status);
tensorflow::AttrValue data_format_value;
*data_format_value.mutable_s() = "NHWC";
tensorflow::SetOpAttrValueScalar(ctx, op, data_format_value, "data_format",
status);
TFE_OpSetAttrType(op, "T", TF_FLOAT);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(op, &retvals[0], &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(1, num_retvals);
tensor = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(4, TF_TensorByteSize(tensor));
TF_DeleteTensor(tensor);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteOp(op);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
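// A handle from TFE_TensorHandleCopySharingTensor must resolve to the same
// tensor contents as the original handle.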
TEST(CAPI, TestTFE_TensorHandleCopySharingUnderlyingTensorHandle) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
CHECK_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestMatrixTensorHandle(ctx);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(h));
TFE_TensorHandle* h_shares_tensor =
TFE_TensorHandleCopySharingTensor(h, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_Tensor* t = TFE_TensorHandleResolve(h_shares_tensor, status.get());
ASSERT_EQ(16, TF_TensorByteSize(t));
float data[4] = {0};
memcpy(&data[0], TF_TensorData(t), TF_TensorByteSize(t));
EXPECT_EQ(1.0, data[0]);
EXPECT_EQ(2.0, data[1]);
EXPECT_EQ(3.0, data[2]);
EXPECT_EQ(4.0, data[3]);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(h);
TFE_DeleteTensorHandle(h_shares_tensor);
TFE_DeleteContext(ctx);
}
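// Extracts the attributes recorded on an eager op into an AttrValueMap so
// tests can inspect what attribute inference filled in.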
tensorflow::AttrValueMap ExtractAttrs(TFE_Op* op) {
tensorflow::AttrValueMap attr_values;
tensorflow::EagerOperation* operation =
tensorflow::OperationFromInterface(tensorflow::unwrap(op));
operation->Attrs().FillAttrValueMap(&attr_values);
return attr_values;
}
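// Attribute inference: adding Min's inputs must fill in T (float, from the
// matrix) and Tidx (int32, from the axis) automatically.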
TEST(CAPI, TestTFE_OpInferSingleInputAttrs) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* input = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* axis = TestAxisTensorHandle(ctx);
TFE_Op* minOp = TFE_NewOp(ctx, "Min", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(minOp, input, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(minOp, axis, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::AttrValueMap attr_values = ExtractAttrs(minOp);
tensorflow::AttrValueMap::const_iterator attr_found = attr_values.find("T");
EXPECT_NE(attr_found, attr_values.cend());
EXPECT_EQ(attr_found->second.type(), tensorflow::DataType::DT_FLOAT);
attr_found = attr_values.find("Tidx");
EXPECT_NE(attr_found, attr_values.cend());
EXPECT_EQ(attr_found->second.type(), tensorflow::DataType::DT_INT32);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(minOp, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteOp(minOp);
TFE_DeleteTensorHandle(input);
TFE_DeleteTensorHandle(axis);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TestTFE_OpInferSingleTypeInputListAttrs) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* input1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* input2 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* dim = TestScalarTensorHandle(ctx, 0);
TFE_Op* concatOp = TFE_NewOp(ctx, "Concat", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* inputs[] = {input1, input2};
TFE_OpAddInput(concatOp, dim, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInputList(concatOp, inputs, 2, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::AttrValueMap attr_values = ExtractAttrs(concatOp);
tensorflow::AttrValueMap::const_iterator attr_found = attr_values.find("T");
EXPECT_NE(attr_found, attr_values.cend());
EXPECT_EQ(attr_found->second.type(), tensorflow::DataType::DT_FLOAT);
attr_found = attr_values.find("N");
EXPECT_NE(attr_found, attr_values.cend());
EXPECT_EQ(attr_found->second.i(), 2);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(concatOp, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteOp(concatOp);
TFE_DeleteTensorHandle(input1);
TFE_DeleteTensorHandle(input2);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(dim);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TestTFE_OpInferMixedTypeInputListAttrs) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* condition = TestScalarTensorHandle(ctx, true);
TFE_TensorHandle* t1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* t2 = TestAxisTensorHandle(ctx);
TFE_Op* assertOp = TFE_NewOp(ctx, "Assert", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(assertOp, condition, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* data[] = {condition, t1, t2};
TFE_OpAddInputList(assertOp, data, 3, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::AttrValueMap attr_values = ExtractAttrs(assertOp);
tensorflow::AttrValueMap::const_iterator attr_found = attr_values.find("T");
EXPECT_NE(attr_found, attr_values.cend());
EXPECT_EQ(attr_found->second.list().type(0), tensorflow::DataType::DT_BOOL);
EXPECT_EQ(attr_found->second.list().type(1), tensorflow::DataType::DT_FLOAT);
EXPECT_EQ(attr_found->second.list().type(2), tensorflow::DataType::DT_INT32);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(assertOp, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteOp(assertOp);
TFE_DeleteTensorHandle(condition);
TFE_DeleteTensorHandle(t1);
TFE_DeleteTensorHandle(t2);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteContext(ctx);
}
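// Adding what should be a list input through repeated TFE_OpAddInput calls
// (instead of TFE_OpAddInputList) drops the inference context, so Concat's
// T and N attrs are never inferred.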
TEST(CAPI, TestTFE_OpAttrsInferenceDisabledWhenNotCallingOpAddInputList) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* input1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* input2 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* dim = TestScalarTensorHandle(ctx, 0);
TFE_Op* concatOp = TFE_NewOp(ctx, "Concat", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* inputs[] = {input1, input2};
TFE_OpAddInput(concatOp, dim, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
CHECK(tensorflow::unwrap(concatOp)->OpDef());
TFE_OpAddInput(concatOp, inputs[0], status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_FALSE(tensorflow::unwrap(concatOp)->OpDef())
<< "Inference context is still present";
TFE_OpAddInput(concatOp, inputs[1], status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::AttrValueMap attr_values = ExtractAttrs(concatOp);
EXPECT_EQ(attr_values.find("T"), attr_values.end());
EXPECT_EQ(attr_values.find("N"), attr_values.end());
TF_DeleteStatus(status);
TFE_DeleteOp(concatOp);
TFE_DeleteTensorHandle(input1);
TFE_DeleteTensorHandle(input2);
TFE_DeleteTensorHandle(dim);
TFE_DeleteContext(ctx);
}
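// Input/output lengths are unavailable (-1, error status) before inputs are
// attached, and report the list length (2) once they are, both before and
// after execution.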
TEST(CAPI, TestTFE_OpGetInputAndOutputLengths) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* input1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* input2 = TestMatrixTensorHandle(ctx);
TFE_Op* identityOp = TFE_NewOp(ctx, "IdentityN", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(-1, TFE_OpGetInputLength(identityOp, "input", status));
CHECK_NE(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(-1, TFE_OpGetOutputLength(identityOp, "output", status));
CHECK_NE(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* inputs[] = {input1, input2};
TFE_OpAddInputList(identityOp, inputs, 2, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(2, TFE_OpGetInputLength(identityOp, "input", status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(2, TFE_OpGetOutputLength(identityOp, "output", status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[2] = {nullptr};
int num_retvals = 2;
TFE_Execute(identityOp, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(2, TFE_OpGetInputLength(identityOp, "input", status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(2, TFE_OpGetOutputLength(identityOp, "output", status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteOp(identityOp);
TFE_DeleteTensorHandle(input1);
TFE_DeleteTensorHandle(input2);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(retvals[1]);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TestTFE_OpGetInputAndOutputLengthsFailForUnknownArguments) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* input1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* input2 = TestMatrixTensorHandle(ctx);
TFE_Op* identityOp = TFE_NewOp(ctx, "IdentityN", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* inputs[] = {input1, input2};
TFE_OpAddInputList(identityOp, inputs, 2, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(-1, TFE_OpGetInputLength(identityOp, "cheese", status));
CHECK_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(-1, TFE_OpGetOutputLength(identityOp, "cheese", status));
CHECK_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteOp(identityOp);
TFE_DeleteTensorHandle(input1);
TFE_DeleteTensorHandle(input2);
TFE_DeleteContext(ctx);
}
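// TFE_OpAddAttrs copies captured attributes onto another op; the shape attr
// carries over, while the dtype already set on the destination op is
// expected to win (checked below as DT_FLOAT).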
void TestOpAddAttrs(bool use_tfrt) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, use_tfrt);
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Op* var_op = TFE_NewOp(ctx, "VarHandleOp", status);
TFE_OpSetAttrType(var_op, "dtype", TF_INT64);
TFE_OpSetAttrShape(var_op, "shape", {}, 0, status);
const TFE_OpAttrs* attributes = TFE_OpGetAttrs(var_op);
TFE_Op* copy_op = TFE_NewOp(ctx, "VarHandleOp", status);
TFE_OpSetAttrType(copy_op, "dtype", TF_FLOAT);
TFE_OpAddAttrs(copy_op, attributes);
unsigned char is_list = 0;
ASSERT_EQ(TF_ATTR_TYPE,
TFE_OpGetAttrType(copy_op, "dtype", &is_list, status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(TF_ATTR_SHAPE,
TFE_OpGetAttrType(copy_op, "shape", &is_list, status));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::AttrValueMap attr_values;
tensorflow::EagerOperation* op =
tensorflow::OperationFromInterface(tensorflow::unwrap(copy_op));
op->Attrs().FillAttrValueMap(&attr_values);
EXPECT_EQ(tensorflow::DT_FLOAT, attr_values.find("dtype")->second.type());
TF_DeleteStatus(status);
TFE_DeleteOp(var_op);
TFE_DeleteOp(copy_op);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TestTFE_OpAddAttrs) { TestOpAddAttrs(false); }
TEST(CAPI, TestTFE_OpAttrsSerialize) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Op* var_op = TFE_NewOp(ctx, "VarHandleOp", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(var_op, "dtype", TF_INT64);
TFE_OpSetAttrShape(var_op, "shape", {}, 0, status);
const TFE_OpAttrs* attributes = TFE_OpGetAttrs(var_op);
TF_Buffer* serialized_attr_values = TF_NewBuffer();
TFE_OpAttrsSerialize(attributes, serialized_attr_values, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::NameAttrList name_and_attrs;
ASSERT_TRUE(name_and_attrs.ParseFromArray(serialized_attr_values->data,
serialized_attr_values->length));
ASSERT_EQ("VarHandleOp", name_and_attrs.name());
ASSERT_EQ(tensorflow::DT_INT64,
name_and_attrs.attr().find("dtype")->second.type());
TF_DeleteBuffer(serialized_attr_values);
TFE_Op* var_op_2 = TFE_NewOp(ctx, "VarHandleOp", status);
string serialized_dtype;
ASSERT_TRUE(name_and_attrs.attr().find("dtype")->second.SerializeToString(
&serialized_dtype));
TFE_OpSetAttrValueProto(
var_op_2, "dtype",
reinterpret_cast<const void*>(serialized_dtype.c_str()),
serialized_dtype.length(), status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::AttrValueMap attr_values;
tensorflow::EagerOperation* op =
tensorflow::OperationFromInterface(tensorflow::unwrap(var_op_2));
op->Attrs().FillAttrValueMap(&attr_values);
EXPECT_EQ(tensorflow::DT_INT64, attr_values.find("dtype")->second.type());
TF_DeleteStatus(status);
TFE_DeleteOp(var_op);
TFE_DeleteOp(var_op_2);
TFE_DeleteContext(ctx);
}
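// Clones an op using only public introspection: context, op name, device,
// attributes, and flat inputs; used to test op recreation below.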
TFE_Op* CloneOp(const TFE_Op* other) {
TF_Status* status = TF_NewStatus();
TFE_Context* context = TFE_OpGetContext(other, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const char* op_name = TFE_OpGetName(other, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Op* ret = TFE_NewOp(context, op_name, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const char* device = TFE_OpGetDevice(other, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetDevice(ret, device, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddAttrs(ret, TFE_OpGetAttrs(other));
int num_inputs = TFE_OpGetFlatInputCount(other, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (int input_index = 0; input_index < num_inputs; ++input_index) {
TFE_TensorHandle* input = TFE_OpGetFlatInput(other, input_index, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(ret, input, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
}
TF_DeleteStatus(status);
return ret;
}
TEST(CAPI, TestTFE_OpRecreation) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Op* original_var_op = TFE_NewOp(ctx, "VarHandleOp", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(original_var_op, "dtype", TF_INT64);
TFE_OpSetAttrShape(original_var_op, "shape", {}, 0, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ("", std::string(TFE_OpGetDevice(original_var_op, status)));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetDevice(original_var_op,
"/job:localhost/replica:0/task:0/device:CPU:0", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Op* cloned = CloneOp(original_var_op);
EXPECT_EQ("/job:localhost/replica:0/task:0/device:CPU:0",
std::string(TFE_OpGetDevice(cloned, status)));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ("VarHandleOp", std::string(TFE_OpGetName(cloned, status)));
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int num_retvals = 1;
TFE_TensorHandle* ret;
TFE_Execute(cloned, &ret, &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(ret);
TFE_TensorHandle* input1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* input2 = TestMatrixTensorHandle(ctx);
TFE_Op* original_identity = TFE_NewOp(ctx, "IdentityN", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* inputs[] = {input1, input2};
TFE_OpAddInputList(original_identity, inputs, 2, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Op* cloned_identity = CloneOp(original_identity);
EXPECT_EQ("", std::string(TFE_OpGetDevice(cloned_identity, status)));
TFE_TensorHandle* identity_ret[] = {nullptr, nullptr};
num_retvals = 2;
TFE_Execute(cloned_identity, identity_ret, &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(input1);
TFE_DeleteTensorHandle(input2);
TFE_DeleteTensorHandle(identity_ret[0]);
TFE_DeleteTensorHandle(identity_ret[1]);
TFE_DeleteOp(cloned_identity);
TFE_DeleteOp(original_identity);
TFE_DeleteOp(original_var_op);
TFE_DeleteOp(cloned);
TF_DeleteStatus(status);
TFE_DeleteContext(ctx);
}
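// With isolate_session_state=false, two eager contexts on the same cluster
// share resource state: a variable created through ctx_0 on a remote task
// is readable through ctx_1.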
TEST(CAPI, ShareVariableAcrossContextsWorks) {
tensorflow::ServerDef server_def_0 = GetServerDef(3);
server_def_0.mutable_default_session_config()->set_isolate_session_state(
false);
tensorflow::ServerDef server_def_1 =
ReplaceTaskInServerDef(server_def_0, 0);
string serialized_server_def_0 = server_def_0.SerializeAsString();
string serialized_server_def_1 = server_def_1.SerializeAsString();
server_def_0.set_task_index(1);
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
server_def_0.set_task_index(2);
std::unique_ptr<tensorflow::GrpcServer> worker_server2;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server2)
.ok());
ASSERT_TRUE(worker_server2->Start().ok());
TFE_Context* ctx_0 = CreateContext(serialized_server_def_0,
false,
0);
TFE_Context* ctx_1 = CreateContext(serialized_server_def_1,
false,
0);
const char remote_device[] = "/job:localhost/replica:0/task:1/device:CPU:0";
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_0);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_1);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
TFE_TensorHandle* handle_0 =
CreateVariable(ctx_0, 1.2, remote_device, "var2");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(ctx_0, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
{
TFE_TensorHandle* var_handle =
CreateVarHandle(ctx_1, remote_device, "var2");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op = TFE_NewOp(ctx_1, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteTensorHandle(handle_0);
TFE_DeleteContext(ctx_0);
TFE_DeleteContext(ctx_1);
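// Intentionally release (leak) the worker servers instead of destroying
// them at teardown.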
worker_server1.release();
worker_server2.release();
}
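// Same sharing scenario, but worker task 1 is restarted on a new port and
// both contexts pick it up via TFE_ContextUpdateServerDef before the
// variable is read back.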
TEST(CAPI, ShareVariableAcrossContextsAfterUpdateContextWorks) {
tensorflow::ServerDef server_def_0 = GetServerDef(3);
server_def_0.mutable_default_session_config()->set_isolate_session_state(
false);
tensorflow::ServerDef server_def_1 =
ReplaceTaskInServerDef(server_def_0, 0);
string serialized_server_def_0 = server_def_0.SerializeAsString();
string serialized_server_def_1 = server_def_1.SerializeAsString();
server_def_0.set_task_index(1);
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
server_def_0.set_task_index(2);
std::unique_ptr<tensorflow::GrpcServer> worker_server2;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server2)
.ok());
ASSERT_TRUE(worker_server2->Start().ok());
TFE_Context* ctx_0 = CreateContext(serialized_server_def_0,
false,
0);
TFE_Context* ctx_1 = CreateContext(serialized_server_def_1,
false,
0);
const char remote_device[] = "/job:localhost/replica:0/task:2/device:CPU:0";
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_0);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_1);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
TFE_TensorHandle* handle_0 =
CreateVariable(ctx_0, 1.2, remote_device, "var");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(ctx_0, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
int port = tensorflow::testing::PickUnusedPortOrDie();
ReplaceTaskInServerDef(&server_def_0, 1, "localhost", port);
ReplaceTaskInServerDef(&server_def_1, 1, "localhost", port);
server_def_0.set_task_index(1);
worker_server1.release();
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
{
server_def_0.set_task_index(0);
string serialized_update = server_def_0.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDef(ctx_0, 0, serialized_update.data(),
serialized_update.size(), status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
server_def_1.set_task_index(0);
string serialized_update = server_def_1.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDef(ctx_1, 0, serialized_update.data(),
serialized_update.size(), status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
TFE_TensorHandle* var_handle =
CreateVarHandle(ctx_1, remote_device, "var");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op = TFE_NewOp(ctx_1, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteTensorHandle(handle_0);
TFE_DeleteContext(ctx_0);
TFE_DeleteContext(ctx_1);
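  // The worker servers are leaked on purpose; see the note above about
  // GrpcServer shutdown.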
worker_server1.release();
worker_server2.release();
}
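// Builds a server def whose cluster holds a single-task "client" job on a
// fresh local port plus the one "worker" task at `task_index`, copied from the
// first job of `cluster_server_def`.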
tensorflow::ServerDef CreateSingleHostServerDef(
const tensorflow::ServerDef& cluster_server_def, int task_index) {
tensorflow::ServerDef single_host_server_def;
single_host_server_def.set_job_name("worker");
single_host_server_def.set_protocol(cluster_server_def.protocol());
single_host_server_def.set_task_index(0);
tensorflow::ClusterDef* cluster_def =
single_host_server_def.mutable_cluster();
tensorflow::JobDef* job_def = cluster_def->add_job();
job_def->set_name("client");
job_def->mutable_tasks()->insert(
{0, tensorflow::strings::StrCat(
"localhost:", tensorflow::testing::PickUnusedPortOrDie())});
tensorflow::JobDef* job_def2 = cluster_def->add_job();
job_def2->set_name("worker");
for (auto task : cluster_server_def.cluster().job(0).tasks()) {
if (task.first == task_index) {
job_def2->mutable_tasks()->insert({task.first, task.second});
}
}
return single_host_server_def;
}
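// Returns a server def with the usual worker job plus an extra single-task
// "client" job on a freshly picked local port.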
tensorflow::ServerDef GetClusterServerDef(const string& worker_job_name,
int num_workers) {
tensorflow::ServerDef server_def = GetServerDef(worker_job_name, num_workers);
tensorflow::ClusterDef* cluster_def = server_def.mutable_cluster();
tensorflow::JobDef* job_def2 = cluster_def->add_job();
job_def2->set_name("client");
job_def2->mutable_tasks()->insert(
{0, tensorflow::strings::StrCat(
"localhost:", tensorflow::testing::PickUnusedPortOrDie())});
return server_def;
}
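// Creates a variable through a context built from a single-host server def and
// reads it back through a context built from the full cluster server def.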
TEST(CAPI, SingleHostServerDefV1Works) {
tensorflow::ServerDef cluster_server_def = GetClusterServerDef("worker", 2);
tensorflow::ServerDef worker_1_server_def =
CreateSingleHostServerDef(cluster_server_def, 1);
worker_1_server_def.set_task_index(1);
worker_1_server_def.set_job_name("worker");
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(worker_1_server_def,
tensorflow::Env::Default(),
&worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
worker_1_server_def.set_task_index(0);
worker_1_server_def.set_job_name("client");
  TFE_Context* local_ctx =
      CreateContext(worker_1_server_def.SerializeAsString(), false, 0);
const char remote_device[] = "/job:worker/replica:0/task:1/device:CPU:0";
TFE_TensorHandle* handle_0 =
CreateVariable(local_ctx, 1.2, remote_device, "var");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(local_ctx, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(handle_0);
tensorflow::ServerDef worker_0_server_def =
CreateSingleHostServerDef(cluster_server_def, 0);
worker_0_server_def.set_task_index(0);
std::unique_ptr<tensorflow::GrpcServer> worker_server0;
ASSERT_TRUE(tensorflow::GrpcServer::Create(worker_0_server_def,
tensorflow::Env::Default(),
&worker_server0)
.ok());
ASSERT_TRUE(worker_server0->Start().ok());
cluster_server_def.set_task_index(0);
cluster_server_def.set_job_name("client");
  TFE_Context* remote_ctx =
      CreateContext(cluster_server_def.SerializeAsString(), false, 0);
{
TFE_TensorHandle* var_handle =
CreateVarHandle(remote_ctx, remote_device, "var");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op = TFE_NewOp(remote_ctx, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteContext(local_ctx);
TFE_DeleteContext(remote_ctx);
worker_server1.release();
worker_server0.release();
}
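// Mirror image of the V1 test above: the variable is created through a context
// built from the full cluster server def and read back through a context built
// from a single-host server def.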
TEST(CAPI, SingleHostServerDefV2Works) {
tensorflow::ServerDef cluster_server_def = GetClusterServerDef("worker", 2);
cluster_server_def.set_task_index(0);
cluster_server_def.set_job_name("worker");
std::unique_ptr<tensorflow::GrpcServer> worker_server0;
ASSERT_TRUE(tensorflow::GrpcServer::Create(cluster_server_def,
tensorflow::Env::Default(),
&worker_server0)
.ok());
ASSERT_TRUE(worker_server0->Start().ok());
cluster_server_def.set_task_index(1);
cluster_server_def.set_job_name("worker");
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(cluster_server_def,
tensorflow::Env::Default(),
&worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
cluster_server_def.set_task_index(0);
cluster_server_def.set_job_name("client");
  TFE_Context* ctx_with_cluster_server_def =
      CreateContext(cluster_server_def.SerializeAsString(), false, 0);
const char worker_1_device[] = "/job:worker/replica:0/task:1/device:CPU:0";
TFE_TensorHandle* handle_0 =
CreateVariable(ctx_with_cluster_server_def, 1.2, worker_1_device,
"var");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(ctx_with_cluster_server_def, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(handle_0);
tensorflow::ServerDef worker_1_server_def =
CreateSingleHostServerDef(cluster_server_def, 1);
worker_1_server_def.set_task_index(0);
worker_1_server_def.set_job_name("client");
  TFE_Context* ctx_with_worker_1_server_def =
      CreateContext(worker_1_server_def.SerializeAsString(), false, 0);
{
TFE_TensorHandle* var_handle = CreateVarHandle(
ctx_with_worker_1_server_def, worker_1_device, "var");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op =
TFE_NewOp(ctx_with_worker_1_server_def, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteContext(ctx_with_worker_1_server_def);
TFE_DeleteContext(ctx_with_cluster_server_def);
worker_server1.release();
worker_server0.release();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9aff0930-974e-4a79-88e7-15f021c37799 | cpp | tensorflow/tensorflow | verifier | tensorflow/lite/core/tools/verifier.cc | tensorflow/lite/core/tools/verifier_test.cc | #include "tensorflow/lite/core/tools/verifier.h"
#include <stdarg.h>
#include <algorithm>
#include <climits>
#include <complex>
#include <cstdint>
#include <cstring>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/types/optional.h"
#include "flatbuffers/string.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/tools/verifier_internal.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
const char* NameOrEmptyString(const flatbuffers::String* str) {
if (str == nullptr || str->c_str() == nullptr) {
return "";
}
return str->c_str();
}
bool IsNullOrEmptyString(const flatbuffers::String* str) {
return strcmp(NameOrEmptyString(str), "") == 0;
}
void ReportError(ErrorReporter* error_reporter, const char* format, ...) {
if (error_reporter) {
va_list args;
va_start(args, format);
TF_LITE_REPORT_ERROR(error_reporter, format, args);
va_end(args);
}
}
uint32_t GetIntPtr(const char* ptr) {
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
return flatbuffers::EndianScalar(*reinterpret_cast<const uint32_t*>(ptr));
#else
return *reinterpret_cast<const uint32_t*>(ptr);
#endif
}
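// Upper bound on the number of strings in a string tensor, chosen so that the
// (num_strings + 2)-entry int32 offset table cannot overflow a uint32 byte
// count.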
const uint32_t kMaxNumString = UINT_MAX / sizeof(int32_t) - 2;
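// Verifies the flat serialization of a string tensor. A sketch of the layout
// the checks below assume (each word read through GetIntPtr):
//
//   word 0:                     num_strings
//   word 1:                     offset of string 0 == (num_strings + 2) * 4
//   words 2 .. num_strings + 1: end offset of each string; the last entry
//                               must equal the total buffer size
//   remaining bytes:            the concatenated string payloads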
bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
ErrorReporter* error_reporter) {
uint32_t buffer_size = buffer.data()->size();
if (buffer_size < sizeof(uint32_t)) {
ReportError(error_reporter, "String tensor %s is invalid (empty)",
NameOrEmptyString(tensor.name()));
return false;
}
const char* buffer_ptr = reinterpret_cast<const char*>(buffer.data()->data());
uint32_t num_strings = GetIntPtr(buffer_ptr);
if (num_strings > kMaxNumString) {
    ReportError(error_reporter,
                "String tensor %s has an invalid number of strings: %d",
                NameOrEmptyString(tensor.name()), num_strings);
return false;
}
uint32_t header_offsets =
static_cast<uint32_t>(num_strings + 2) * sizeof(int32_t);
if (buffer_size < header_offsets) {
ReportError(error_reporter,
"String tensor %s buffer requires at least %d bytes, but is "
"allocated with %d bytes",
NameOrEmptyString(tensor.name()), header_offsets, buffer_size);
return false;
}
uint32_t prev_ptr = header_offsets;
uint32_t offset = sizeof(int32_t);
if (GetIntPtr(buffer_ptr + offset) != header_offsets) {
ReportError(error_reporter,
"String tensor %s buffer initial offset must be: %d",
NameOrEmptyString(tensor.name()), header_offsets);
return false;
}
offset += sizeof(int32_t);
for (int i = 1, end = num_strings; i <= end; i++, offset += sizeof(int32_t)) {
int string_offset = GetIntPtr(buffer_ptr + offset);
if (string_offset < static_cast<int>(prev_ptr) ||
string_offset > static_cast<int>(buffer_size)) {
ReportError(error_reporter,
"String tensor %s buffer is invalid: index %d",
NameOrEmptyString(tensor.name()), i);
return false;
}
}
if (GetIntPtr(buffer_ptr + offset - sizeof(int32_t)) != buffer_size) {
ReportError(error_reporter,
"String tensor %s buffer last offset must be %d",
NameOrEmptyString(tensor.name()), buffer_size);
return false;
}
return true;
}
bool CheckArraySegments(const DimensionMetadata* dim_metadata) {
if (dim_metadata->array_segments() == nullptr) {
return false;
}
switch (dim_metadata->array_segments_type()) {
case SparseIndexVector_Int32Vector:
return (dim_metadata->array_segments_as_Int32Vector()->values() !=
nullptr);
case SparseIndexVector_Uint16Vector:
return (dim_metadata->array_segments_as_Uint16Vector()->values() !=
nullptr);
case SparseIndexVector_Uint8Vector:
return (dim_metadata->array_segments_as_Uint8Vector()->values() !=
nullptr);
default:
return false;
}
}
int GetSizeOfSegments(const DimensionMetadata* dim_metadata) {
switch (dim_metadata->array_segments_type()) {
case SparseIndexVector_Int32Vector:
return dim_metadata->array_segments_as_Int32Vector()->values()->size();
case SparseIndexVector_Uint16Vector:
return dim_metadata->array_segments_as_Uint16Vector()->values()->size();
case SparseIndexVector_Uint8Vector:
return dim_metadata->array_segments_as_Uint8Vector()->values()->size();
default:
return -1;
}
}
int GetValueOfSegmentsAt(const DimensionMetadata* dim_metadata, const int i) {
switch (dim_metadata->array_segments_type()) {
case SparseIndexVector_Int32Vector:
return static_cast<int>(
dim_metadata->array_segments_as_Int32Vector()->values()->Get(i));
case SparseIndexVector_Uint16Vector:
return static_cast<int>(
dim_metadata->array_segments_as_Uint16Vector()->values()->Get(i));
case SparseIndexVector_Uint8Vector:
return static_cast<int>(
dim_metadata->array_segments_as_Uint8Vector()->values()->Get(i));
default:
return -1;
}
}
bool CheckArrayIndices(const DimensionMetadata* dim_metadata) {
if (dim_metadata->array_indices() == nullptr) {
return false;
}
switch (dim_metadata->array_indices_type()) {
case SparseIndexVector_Int32Vector:
return (dim_metadata->array_indices_as_Int32Vector()->values() !=
nullptr);
case SparseIndexVector_Uint16Vector:
return (dim_metadata->array_indices_as_Uint16Vector()->values() !=
nullptr);
case SparseIndexVector_Uint8Vector:
return (dim_metadata->array_indices_as_Uint8Vector()->values() !=
nullptr);
default:
return false;
}
}
int GetSizeOfIndices(const DimensionMetadata* dim_metadata) {
switch (dim_metadata->array_indices_type()) {
case SparseIndexVector_Int32Vector:
return dim_metadata->array_indices_as_Int32Vector()->values()->size();
case SparseIndexVector_Uint16Vector:
return dim_metadata->array_indices_as_Uint16Vector()->values()->size();
case SparseIndexVector_Uint8Vector:
return dim_metadata->array_indices_as_Uint8Vector()->values()->size();
default:
return -1;
}
}
int GetValueOfIndicesAt(const DimensionMetadata* dim_metadata, const int i) {
switch (dim_metadata->array_indices_type()) {
case SparseIndexVector_Int32Vector:
return static_cast<int>(
dim_metadata->array_indices_as_Int32Vector()->values()->Get(i));
case SparseIndexVector_Uint16Vector:
return static_cast<int>(
dim_metadata->array_indices_as_Uint16Vector()->values()->Get(i));
case SparseIndexVector_Uint8Vector:
return static_cast<int>(
dim_metadata->array_indices_as_Uint8Vector()->values()->Get(i));
default:
return -1;
}
}
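// Walks the dimension metadata in traversal order, checking dense sizes
// against the expanded shape and the segment/index arrays of sparse
// (CSR-style) dimensions. Returns the number of stored elements, or
// absl::nullopt on any inconsistency.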
absl::optional<uint64_t> VerifyAndCountElements(
const SparsityParameters& sparsity, const std::vector<int>& dim_sizes) {
const int total_level = sparsity.traversal_order()->size();
uint64_t num_elements = 1;
for (int i = 0; i < total_level; i++) {
const int original_dim = sparsity.traversal_order()->Get(i);
const auto* dim_metadata = sparsity.dim_metadata()->Get(i);
if (dim_metadata->format() == DimensionType_DENSE) {
if (dim_metadata->dense_size() != dim_sizes[original_dim]) {
return absl::nullopt;
}
num_elements *= dim_metadata->dense_size();
} else {
if (!CheckArraySegments(dim_metadata) ||
!CheckArrayIndices(dim_metadata)) {
return absl::nullopt;
}
int array_segments_size = GetSizeOfSegments(dim_metadata);
int array_indices_size = GetSizeOfIndices(dim_metadata);
for (int j = 0; j < array_segments_size - 1; j++) {
if (GetValueOfSegmentsAt(dim_metadata, j) < 0 ||
GetValueOfSegmentsAt(dim_metadata, j + 1) < 0 ||
GetValueOfSegmentsAt(dim_metadata, j) >
GetValueOfSegmentsAt(dim_metadata, j + 1)) {
return absl::nullopt;
}
}
if (static_cast<int>(num_elements) != array_segments_size - 1) {
return absl::nullopt;
}
if (array_indices_size !=
GetValueOfSegmentsAt(dim_metadata, array_segments_size - 1)) {
return absl::nullopt;
}
for (int j = 0; j < array_indices_size; j++) {
if (GetValueOfIndicesAt(dim_metadata, j) < 0 ||
GetValueOfIndicesAt(dim_metadata, j) >= dim_sizes[original_dim]) {
return absl::nullopt;
}
}
num_elements = array_indices_size;
}
}
return num_elements;
}
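// Validates a tensor's sparsity metadata: the traversal order must be a
// permutation of the dimension indices (original dimensions first, then block
// dimensions) and the block map must match the block rank; mapped dimensions
// are shrunk by their block sizes before counting. Returns the stored element
// count on success.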
absl::optional<uint64_t> VerifyAndCountSparseElements(const Tensor& tensor) {
const auto* sparsity = tensor.sparsity();
if (sparsity->traversal_order() == nullptr ||
sparsity->dim_metadata() == nullptr) {
return absl::nullopt;
}
const int total_dims = sparsity->traversal_order()->size();
const int original_rank = tensor.shape()->size();
const int sparsity_dim_metadata_size = sparsity->dim_metadata()->size();
if (total_dims < original_rank || sparsity_dim_metadata_size != total_dims) {
return absl::nullopt;
}
const int block_rank = total_dims - original_rank;
if (block_rank > 0) {
if (sparsity->block_map() == nullptr) {
return absl::nullopt;
}
const int sparse_rank = sparsity->block_map()->size();
if (sparse_rank != block_rank) {
return absl::nullopt;
}
}
std::vector<int> traversal_order(total_dims);
for (int i = 0; i < total_dims; i++) {
traversal_order[i] = sparsity->traversal_order()->Get(i);
}
std::sort(traversal_order.begin(), traversal_order.begin() + original_rank);
for (int i = 0; i < original_rank; i++) {
if (traversal_order[i] != i) {
return absl::nullopt;
}
}
std::sort(traversal_order.begin() + original_rank, traversal_order.end());
for (int i = original_rank; i < total_dims; i++) {
if (traversal_order[i] != i) {
return absl::nullopt;
}
}
std::vector<int> expanded_dim_sizes;
expanded_dim_sizes.resize(total_dims);
for (int i = 0; i < original_rank; i++) {
expanded_dim_sizes[i] = tensor.shape()->Get(i);
}
for (int i = 0; i < block_rank; i++) {
int original_block_dim =
sparsity->traversal_order()->Get(i + original_rank);
if (original_block_dim < 0 || original_block_dim >= total_dims) {
return absl::nullopt;
}
int block_dim_size =
sparsity->dim_metadata()->Get(i + original_rank)->dense_size();
if (block_dim_size <= 0) {
return absl::nullopt;
}
expanded_dim_sizes[original_block_dim] = block_dim_size;
int mapped_block_dim = sparsity->block_map()->Get(i);
if (mapped_block_dim < 0 || mapped_block_dim >= total_dims) {
return absl::nullopt;
}
expanded_dim_sizes[mapped_block_dim] /= block_dim_size;
}
return VerifyAndCountElements(*sparsity, expanded_dim_sizes);
}
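// Checks that a dense or sparse numeric tensor's buffer holds exactly the
// number of bytes implied by its shape (or sparse element count) and element
// type, guarding every intermediate product against uint32 overflow.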
bool VerifyNumericTensorBuffer(const Tensor& tensor, const Buffer& buffer,
ErrorReporter* error_reporter) {
uint64_t bytes_required = 1;
if (!tensor.shape()) {
return true;
}
if (tensor.sparsity() != nullptr) {
const auto num_elements = VerifyAndCountSparseElements(tensor);
if (!num_elements.has_value()) {
ReportError(error_reporter, "Tensor %s has invalid sparsity parameters",
NameOrEmptyString(tensor.name()));
return false;
}
bytes_required = num_elements.value();
if (bytes_required > UINT_MAX) {
ReportError(error_reporter, "Tensor %s dimension overflow",
NameOrEmptyString(tensor.name()));
return false;
}
} else {
for (int dim : *tensor.shape()) {
bytes_required *= dim;
if (bytes_required > UINT_MAX) {
ReportError(error_reporter, "Tensor %s dimension overflow",
NameOrEmptyString(tensor.name()));
return false;
}
}
}
switch (tensor.type()) {
case TensorType_FLOAT32:
bytes_required *= sizeof(float);
break;
case TensorType_FLOAT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_BFLOAT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_FLOAT64:
bytes_required *= sizeof(double);
break;
case TensorType_INT32:
bytes_required *= sizeof(int32_t);
break;
case TensorType_UINT32:
bytes_required *= sizeof(uint32_t);
break;
case TensorType_INT4:
bytes_required *= sizeof(int8_t);
break;
case TensorType_UINT8:
bytes_required *= sizeof(uint8_t);
break;
case TensorType_INT8:
bytes_required *= sizeof(int8_t);
break;
case TensorType_INT64:
bytes_required *= sizeof(int64_t);
break;
case TensorType_UINT64:
bytes_required *= sizeof(uint64_t);
break;
case TensorType_BOOL:
bytes_required *= sizeof(bool);
break;
case TensorType_INT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_UINT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_COMPLEX64:
bytes_required *= sizeof(std::complex<float>);
break;
case TensorType_COMPLEX128:
bytes_required *= sizeof(std::complex<double>);
break;
default:
ReportError(error_reporter, "Tensor %s invalid type: %d",
NameOrEmptyString(tensor.name()), tensor.type());
return false;
}
if (bytes_required > UINT_MAX) {
ReportError(error_reporter, "Tensor %s dimension overflow",
NameOrEmptyString(tensor.name()));
return false;
}
if (bytes_required != buffer.data()->size()) {
ReportError(
error_reporter,
"Tensor %s requires %d bytes, but is allocated with %d bytes buffer",
NameOrEmptyString(tensor.name()), bytes_required,
buffer.data()->size());
return false;
}
return true;
}
using flatbuffers::Offset;
using flatbuffers::Vector;
bool VerifyOperators(const Vector<Offset<Operator>>& operators,
ErrorReporter* error_reporter) {
for (const auto* op : operators) {
if (!op->inputs()) {
ReportError(error_reporter, "Missing 'inputs' for operator.");
return false;
}
if (!op->outputs()) {
ReportError(error_reporter, "Missing 'outputs' for operator.");
return false;
}
}
return true;
}
bool IsConstantTensor(const Tensor& tensor, const Model& model) {
if (!tensor.buffer() || !model.buffers()) return false;
if (tensor.buffer() > 0 && tensor.buffer() < model.buffers()->size()) {
auto* buffer = model.buffers()->Get(tensor.buffer());
if (buffer && buffer->data()) {
return true;
}
}
return false;
}
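// Checks dataflow inside a subgraph: every op input must come from a constant,
// a variable, a subgraph input, or an earlier op's output, and no op may write
// to a constant, a variable, a subgraph input, or a tensor that was already
// produced (which would mean a cycle).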
bool VerifySubGraphConsistency(const Model& model, const SubGraph& subgraph,
ErrorReporter* error_reporter) {
absl::flat_hash_set<int> subgraph_input_tensors, constant_tensors,
variable_tensors, output_tensors;
if (subgraph.tensors()) {
for (int i = 0, end = subgraph.tensors()->size(); i < end; ++i) {
const auto* tensor = subgraph.tensors()->Get(i);
if (IsConstantTensor(*tensor, model)) {
constant_tensors.insert(i);
} else if (tensor->is_variable()) {
variable_tensors.insert(i);
}
}
}
if (subgraph.inputs()) {
for (const int tensor_idx : *subgraph.inputs()) {
subgraph_input_tensors.insert(tensor_idx);
}
}
if (subgraph.operators()) {
for (int op_idx = 0, end = subgraph.operators()->size(); op_idx < end;
++op_idx) {
const auto* op = subgraph.operators()->Get(op_idx);
if (!model.operator_codes() ||
(op->opcode_index() >= model.operator_codes()->size())) {
ReportError(error_reporter,
"Operator %d does not exist in model op codes",
op->opcode_index());
return false;
}
const auto& opcode = model.operator_codes()->Get(op->opcode_index());
auto builtin_code = GetBuiltinCode(opcode);
for (const int input_idx : *op->inputs()) {
if (input_idx == kTfLiteOptionalTensor) continue;
if (constant_tensors.find(input_idx) == constant_tensors.end() &&
variable_tensors.find(input_idx) == variable_tensors.end() &&
subgraph_input_tensors.find(input_idx) ==
subgraph_input_tensors.end() &&
output_tensors.find(input_idx) == output_tensors.end()) {
ReportError(error_reporter,
"Input tensor %d to op %d (%s) is not produced",
input_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
return false;
}
}
for (const int output_idx : *op->outputs()) {
if (constant_tensors.find(output_idx) != constant_tensors.end()) {
ReportError(
error_reporter, "Output tensor %d to op %d (%s) is a constant",
output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
return false;
} else if (variable_tensors.find(output_idx) !=
variable_tensors.end()) {
ReportError(
error_reporter, "Output tensor %d to op %d (%s) is a variable",
output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
return false;
} else if (subgraph_input_tensors.find(output_idx) !=
subgraph_input_tensors.end()) {
ReportError(error_reporter,
"Output tensor %d to op %d (%s) is a subgraph input",
output_idx, op_idx,
EnumNameBuiltinOperator(builtin_code));
return false;
} else if (output_tensors.find(output_idx) != output_tensors.end()) {
ReportError(error_reporter,
"Output tensor %d to op %d (%s) is an output from "
"another op. There is a cycle in the graph",
output_idx, op_idx,
EnumNameBuiltinOperator(builtin_code));
return false;
}
output_tensors.insert(output_idx);
}
}
}
return true;
}
bool VerifySubGraphs(const Model& model, ErrorReporter* error_reporter) {
if (!model.subgraphs()) {
ReportError(error_reporter, "Missing 'subgraphs' section.");
return false;
}
for (const auto* subgraph : *model.subgraphs()) {
if (!subgraph->operators()) {
ReportError(error_reporter, "Missing 'operators' section in subgraph.");
return false;
}
if (!VerifyOperators(*subgraph->operators(), error_reporter)) {
return false;
}
if (!VerifySubGraphConsistency(model, *subgraph, error_reporter)) {
return false;
}
}
return true;
}
bool VerifyTensors(const Model& model, ErrorReporter* error_reporter) {
if (!model.subgraphs()) {
return true;
}
if (!model.buffers()) {
ReportError(error_reporter, "Missing 'buffers' section.");
return false;
}
for (const auto* subgraph : *model.subgraphs()) {
if (!subgraph->tensors()) {
continue;
}
for (const auto* tensor : *subgraph->tensors()) {
if (!tensor->buffer()) {
continue;
}
if (tensor->buffer() >= model.buffers()->size()) {
ReportError(error_reporter, "Tensor %s invalid buffer index: %d",
NameOrEmptyString(tensor->name()), tensor->buffer());
return false;
}
auto* buffer = model.buffers()->Get(tensor->buffer());
if (!buffer) {
ReportError(error_reporter, "Tensor %s buffer %d not set",
NameOrEmptyString(tensor->name()), tensor->buffer());
return false;
}
if (buffer->data()) {
if (tensor->type() == TensorType_STRING) {
if (!VerifyStringTensorBuffer(*tensor, *buffer, error_reporter)) {
return false;
}
} else {
if (!VerifyNumericTensorBuffer(*tensor, *buffer, error_reporter)) {
return false;
}
}
}
}
}
return true;
}
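// Checks that every operator code used by the model can be resolved. Custom
// ops that appear only in validation subgraphs are exempt, presumably because
// they are not run through the normal interpreter path.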
bool VerifyOps(const Model& model, const OpResolver& resolver,
ErrorReporter* error_reporter) {
if (!model.operator_codes()) {
return true;
}
absl::flat_hash_set<int> regular_code_indices;
absl::flat_hash_set<int> validation_code_indices;
for (const auto* subgraph : *model.subgraphs()) {
if (!subgraph->operators()) {
continue;
}
if (subgraph->name() && IsValidationSubgraph(subgraph->name()->c_str())) {
for (const auto& op : *(subgraph->operators())) {
validation_code_indices.insert(op->opcode_index());
}
} else {
for (const auto* op : *(subgraph->operators())) {
regular_code_indices.insert(op->opcode_index());
}
}
}
for (int i = 0; i < model.operator_codes()->size(); i++) {
const auto* opcode = model.operator_codes()->Get(i);
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code < BuiltinOperator_MIN ||
builtin_code > BuiltinOperator_MAX) {
ReportError(error_reporter, "Operator id '%d' is out of range.",
builtin_code);
return false;
}
if (builtin_code == BuiltinOperator_CUSTOM) {
if (IsNullOrEmptyString(opcode->custom_code())) {
ReportError(error_reporter,
"Invalid custom op name, cannot be null/empty.");
return false;
} else if (!resolver.FindOp(opcode->custom_code()->c_str(),
opcode->version())) {
if (regular_code_indices.contains(i) ||
!validation_code_indices.contains(i)) {
ReportError(error_reporter, "Unsupported custom op: %s, version: %d",
opcode->custom_code()->c_str(), opcode->version());
return false;
}
}
} else {
if (!resolver.FindOp(builtin_code, opcode->version())) {
ReportError(error_reporter, "Unsupported builtin op: %s, version: %d",
EnumNameBuiltinOperator(builtin_code), opcode->version());
return false;
}
}
}
return true;
}
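// Structural checks shared by both Verify() overloads: schema version,
// subgraph dataflow, and tensor buffers. Op resolution is layered on top by
// the resolver-taking overload.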
bool VerifyModel(const Model* model, ErrorReporter* error_reporter) {
if (model == nullptr) {
ReportError(error_reporter, "Invalid flatbuffer format");
return false;
}
if (model->version() != TFLITE_SCHEMA_VERSION) {
ReportError(error_reporter, "Invalid model version %d", model->version());
return false;
}
if (!VerifySubGraphs(*model, error_reporter)) {
return false;
}
if (!VerifyTensors(*model, error_reporter)) {
return false;
}
return true;
}
}
bool Verify(const void* buf, size_t len, ErrorReporter* error_reporter) {
const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
return VerifyModel(model, error_reporter);
}
bool Verify(const void* buf, size_t len, const OpResolver& resolver,
ErrorReporter* error_reporter) {
const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
if (!VerifyModel(model, error_reporter)) {
return false;
}
if (!VerifyOps(*model, resolver, error_reporter)) {
return false;
}
return true;
}
} | #include "tensorflow/lite/core/tools/verifier.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/error_reporter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
static const char* kSparseTensorTestModel =
"tensorflow/lite/testdata/sparse_tensor.bin";
}
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
return buffer_size_;
}
int GetBufferSize() { return buffer_size_; }
string GetAsString() const { return string(buffer_, buffer_size_); }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
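// Test helper that assembles a small TFLite flatbuffer model piece by piece.
// The empty buffer pushed in each constructor fills buffer index 0, which by
// convention means "no data".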
class TfLiteFlatbufferModelBuilder {
public:
TfLiteFlatbufferModelBuilder() {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
}
TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
const std::vector<std::string>& custom_ops) {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
for (const auto& iter : builtin_ops) {
resolver_.AddBuiltin(iter, &fake_op_);
}
for (const auto& iter : custom_ops) {
resolver_.AddCustom(iter.data(), &fake_op_);
}
}
void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
const std::vector<uint8_t>& buffer, const char* name,
const bool is_variable = false) {
int buffer_index = 0;
if (!buffer.empty()) {
buffer_index = buffers_.size();
buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
}
if (shape.empty()) {
tensors_.push_back(CreateTensorDirect(builder_, nullptr, type,
buffer_index, name,
0, is_variable));
return;
}
tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
name, 0,
is_variable));
}
void AddOperator(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
tflite::BuiltinOperator builtin_op, const char* custom_op) {
operator_codes_.push_back(
CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
operators_.push_back(CreateOperator(
builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
        builder_.CreateVector(outputs), BuiltinOptions_NONE, 0, 0,
        tflite::CustomOptionsFormat_FLEXBUFFERS));
}
enum BuilderMode {
kBuilderModeEmptyVectorIsEmpty,
kBuilderModeEmptyVectorIsNull,
kBuilderModeDefault = kBuilderModeEmptyVectorIsEmpty,
};
void FinishModel(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
BuilderMode mode = kBuilderModeDefault) {
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>({CreateSubGraph(
builder_, CreateVector(tensors_, mode), CreateVector(inputs, mode),
CreateVector(outputs, mode), CreateVector(operators_, mode),
builder_.CreateString("test_subgraph"))});
auto result = CreateModel(
builder_, TFLITE_SCHEMA_VERSION, CreateVector(operator_codes_, mode),
CreateVector(subgraph, mode), builder_.CreateString("test_model"),
CreateVector(buffers_, mode));
tflite::FinishModelBuffer(builder_, result);
}
bool Verify() {
return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
&mock_reporter_);
}
bool VerifyWithOpResolver() {
return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
resolver_, &mock_reporter_);
}
string GetErrorString() { return mock_reporter_.GetAsString(); }
private:
template <typename T>
flatbuffers::Offset<flatbuffers::Vector<T>> CreateVector(
const std::vector<T>& v, BuilderMode mode) {
if (mode == kBuilderModeEmptyVectorIsNull && v.empty()) {
return 0;
}
return builder_.CreateVector(v);
}
flatbuffers::FlatBufferBuilder builder_;
MutableOpResolver resolver_;
TfLiteRegistration fake_op_{};
MockErrorReporter mock_reporter_;
std::vector<flatbuffers::Offset<Operator>> operators_;
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes_;
std::vector<flatbuffers::Offset<Tensor>> tensors_;
std::vector<flatbuffers::Offset<Buffer>> buffers_;
};
TEST(VerifyModel, TestEmptyModel) {
flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, 0, 0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
MockErrorReporter mock_reporter;
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("Missing 'subgraphs' section."));
}
TEST(VerifyModel, TestEmptyVector) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor({}, TensorType_UINT8, {}, "empty_vector");
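  // Serialized string tensor holding two strings: count 2, offsets 16 (header
  // end), 17, and 19, so the payloads are "A" and "BC".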
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {3});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
}
TEST(VerifyModel, TestSimpleModel) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TestNullTensors) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.FinishModel(
{}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ(builder.GetErrorString(),
"Input tensor 0 to op 0 (CUSTOM) is not produced");
}
TEST(VerifyModel, TestNullOperators) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.FinishModel(
{0, 1}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Missing 'operators' section in subgraph"));
}
TEST(VerifyModel, TestNullInputs) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel(
{}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TestCorruptedData) {
std::string model = "123";
MockErrorReporter mock_reporter;
ASSERT_FALSE(
Verify(model.data(), model.size(), MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("Invalid flatbuffer format"));
}
TEST(VerifyModel, TestUnsupportedVersion) {
flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, 1, 0, 0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
MockErrorReporter mock_reporter;
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("Invalid model version 1"));
}
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, 0, 0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
builder.GetSize());
for (size_t i = 0; i < model_content.size(); i++) {
model_content[i] = (model_content[i] + 137) % 255;
EXPECT_FALSE(Verify(model_content.data(), model_content.size(),
MutableOpResolver{}, DefaultErrorReporter()))
<< "Fail at position: " << i;
}
}
TEST(VerifyModel, TestIntTensorShapeIsGreaterThanBuffer) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires 6 bytes, but is "
"allocated with 4 bytes buffer"));
}
TEST(VerifyModel, TestIntTensorShapeIsSmallerThanBuffer) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({2, 1}, TensorType_UINT8, {1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires 2 bytes, but is "
"allocated with 4 bytes buffer"));
}
TEST(VerifyModel, TestIntTensorShapeOverflow) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({1024, 2048, 4096}, TensorType_UINT8, {1, 2, 3, 4},
"input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex("Tensor input dimension overflow"));
}
TEST(VerifyModel, TensorBufferIsNotValid) {
flatbuffers::FlatBufferBuilder builder;
std::vector<int> shape = {2, 3};
auto tensors = builder.CreateVector(std::vector<flatbuffers::Offset<Tensor>>{
CreateTensorDirect(builder, &shape, TensorType_INT32, 2,
"input", 0)});
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>(
{CreateSubGraph(builder, tensors, 0, 0,
0, builder.CreateString("Main"))});
auto buffers = builder.CreateVector(std::vector<flatbuffers::Offset<Buffer>>{
CreateBuffer(builder, builder.CreateVector(
std::vector<uint8_t>{1, 2, 3, 4, 5, 6})),
});
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, 0,
builder.CreateVector(subgraph),
builder.CreateString("SmartReply"), buffers);
::tflite::FinishModelBuffer(builder, model);
MockErrorReporter mock_reporter;
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(
mock_reporter.GetAsString(),
::testing::ContainsRegex("Missing 'operators' section in subgraph."));
}
TEST(VerifyModel, StringTensorIsEmpty) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({2}, TensorType_STRING, {0x00}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ(builder.GetErrorString(), "String tensor input is invalid (empty)");
}
TEST(VerifyModel, StringTensorHasInvalidNumString) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{0x00, 0x00, 0x00, 0x20, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'},
"input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer requires at least -2147483640 bytes, "
"but is allocated with 18 bytes"));
}
TEST(VerifyModel, StringTensorOffsetTooSmall) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 12, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer initial offset must be: 16"));
}
TEST(VerifyModel, StringTensorOffsetOutOfRange) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 22, 0, 0, 0, 'A', 'B'}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer is invalid: index 2"));
}
TEST(VerifyModel, StringTensorIsLargerThanRequired) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B', 'C'},
"input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer last offset must be 19"));
}
TEST(VerifyModel, AllOpsAreSupported) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output2");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "CustomOp");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, UseUnsupportedBuiltinOps) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_SUB}, {"CustomOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
EXPECT_EQ("", builder.GetErrorString());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Unsupported builtin op: ADD, version: 1"));
}
TEST(VerifyModel, UseUnsupportedCustomOps) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "Not supported");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
EXPECT_EQ("", builder.GetErrorString());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"Unsupported custom op: Not supported, version: 1"));
}
TEST(VerifyModel, UseUnnamedCustomOps) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
EXPECT_EQ("", builder.GetErrorString());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"Invalid custom op name, cannot be null/empty."));
}
TEST(VerifyModel, UnpopulatedInputToOp) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({1, 2}, {3}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor({2, 3}, TensorType_UINT8, {}, "invalid_input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 2}, {3});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Input tensor 1 to op 0 (CUSTOM) is not produced",
builder.GetErrorString());
}
TEST(VerifyModel, MultipleOpsOutputToSameTensor) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "CustomOp");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ(
"Output tensor 2 to op 1 (CUSTOM) is an output from another op. "
"There is a cycle in the graph",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsAConstantTensor) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {1, 2, 3, 4, 5, 6}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a constant",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsSubgraphInput) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1, 2}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a subgraph input",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsAVariable) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output", true);
builder.FinishModel({0, 1}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a variable",
builder.GetErrorString());
}
TEST(VerifyModel, OpWithOptionalTensor) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({kTfLiteOptionalTensor, 0, 1}, {2},
BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TypedTensorShapeMismatchWithTensorBufferSize) {
TfLiteFlatbufferModelBuilder builder;
for (int tensor_type = TensorType_MIN; tensor_type <= TensorType_MAX;
++tensor_type) {
if (tensor_type == TensorType_STRING) continue;
builder.AddTensor({2, 3}, static_cast<TensorType>(tensor_type),
{1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires .* bytes, but is "
"allocated with 4 bytes buffer"));
}
}
TEST(VerifyModel, TypedTensorShapeMatchesTensorBufferSize) {
TfLiteFlatbufferModelBuilder builder;
for (int tensor_type = TensorType_MIN; tensor_type <= TensorType_MAX;
++tensor_type) {
if (tensor_type == TensorType_STRING ||
tensor_type == TensorType_RESOURCE || tensor_type == TensorType_VARIANT)
continue;
TfLiteType lite_type = kTfLiteNoType;
ASSERT_EQ(ConvertTensorType(static_cast<TensorType>(tensor_type),
&lite_type, nullptr),
kTfLiteOk);
size_t size_bytes = 0;
ASSERT_EQ(GetSizeOfType(nullptr, lite_type, &size_bytes),
kTfLiteOk);
std::vector<uint8_t> buffer(size_bytes);
builder.AddTensor({1}, static_cast<TensorType>(tensor_type), buffer,
"input");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
}
}
TEST(VerifyModel, SimpleValidSparseTensor) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_TRUE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
}
TEST(VerifyModel, InvalidSparseTensorMissingBlockMap) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->block_map = {};
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, InvalidSparseTensorIndexOutOfBound) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->dim_metadata[1]->array_indices.AsUint8Vector()->values[1] =
5;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, InvalidSparseTensorInvalidBuffer) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
scoped_model->buffers[1]->data = {0, 1, 2, 3, 4, 5, 6, 7};
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex(
"requires 12 bytes, but is allocated with 8 bytes buffer"));
}
TEST(VerifyModel, InvalidSparseTensorInvalidTraversalOrder) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->traversal_order[0] = 10;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, ValidSparseTensorBCSC) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->traversal_order = {1, 0, 3, 2};
tensor->sparsity->block_map = {0, 1};
tensor->sparsity->dim_metadata[0]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[0]->dense_size = 2;
tensor->sparsity->dim_metadata[1]->format = DimensionType_SPARSE_CSR;
tensor->sparsity->dim_metadata[1]->array_segments.AsUint8Vector()->values = {
0, 1, 3};
tensor->sparsity->dim_metadata[1]->array_indices.AsUint8Vector()->values = {
0, 0, 1};
tensor->sparsity->dim_metadata[2]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[2]->dense_size = 2;
tensor->sparsity->dim_metadata[3]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[3]->dense_size = 2;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_TRUE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
111cb924-fbfa-4fec-8fee-aef598b480b7 | cpp | tensorflow/tensorflow | verifier_internal | tensorflow/lite/core/tools/verifier_internal.cc | tensorflow/lite/core/tools/verifier_internal_test.cc | #include "tensorflow/lite/core/tools/verifier_internal.h"
#include <stddef.h>
#include <stdint.h>
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace internal {
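// Runs the flatbuffers verifier over `buf` and returns the typed Model pointer
// on success, or nullptr when the buffer is not a structurally valid TFLite
// flatbuffer.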
const Model* VerifyFlatBufferAndGetModel(const void* buf, size_t len) {
::flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
if (VerifyModelBuffer(verifier)) {
return ::tflite::GetModel(buf);
} else {
return nullptr;
}
}
}
} | #include "tensorflow/lite/core/tools/verifier_internal.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
class TfLiteFlatbufferModelBuilder {
public:
TfLiteFlatbufferModelBuilder() {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
}
TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
const std::vector<std::string>& custom_ops) {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
for (const auto& iter : builtin_ops) {
resolver_.AddBuiltin(iter, &fake_op_);
}
for (const auto& iter : custom_ops) {
resolver_.AddCustom(iter.data(), &fake_op_);
}
}
void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
const std::vector<uint8_t>& buffer, const char* name,
const bool is_variable = false) {
int buffer_index = 0;
if (!buffer.empty()) {
buffer_index = buffers_.size();
buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
}
if (shape.empty()) {
      tensors_.push_back(CreateTensorDirect(builder_, /*shape=*/nullptr, type,
                                            buffer_index, name,
                                            /*quantization=*/0, is_variable));
      return;
    }
    tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
                                          name, /*quantization=*/0,
                                          is_variable));
}
void AddOperator(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
tflite::BuiltinOperator builtin_op, const char* custom_op) {
operator_codes_.push_back(
CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
operators_.push_back(CreateOperator(
builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), BuiltinOptions_NONE,
0,
0, tflite::CustomOptionsFormat_FLEXBUFFERS));
}
enum BuilderMode {
kBuilderModeEmptyVectorIsEmpty,
kBuilderModeEmptyVectorIsNull,
kBuilderModeDefault = kBuilderModeEmptyVectorIsEmpty,
};
void FinishModel(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
BuilderMode mode = kBuilderModeDefault) {
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>({CreateSubGraph(
builder_, CreateVector(tensors_, mode), CreateVector(inputs, mode),
CreateVector(outputs, mode), CreateVector(operators_, mode),
builder_.CreateString("test_subgraph"))});
auto result = CreateModel(
builder_, TFLITE_SCHEMA_VERSION, CreateVector(operator_codes_, mode),
CreateVector(subgraph, mode), builder_.CreateString("test_model"),
CreateVector(buffers_, mode));
tflite::FinishModelBuffer(builder_, result);
}
bool Verify(const void* buf, size_t length) {
return tflite::internal::VerifyFlatBufferAndGetModel(buf, length);
}
bool Verify() {
return Verify(builder_.GetBufferPointer(), builder_.GetSize());
}
private:
template <typename T>
flatbuffers::Offset<flatbuffers::Vector<T>> CreateVector(
const std::vector<T>& v, BuilderMode mode) {
if (mode == kBuilderModeEmptyVectorIsNull && v.empty()) {
return 0;
}
return builder_.CreateVector(v);
}
flatbuffers::FlatBufferBuilder builder_;
MutableOpResolver resolver_;
TfLiteRegistration fake_op_{};
std::vector<flatbuffers::Offset<Operator>> operators_;
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes_;
std::vector<flatbuffers::Offset<Tensor>> tensors_;
std::vector<flatbuffers::Offset<Buffer>> buffers_;
};
TEST(VerifyModel, TestEmptyModel) {
flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
                           /*operator_codes=*/0, /*subgraphs=*/0,
                           /*description=*/0, /*buffers=*/0);
::tflite::FinishModelBuffer(builder, model);
ASSERT_TRUE(::tflite::internal::VerifyFlatBufferAndGetModel(
builder.GetBufferPointer(), builder.GetSize()));
}
TEST(VerifyModel, TestSimpleModel) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
}
TEST(VerifyModel, TestCorruptedData) {
std::string model = "123";
ASSERT_FALSE(::tflite::internal::VerifyFlatBufferAndGetModel(model.data(),
model.size()));
}
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
                           /*operator_codes=*/0,
                           /*subgraphs=*/0, /*description=*/0, /*buffers=*/0);
::tflite::FinishModelBuffer(builder, model);
std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
builder.GetSize());
for (size_t i = 0; i < model_content.size(); i++) {
model_content[i] = (model_content[i] + 137) % 255;
EXPECT_FALSE(tflite::internal::VerifyFlatBufferAndGetModel(
model_content.data(), model_content.size()))
<< "Fail at position: " << i;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7e011ad8-1944-4d60-9a4c-0f9cc292d713 | cpp | tensorflow/tensorflow | async_subgraph | tensorflow/lite/core/async/async_subgraph.cc | tensorflow/lite/core/async/async_subgraph_test.cc | #include "tensorflow/lite/core/async/async_subgraph.h"
#include <vector>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace async {
namespace {
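// Looks up the async kernel for `node`: the opaque registration's
// async_kernel_with_data entry point is preferred, then its async_kernel,
// then the legacy TfLiteRegistration::async_kernel; returns nullptr if the
// backend exposes none of them.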
TfLiteAsyncKernel* GetAsyncKernel(TfLiteContext* context,
const TfLiteRegistration& op_reg,
TfLiteNode& node) {
if (op_reg.registration_external) {
auto* context_ = reinterpret_cast<TfLiteOpaqueContext*>(context);
auto* node_ = reinterpret_cast<TfLiteOpaqueNode*>(&node);
if (op_reg.registration_external->async_kernel_with_data) {
auto user_data = op_reg.registration_external->user_data;
return op_reg.registration_external->async_kernel_with_data(
user_data, context_, node_);
} else if (op_reg.registration_external->async_kernel) {
return op_reg.registration_external->async_kernel(context_, node_);
}
}
if (op_reg.async_kernel) {
return op_reg.async_kernel(context, &node);
}
return nullptr;
}
}
Subgraph* AsyncSubgraph::subgraph() const { return subgraph_; }
TfLiteContext* AsyncSubgraph::context() const { return subgraph_->context(); }
TfLiteOpaqueContext* AsyncSubgraph::opaque_context() const {
return reinterpret_cast<TfLiteOpaqueContext*>(context());
}
TfLiteAsyncKernel* AsyncSubgraph::async_kernel() const { return async_kernel_; }
AsyncSubgraph::AsyncSubgraph(Subgraph* subgraph) : subgraph_(subgraph) {
if (!IsFullyDelegated()) {
subgraph->ReportError("Model is not fully delegated by 1 backend.");
return;
}
auto node_index = subgraph_->execution_plan()[0];
TfLiteNode& node = subgraph_->nodes_and_registration_[node_index].first;
const TfLiteRegistration& registration =
subgraph_->nodes_and_registration_[node_index].second;
async_kernel_ = GetAsyncKernel(context(), registration, node);
if (!async_kernel_) {
subgraph->ReportError("Backend does not support asynchronous execution.");
return;
}
opaque_node_ =
reinterpret_cast<TfLiteOpaqueNode*>(const_cast<TfLiteNode*>(&node));
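// Queries the kernel once per I/O direction for its supported buffer and
// synchronization type names and caches them in the member maps, so later
// SupportedBufferTypes/SupportedSynchronizations calls are cheap lookups.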
#define POPULATE_VECTOR(io_type, accessor, dest) \
{ \
const char* const* types = nullptr; \
size_t n_types = 0; \
(*async_kernel_->accessor)(async_kernel_, io_type, &types, &n_types); \
dest[io_type] = std::vector<const char*>(types, types + n_types); \
}
POPULATE_VECTOR(kTfLiteIoTypeInput, supported_buffer_types,
supported_buffer_types_);
POPULATE_VECTOR(kTfLiteIoTypeOutput, supported_buffer_types,
supported_buffer_types_);
POPULATE_VECTOR(kTfLiteIoTypeInput, supported_synchronizations,
supported_synchronizations_);
POPULATE_VECTOR(kTfLiteIoTypeOutput, supported_synchronizations,
supported_synchronizations_);
#undef POPULATE_VECTOR
}
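// The async API requires the whole graph to be handled by a single delegate
// node; otherwise the constructor above reports an error and leaves
// async_kernel_ null.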
bool AsyncSubgraph::IsFullyDelegated() const {
if (subgraph_->execution_plan().size() != 1) return false;
const TfLiteNode& node =
subgraph_->nodes_and_registration()[subgraph_->execution_plan()[0]].first;
if (node.delegate == nullptr) return false;
return true;
}
TfLiteStatus AsyncSubgraph::RegisterBuffer(TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (buffer == nullptr || attrs == nullptr || handle == nullptr ||
async_kernel() == nullptr) {
return kTfLiteError;
}
*handle = next_buffer_handle_.fetch_add(1, std::memory_order_relaxed);
return (*async_kernel_->register_buffer)(
async_kernel_, reinterpret_cast<TfLiteOpaqueContext*>(context()), io_type,
buffer, attrs, *handle);
}
TfLiteStatus AsyncSubgraph::RegisterBufferSlice(TfLiteBufferHandle buffer_pool,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (attrs == nullptr || handle == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
*handle = next_buffer_handle_.fetch_add(1, std::memory_order_relaxed);
return (*async_kernel_->register_buffer_slice)(
async_kernel_, opaque_context(), buffer_pool, attrs, *handle);
}
TfLiteStatus AsyncSubgraph::UnregisterBuffer(TfLiteBufferHandle handle) {
if (async_kernel() == nullptr) return kTfLiteError;
return (*async_kernel_->unregister_buffer)(async_kernel_, opaque_context(),
handle);
}
const std::vector<const char*>& AsyncSubgraph::SupportedBufferTypes(
TfLiteIoType io_type) const {
return supported_buffer_types_.at(io_type);
}
const std::vector<const char*>& AsyncSubgraph::SupportedSynchronizations(
TfLiteIoType io_type) const {
return supported_synchronizations_.at(io_type);
}
bool AsyncSubgraph::ReconcileRestrictions(
int tensor_index, const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) const {
if (user_provided_attributes == nullptr || merged == nullptr ||
async_kernel() == nullptr) {
return false;
}
if (tensor_index < 0 || tensor_index >= subgraph_->tensors_size()) {
return false;
}
return (*async_kernel_->reconcile_restrictions)(
async_kernel_, opaque_context(), opaque_node_, tensor_index,
user_provided_attributes, merged, conflict);
}
TfLiteStatus AsyncSubgraph::SetAttributes(int tensor_index,
const TfLiteAttributeMap* attrs) {
if (attrs == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
if (tensor_index < 0 || tensor_index >= subgraph_->tensors_size()) {
return kTfLiteError;
}
return (*async_kernel_->set_attributes)(async_kernel_, opaque_context(),
opaque_node_, tensor_index, attrs);
}
TfLiteStatus AsyncSubgraph::SetBufferAttributes(
const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs) {
return (*async_kernel_->set_buffer_attributes)(async_kernel_, buffer, attrs);
}
TfLiteStatus AsyncSubgraph::GetBufferAttributes(
const TfLiteBackendBuffer* buffer, TfLiteAttributeMap* attrs) {
return (*async_kernel_->get_buffer_attributes)(async_kernel_, buffer, attrs);
}
TfLiteStatus AsyncSubgraph::Prepare() {
if (async_kernel() == nullptr) return kTfLiteError;
return (*async_kernel_->prepare)(async_kernel_, opaque_context(),
opaque_node_);
}
TfLiteExecutionTask* AsyncSubgraph::CreateTask() {
return new TfLiteExecutionTask;
}
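// Schedules the task on the backend kernel. A task may only be in flight
// once at a time: InvokeAsync fails until a matching Wait() clears the
// scheduled flag.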
TfLiteStatus AsyncSubgraph::InvokeAsync(TfLiteExecutionTask* task) {
if (task == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
if (task->task->SetScheduled(true)) {
TFLITE_LOG(tflite::TFLITE_LOG_ERROR,
"The task has already been scheduled for execution.");
return kTfLiteError;
}
auto ret = (*async_kernel_->eval)(async_kernel_, opaque_context(),
opaque_node_, task);
task->task->SetStatus(ret);
return ret;
}
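// Blocks until the scheduled task completes. Calling Wait() on a task that
// is not currently scheduled simply returns its last recorded status.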
TfLiteStatus AsyncSubgraph::Wait(TfLiteExecutionTask* task) {
if (task == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
if (!task->task->Scheduled()) {
return task->task->Status();
}
auto ret = (*async_kernel_->wait)(async_kernel_, opaque_context(), task);
task->task->SetStatus(ret);
task->task->SetScheduled(false);
return ret;
}
TfLiteStatus AsyncSubgraph::Finish(TfLiteExecutionTask* task) {
if (async_kernel() == nullptr) return kTfLiteError;
auto ret = (*async_kernel_->finish)(async_kernel_, opaque_context(), task);
if (ret != kTfLiteOk) {
subgraph_->ReportError("Failed to finish task.");
}
delete task;
return ret;
}
}
} | #include "tensorflow/lite/core/async/async_subgraph.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/backend_async_kernel_interface.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
#include "tensorflow/lite/core/async/testing/test_backend.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
using ::testing::_;
namespace tflite {
namespace async {
class AsyncSubgraphTestPeer {
public:
explicit AsyncSubgraphTestPeer(AsyncSubgraph* subgraph)
: subgraph_(subgraph) {}
bool IsFullyDelegated() const { return subgraph_->IsFullyDelegated(); }
private:
AsyncSubgraph* subgraph_;
};
class AsyncSubgraphTest : public ::testing::Test {
protected:
void SetUp() override {
kernel_ = std::make_unique<testing::MockAsyncKernel>();
backend_ = std::make_unique<testing::TestBackend>(kernel_->kernel());
interpreter_ = std::make_unique<Interpreter>();
interpreter_->AddTensors(5);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({3, 4});
TfLiteQuantizationParams quant;
interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
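    // AddNodeWithParameters takes ownership of each builtin_data allocation,
    // so the mallocs below are intentionally not freed by the test.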
void* builtin_data_1 = malloc(sizeof(int));
void* builtin_data_2 = malloc(sizeof(int));
void* builtin_data_3 = malloc(sizeof(int));
interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data_1,
reg);
interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data_2,
reg);
interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data_3,
reg);
}
void BuildAsyncSubgraph() {
interpreter_->ModifyGraphWithDelegate(backend_->get_delegate());
subgraph_ = std::make_unique<AsyncSubgraph>(interpreter_->subgraph(0));
}
void TearDown() override { subgraph_.reset(); }
protected:
std::unique_ptr<testing::MockAsyncKernel> kernel_;
std::unique_ptr<testing::TestBackend> backend_;
std::unique_ptr<Interpreter> interpreter_;
std::unique_ptr<AsyncSubgraph> subgraph_;
};
TEST_F(AsyncSubgraphTest, FullyDelegated) {
BuildAsyncSubgraph();
EXPECT_TRUE(AsyncSubgraphTestPeer(subgraph_.get()).IsFullyDelegated());
}
TEST_F(AsyncSubgraphTest, NotFullyDelegated) {
backend_->SetMinPartitionedNodes(42);
BuildAsyncSubgraph();
EXPECT_FALSE(AsyncSubgraphTestPeer(subgraph_.get()).IsFullyDelegated());
}
TEST_F(AsyncSubgraphTest, BasicTest) {
BuildAsyncSubgraph();
EXPECT_CALL(*kernel_, RegisterBuffer(_, _, _, _, _));
EXPECT_CALL(*kernel_, RegisterBufferSlice(_, _, _, _));
EXPECT_CALL(*kernel_, UnregisterBuffer(_, _));
EXPECT_CALL(*kernel_, ReconcileRestrictions(_, _, _, _, _, _));
EXPECT_CALL(*kernel_, SetAttributes(_, _, _, _));
EXPECT_CALL(*kernel_, Prepare(_, _));
EXPECT_CALL(*kernel_, Eval(_, _, _));
EXPECT_CALL(*kernel_, Wait(_, _));
EXPECT_CALL(*kernel_, Finish(_, _));
auto* buffer = TfLiteBackendBufferCreate();
auto* attrs = new TfLiteAttributeMap(kTfLiteAttrMapTypeBuffer);
TfLiteBufferHandle handle = 1;
TfLiteBufferHandle another_handle = 1;
auto* task = new TfLiteExecutionTask;
EXPECT_FALSE(task->task->Scheduled());
subgraph_->RegisterBuffer(kTfLiteIoTypeInput, buffer, attrs, &handle);
subgraph_->RegisterBufferSlice(handle, attrs, &another_handle);
subgraph_->UnregisterBuffer(handle);
subgraph_->ReconcileRestrictions(0, attrs, attrs, attrs);
subgraph_->SetAttributes(0, attrs);
subgraph_->Prepare();
EXPECT_EQ(kTfLiteOk, subgraph_->InvokeAsync(task));
EXPECT_TRUE(task->task->Scheduled());
EXPECT_EQ(kTfLiteError, subgraph_->InvokeAsync(task));
EXPECT_TRUE(task->task->Scheduled());
EXPECT_EQ(kTfLiteOk, task->task->Status());
EXPECT_EQ(kTfLiteOk, subgraph_->Wait(task));
task->task->SetStatus(kTfLiteError);
EXPECT_EQ(kTfLiteError, subgraph_->Wait(task));
EXPECT_EQ(kTfLiteError, subgraph_->Wait(task));
EXPECT_FALSE(task->task->Scheduled());
subgraph_->Finish(task);
TfLiteBackendBufferDelete(buffer);
delete attrs;
EXPECT_NE(handle, another_handle);
}
TEST_F(AsyncSubgraphTest, OutOfBoundTest) {
BuildAsyncSubgraph();
auto* attrs = new TfLiteAttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_FALSE(subgraph_->ReconcileRestrictions(42, attrs, attrs, attrs));
EXPECT_EQ(kTfLiteError, subgraph_->SetAttributes(42, attrs));
delete attrs;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/async_subgraph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/async_subgraph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
831eba5a-1e8a-4c10-b485-c8cc4caf6274 | cpp | tensorflow/tensorflow | task_internal | tensorflow/lite/core/async/task_internal.cc | tensorflow/lite/core/async/task_internal_test.cc | #include "tensorflow/lite/core/async/task_internal.h"
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace async {
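// Maps a tensor's signature name to its index using the name-to-index map
// for the given I/O direction. Returns false if the map is unset or has no
// entry for `name`.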
bool ExecutionTask::GetTensorIdx(TfLiteIoType io_type, const char* name,
int* idx) const {
const std::map<std::string, uint32_t>* map = nullptr;
if (io_type == kTfLiteIoTypeInput) {
map = input_name_to_idx_;
} else {
map = output_name_to_idx_;
}
if (!map) return false;
if (auto it_idx = map->find(name); it_idx != map->end()) {
*idx = it_idx->second;
return true;
}
return false;
}
TfLiteBufferHandle ExecutionTask::GetBufferHandle(TfLiteIoType io_type,
const char* name) const {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteNullBufferHandle;
}
return GetBufferHandle(index);
}
TfLiteBufferHandle ExecutionTask::GetBufferHandle(int tensor_index) const {
if (auto it = io_data_.find(tensor_index); it != io_data_.end()) {
return it->second.buf;
}
return kTfLiteNullBufferHandle;
}
TfLiteStatus ExecutionTask::SetBufferHandle(TfLiteIoType io_type,
const char* name,
TfLiteBufferHandle handle) {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteError;
}
return SetBufferHandle(index, handle);
}
TfLiteStatus ExecutionTask::SetBufferHandle(int tensor_index,
TfLiteBufferHandle handle) {
io_data_[tensor_index].buf = handle;
return kTfLiteOk;
}
TfLiteSynchronization* ExecutionTask::GetSynchronization(
TfLiteIoType io_type, const char* name) const {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return nullptr;
}
return GetSynchronization(index);
}
TfLiteSynchronization* ExecutionTask::GetSynchronization(
int tensor_index) const {
if (auto it = io_data_.find(tensor_index); it != io_data_.end()) {
return it->second.sync;
}
return nullptr;
}
TfLiteStatus ExecutionTask::SetSynchronization(TfLiteIoType io_type,
const char* name,
TfLiteSynchronization* sync) {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteError;
}
return SetSynchronization(index, sync);
}
TfLiteStatus ExecutionTask::SetSynchronization(int tensor_index,
TfLiteSynchronization* sync) {
io_data_[tensor_index].sync = sync;
return kTfLiteOk;
}
}
}
TfLiteExecutionTask::TfLiteExecutionTask() {
task = std::make_unique<tflite::async::ExecutionTask>();
} | #include "tensorflow/lite/core/async/task_internal.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite::async {
TEST(TfLiteExecutionTaskTest, BasicTest) {
tflite::async::ExecutionTask task;
tflite::async::ExecutionTask::TensorNameMapT input_names;
input_names["x"] = 1;
input_names["y"] = 2;
tflite::async::ExecutionTask::TensorNameMapT output_names;
output_names["a"] = 3;
task.SetInputNameMap(&input_names);
task.SetOutputNameMap(&output_names);
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk, task.SetSynchronization(kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(42, task.GetBufferHandle(kTfLiteIoTypeInput, "x"));
EXPECT_EQ(43, task.GetBufferHandle(kTfLiteIoTypeInput, "y"));
EXPECT_EQ(44, task.GetBufferHandle(kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(sync, task.GetSynchronization(kTfLiteIoTypeInput, "x"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "y"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "a"));
TfLiteSynchronizationDelete(sync);
}
TEST(TfLiteExecutionTaskTest, NameMapUninitialized) {
tflite::async::ExecutionTask task;
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeInput, "foo"));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeOutput, "foo"));
  EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "foo"));
  EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "foo"));
}
TEST(TfLiteExecutionTaskTest, NoMatchingName) {
tflite::async::ExecutionTask task;
tflite::async::ExecutionTask::TensorNameMapT input_names;
input_names["x"] = 1;
input_names["y"] = 2;
tflite::async::ExecutionTask::TensorNameMapT output_names;
output_names["a"] = 3;
task.SetInputNameMap(&input_names);
task.SetOutputNameMap(&output_names);
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteError, task.SetBufferHandle(kTfLiteIoTypeInput, "xx", 42));
EXPECT_EQ(kTfLiteError, task.SetBufferHandle(kTfLiteIoTypeOutput, "aa", 44));
EXPECT_EQ(kTfLiteError,
task.SetSynchronization(kTfLiteIoTypeInput, "xx", sync));
EXPECT_EQ(kTfLiteError,
task.SetSynchronization(kTfLiteIoTypeOutput, "aa", sync));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeInput, "xx"));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeOutput, "aa"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "xx"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "aa"));
TfLiteSynchronizationDelete(sync);
}
TEST(TfLiteExecutionTaskTest, DelegateData) {
TfLiteAsyncKernel kernel{};
int data = 0;
tflite::async::ExecutionTask task;
EXPECT_EQ(nullptr, task.GetDelegateExecutionData(&kernel));
task.SetDelegateExecutionData(&kernel, &data);
EXPECT_EQ(&data, task.GetDelegateExecutionData(&kernel));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/task_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/task_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22e618cf-019a-4ec3-904c-f4d326d6e1d6 | cpp | tensorflow/tensorflow | async_signature_runner | tensorflow/lite/core/async/c/async_signature_runner.cc | tensorflow/lite/core/async/c/async_signature_runner_test.cc | #include "tensorflow/lite/core/async/c/async_signature_runner.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/async/async_signature_runner.h"
#include "tensorflow/lite/core/async/c/internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
TfLiteAsyncSignatureRunner* TfLiteInterpreterGetAsyncSignatureRunner(
const TfLiteInterpreter* interpreter, const char* signature_key) {
if (!interpreter) return nullptr;
tflite::async::AsyncSignatureRunner* runner =
interpreter->impl->GetAsyncSignatureRunner(signature_key);
if (!runner) return nullptr;
return new TfLiteAsyncSignatureRunner{runner};
}
TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->RegisterBuffer(io_type, buffer, attrs,
handle);
}
TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBufferSlice(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle buffer_pool, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->RegisterBufferSlice(buffer_pool, attrs,
handle);
}
TfLiteStatus TfLiteAsyncSignatureRunnerUnregisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle handle) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->UnregisterBuffer(handle);
}
TfLiteStatus TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types) {
if (async_signature_runner == nullptr || types == nullptr ||
num_types == nullptr)
return kTfLiteError;
const auto& buffer_types =
async_signature_runner->impl->SupportedBufferTypes(io_type);
*types = buffer_types.data();
*num_types = buffer_types.size();
return kTfLiteOk;
}
TfLiteStatus TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types) {
if (async_signature_runner == nullptr || types == nullptr ||
num_types == nullptr)
return kTfLiteError;
const auto& synchronization_types =
async_signature_runner->impl->SupportedSynchronizations(io_type);
*types = synchronization_types.data();
*num_types = synchronization_types.size();
return kTfLiteOk;
}
bool TfLiteAsyncSignatureRunnerReconcileRestrictions(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* name,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) {
if (!async_signature_runner) return false;
return async_signature_runner->impl->ReconcileRestrictions(
io_type, name, user_provided_attributes, merged, conflict);
}
bool TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
const TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) {
if (!async_signature_runner) return false;
return async_signature_runner->impl->ReconcileRestrictions(
tensor_index, user_provided_attributes, merged, conflict);
}
TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributes(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const char* name, const TfLiteAttributeMap* attrs) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->SetAttributes(io_type, name, attrs);
}
TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributesByIndex(
TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* attrs) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->SetAttributes(tensor_index, attrs);
}
TfLiteStatus TfLiteAsyncSignatureRunnerPrepareBackends(
TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->PrepareBackends();
}
TfLiteExecutionTask* TfLiteAsyncSignatureRunnerCreateTask(
TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->CreateTask();
}
TfLiteStatus TfLiteAsyncSignatureRunnerInvokeAsync(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->InvokeAsync(task);
}
TfLiteStatus TfLiteAsyncSignatureRunnerWait(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->Wait(task);
}
TfLiteStatus TfLiteAsyncSignatureRunnerFinish(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->Finish(task);
}
size_t TfLiteAsyncSignatureRunnerGetInputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return 0;
return async_signature_runner->impl->input_size();
}
const char* TfLiteAsyncSignatureRunnerGetInputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t input_index) {
if (!async_signature_runner) return nullptr;
size_t count =
TfLiteAsyncSignatureRunnerGetInputCount(async_signature_runner);
if (input_index < 0 || input_index >= count) {
return nullptr;
}
const auto& input_names = async_signature_runner->impl->input_names();
if (input_index >= input_names.size()) {
return nullptr;
}
return input_names[input_index];
}
size_t TfLiteAsyncSignatureRunnerGetOutputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return 0;
return async_signature_runner->impl->output_size();
}
const char* TfLiteAsyncSignatureRunnerGetOutputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t output_index) {
if (!async_signature_runner) return nullptr;
size_t count =
TfLiteAsyncSignatureRunnerGetOutputCount(async_signature_runner);
if (output_index < 0 || output_index >= count) {
return nullptr;
}
const auto& output_names = async_signature_runner->impl->output_names();
if (output_index >= output_names.size()) {
return nullptr;
}
  return output_names[output_index];
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetInputTensor(
TfLiteAsyncSignatureRunner* async_signature_runner,
const char* input_name) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->input_tensor(input_name);
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetOutputTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner,
const char* output_name) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->output_tensor(output_name);
}
void TfLiteAsyncSignatureRunnerDelete(
TfLiteAsyncSignatureRunner* signature_runner) {
delete signature_runner;
}
const int* TfLiteAsyncSignatureRunnerInputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->inputs().data();
}
const int* TfLiteAsyncSignatureRunnerOutputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->outputs().data();
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner, int index) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->tensor(index);
} | #include "tensorflow/lite/core/async/c/async_signature_runner.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/backend_async_kernel_interface.h"
#include "tensorflow/lite/core/async/c/internal.h"
#include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
#include "tensorflow/lite/core/async/testing/test_backend.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/interpreter_test_util.h"
using ::testing::_;
using ::testing::Return;
namespace tflite {
namespace async {
class AsyncSignatureRunnerTest : public InterpreterTest,
public ::testing::WithParamInterface<bool> {
protected:
void SetUp() override {
kernel_ =
std::make_unique<::testing::StrictMock<testing::MockAsyncKernel>>();
backend_ = std::make_unique<testing::TestBackend>(kernel_->kernel());
auto interpreter = std::make_unique<Interpreter>();
interpreter->AddTensors(2);
interpreter->SetInputs({0});
interpreter->SetOutputs({1});
TfLiteQuantizationParams quant;
interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "x", {3},
quant);
interpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "a", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
interpreter->AddNodeWithParameters({0, 0}, {1}, nullptr, 0, builtin_data_1,
reg);
tflite_interpreter_.impl = std::move(interpreter);
}
void BuildRunner(bool has_signature) {
auto* interpreter = tflite_interpreter_.impl.get();
if (has_signature) {
const char kSignatureKey[] = "serving_default";
BuildSignature(interpreter, kSignatureKey, {{"input", 0}},
{{"output", 1}});
interpreter->ModifyGraphWithDelegate(backend_->get_delegate());
runner_ = TfLiteInterpreterGetAsyncSignatureRunner(&tflite_interpreter_,
kSignatureKey);
} else {
interpreter->ModifyGraphWithDelegate(backend_->get_delegate());
runner_ = TfLiteInterpreterGetAsyncSignatureRunner(&tflite_interpreter_,
nullptr);
}
ASSERT_NE(nullptr, runner_);
}
void TearDown() override { TfLiteAsyncSignatureRunnerDelete(runner_); }
protected:
TfLiteAsyncSignatureRunner* runner_ = nullptr;
std::unique_ptr<::testing::StrictMock<testing::MockAsyncKernel>> kernel_;
std::unique_ptr<testing::TestBackend> backend_;
internal::SignatureDef signature_def_;
TfLiteInterpreter tflite_interpreter_{};
};
INSTANTIATE_TEST_SUITE_P(AsyncSignatureRunnerTest, AsyncSignatureRunnerTest,
::testing::Bool());
TEST_P(AsyncSignatureRunnerTest, RegisterBufferTest) {
BuildRunner(GetParam());
EXPECT_CALL(*kernel_, RegisterBuffer(_, _, _, _, _))
.WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, RegisterBufferSlice(_, _, _, _))
.WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, UnregisterBuffer(_, _)).WillOnce(Return(kTfLiteOk));
TfLiteBufferHandle handle;
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
auto* buf = TfLiteBackendBufferCreate();
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerRegisterBuffer(
runner_, kTfLiteIoTypeInput, buf, attr, &handle));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerRegisterBufferSlice(
runner_, handle, attr, &handle));
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerUnregisterBuffer(runner_, handle));
TfLiteAttributeMapDelete(attr);
TfLiteBackendBufferDelete(buf);
}
TEST_P(AsyncSignatureRunnerTest, SupportedTypesTest) {
BuildRunner(GetParam());
const char* const* buffer_types = nullptr;
size_t num_buffer_types = 0;
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
runner_, kTfLiteIoTypeInput, &buffer_types, &num_buffer_types));
EXPECT_EQ(1, num_buffer_types);
EXPECT_STREQ("buffer_type", buffer_types[0]);
const char* const* sync_types = nullptr;
size_t num_sync_types = 0;
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
runner_, kTfLiteIoTypeInput, &sync_types, &num_sync_types));
EXPECT_EQ(1, num_sync_types);
EXPECT_STREQ("sync_type", sync_types[0]);
}
TEST_P(AsyncSignatureRunnerTest, ReconcileTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_CALL(*kernel_, ReconcileRestrictions(_, _, _, _, _, _))
.WillOnce(Return(true));
EXPECT_CALL(*kernel_, SetAttributes(_, _, _, _)).WillOnce(Return(kTfLiteOk));
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
if (has_signature) {
EXPECT_TRUE(TfLiteAsyncSignatureRunnerReconcileRestrictions(
runner_, kTfLiteIoTypeInput, "input", attr, attr, nullptr));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerSetAttributes(
runner_, kTfLiteIoTypeInput, "input", attr));
} else {
EXPECT_TRUE(TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
runner_, 0, attr, attr, nullptr));
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerSetAttributesByIndex(runner_, 0, attr));
}
TfLiteAttributeMapDelete(attr);
}
TEST_P(AsyncSignatureRunnerTest, ExecutionTest) {
BuildRunner(GetParam());
EXPECT_CALL(*kernel_, Prepare(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Eval(_, _, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Wait(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Finish(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerPrepareBackends(runner_));
auto* task = TfLiteAsyncSignatureRunnerCreateTask(runner_);
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerInvokeAsync(runner_, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerWait(runner_, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerFinish(runner_, task));
}
TEST_P(AsyncSignatureRunnerTest, InputsTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetInputCount(runner_));
if (has_signature) {
EXPECT_STREQ("input", TfLiteAsyncSignatureRunnerGetInputName(runner_, 0));
EXPECT_STREQ(
"x", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetInputTensor(runner_, "input")));
} else {
EXPECT_STREQ("x", TfLiteAsyncSignatureRunnerGetInputName(runner_, 0));
EXPECT_STREQ("x",
TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetInputTensor(runner_, "x")));
}
}
TEST_P(AsyncSignatureRunnerTest, OutputsTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetOutputCount(runner_));
if (has_signature) {
EXPECT_STREQ("output", TfLiteAsyncSignatureRunnerGetOutputName(runner_, 0));
EXPECT_STREQ(
"a", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetOutputTensor(runner_, "output")));
} else {
EXPECT_STREQ("a", TfLiteAsyncSignatureRunnerGetOutputName(runner_, 0));
EXPECT_STREQ("a",
TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetOutputTensor(runner_, "a")));
}
}
TEST_P(AsyncSignatureRunnerTest, InputByIndexTest) {
BuildRunner(GetParam());
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetInputCount(runner_));
auto* indices = TfLiteAsyncSignatureRunnerInputTensorIndices(runner_);
EXPECT_NE(nullptr, indices);
  auto index = indices[0];
  EXPECT_STREQ("x", TfLiteOpaqueTensorName(
                        TfLiteAsyncSignatureRunnerGetTensor(runner_, index)));
}
TEST_P(AsyncSignatureRunnerTest, OutputsByIndexTest) {
BuildRunner(GetParam());
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetOutputCount(runner_));
auto* indices = TfLiteAsyncSignatureRunnerOutputTensorIndices(runner_);
EXPECT_NE(nullptr, indices);
  auto index = indices[0];
  EXPECT_STREQ("a", TfLiteOpaqueTensorName(
                        TfLiteAsyncSignatureRunnerGetTensor(runner_, index)));
}
TEST_P(AsyncSignatureRunnerTest, IndexOutOfBound) {
BuildRunner(GetParam());
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetTensor(runner_, 42));
}
TEST(AsyncSignatureRunnerTest, TestNoSignatures) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"third_party/tensorflow/lite/testdata/no_signatures.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
auto kernel =
std::make_unique<::testing::StrictMock<testing::MockAsyncKernel>>();
auto backend = std::make_unique<testing::TestBackend>(kernel->kernel());
TfLiteInterpreterOptionsAddDelegate(options, backend->get_delegate());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
  int num_signatures = TfLiteInterpreterGetSignatureCount(interpreter);
  ASSERT_EQ(num_signatures, 0);
ASSERT_EQ(TfLiteInterpreterGetAsyncSignatureRunner(interpreter, "foo"),
nullptr);
TfLiteAsyncSignatureRunner* runner =
TfLiteInterpreterGetAsyncSignatureRunner(interpreter, nullptr);
ASSERT_NE(runner, nullptr);
int num_interpreter_inputs =
TfLiteInterpreterGetInputTensorCount(interpreter);
int num_runner_inputs = TfLiteAsyncSignatureRunnerGetInputCount(runner);
ASSERT_EQ(num_runner_inputs, num_interpreter_inputs);
for (int i = 0; i < num_interpreter_inputs; ++i) {
auto* interpreter_input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, i);
ASSERT_NE(interpreter_input_tensor, nullptr);
auto* interpreter_input_name = TfLiteTensorName(interpreter_input_tensor);
ASSERT_NE(interpreter_input_name, nullptr);
auto* runner_input_name = TfLiteAsyncSignatureRunnerGetInputName(runner, i);
ASSERT_NE(runner_input_name, nullptr);
EXPECT_STREQ(runner_input_name, interpreter_input_name);
auto* runner_input_tensor = TfLiteAsyncSignatureRunnerGetInputTensor(
runner, interpreter_input_name);
ASSERT_NE(runner_input_tensor, nullptr);
ASSERT_EQ(runner_input_tensor, reinterpret_cast<const TfLiteOpaqueTensor*>(
interpreter_input_tensor));
}
int num_interpreter_outputs =
TfLiteInterpreterGetOutputTensorCount(interpreter);
int num_runner_outputs = TfLiteAsyncSignatureRunnerGetOutputCount(runner);
ASSERT_EQ(num_runner_outputs, num_interpreter_outputs);
for (int i = 0; i < num_interpreter_outputs; ++i) {
auto* interpreter_output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, i);
ASSERT_NE(interpreter_output_tensor, nullptr);
auto* interpreter_output_name = TfLiteTensorName(interpreter_output_tensor);
ASSERT_NE(interpreter_output_name, nullptr);
auto* runner_output_name =
TfLiteAsyncSignatureRunnerGetOutputName(runner, i);
ASSERT_NE(runner_output_name, nullptr);
EXPECT_STREQ(runner_output_name, interpreter_output_name);
auto* runner_output_tensor = TfLiteAsyncSignatureRunnerGetOutputTensor(
runner, interpreter_output_name);
ASSERT_NE(runner_output_tensor, nullptr);
ASSERT_EQ(runner_output_tensor, reinterpret_cast<const TfLiteOpaqueTensor*>(
interpreter_output_tensor));
}
EXPECT_CALL(*kernel, Prepare(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel, Eval(_, _, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel, Wait(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel, Finish(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerPrepareBackends(runner));
auto* task = TfLiteAsyncSignatureRunnerCreateTask(runner);
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerInvokeAsync(runner, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerWait(runner, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerFinish(runner, task));
TfLiteAsyncSignatureRunnerDelete(runner);
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/c/async_signature_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/c/async_signature_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7ebe33fa-b0b5-44fd-81fd-f58aa8e47d6e | cpp | tensorflow/tensorflow | reconcile_fns | tensorflow/lite/core/async/interop/reconcile_fns.cc | tensorflow/lite/core/async/interop/reconcile_fns_test.cc | #include "tensorflow/lite/core/async/interop/reconcile_fns.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
namespace {
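// Euclid's algorithm. lcm() merges alignment and padding requirements below;
// e.g. alignments 8 and 3 reconcile to lcm(8, 3) = 24.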
template <typename T>
T gcd(T x, T y) {
while (y) {
auto m = x % y;
x = y;
y = m;
}
return x;
}
template <typename T>
T lcm(T x, T y) {
return x / gcd(x, y) * y;
}
void ReconcileAlignment(size_t l, size_t r, AttributeMap::ContainerT* merged) {
merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeyAlignment),
lcm(l, r));
}
void ReconcilePadding(size_t l, size_t r, AttributeMap::ContainerT* merged) {
merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeyPadding),
lcm(l, r));
}
bool CheckMultiples(size_t l, size_t r) { return l % r == 0; }
void ReconcileSize(size_t l, size_t r, AttributeMap::ContainerT* merged) {
merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeySize),
std::max(l, r));
}
bool CheckSize(size_t l, size_t r) { return l >= r; }
}
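// Merges two attribute maps key by key; a key present on only one side is
// copied through unchanged. For buffer maps, size reconciles to the max and
// alignment/padding to the least common multiple; any other mismatched key
// (including all sync-map keys) is recorded in `conflict` and the function
// returns false.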
bool ReconcileGeneralAttributeKeys(TfLiteAttrMapType type,
const AttributeMap::ContainerT* lhs,
const AttributeMap::ContainerT* rhs,
AttributeMap::ContainerT* merged,
AttributeMap::ContainerT* conflict) {
if (lhs == nullptr || rhs == nullptr || merged == nullptr) return false;
bool ret = true;
std::set<uint32_t> keys;
std::transform(lhs->begin(), lhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
std::transform(rhs->begin(), rhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
for (auto k : keys) {
const auto l = lhs->find(k);
const auto r = rhs->find(k);
if (l == lhs->end() || l->second.GetPtr() == nullptr) {
merged->insert_or_assign(k, r->second);
continue;
}
if (r == rhs->end() || r->second.GetPtr() == nullptr) {
merged->insert_or_assign(k, l->second);
continue;
}
if (type == kTfLiteAttrMapTypeBuffer) {
switch (static_cast<TfLiteBufferAttrKey>(k)) {
case kTfLiteBufferAttrKeySize:
ReconcileSize(*l->second.Get<size_t>(), *r->second.Get<size_t>(),
merged);
break;
case kTfLiteBufferAttrKeyAlignment:
ReconcileAlignment(*l->second.Get<size_t>(), *r->second.Get<size_t>(),
merged);
break;
case kTfLiteBufferAttrKeyPadding:
ReconcilePadding(*l->second.Get<size_t>(), *r->second.Get<size_t>(),
merged);
break;
default:
if (l->second == r->second) {
merged->insert_or_assign(k, l->second);
} else {
ret = false;
if (conflict) conflict->insert_or_assign(k, r->second);
}
}
} else {
if (l->second == r->second) {
merged->insert_or_assign(k, l->second);
} else {
ret = false;
if (conflict) conflict->insert_or_assign(k, r->second);
}
}
}
return ret;
}
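// Checks that `lhs` already covers every attribute `rhs` requests: size and
// padding must be at least as large, alignment must be an exact multiple,
// and all other keys must match. Keys absent from `rhs` are ignored, while
// keys `rhs` sets but `lhs` lacks count as conflicts.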
bool CheckGeneralAttributeKeysCoverage(TfLiteAttrMapType type,
const AttributeMap::ContainerT* lhs,
const AttributeMap::ContainerT* rhs,
AttributeMap::ContainerT* conflict) {
if (lhs == nullptr || rhs == nullptr) return false;
bool ret = true;
std::set<uint32_t> keys;
std::transform(lhs->begin(), lhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
std::transform(rhs->begin(), rhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
for (auto k : keys) {
bool has_conflict = false;
const auto l = lhs->find(k);
const auto r = rhs->find(k);
if (r == rhs->end() || r->second.GetPtr() == nullptr) {
continue;
} else if (l == lhs->end() || l->second.GetPtr() == nullptr) {
has_conflict = true;
} else {
if (type == kTfLiteAttrMapTypeBuffer) {
switch (static_cast<TfLiteBufferAttrKey>(k)) {
case kTfLiteBufferAttrKeySize:
has_conflict |=
!CheckSize(*l->second.Get<size_t>(), *r->second.Get<size_t>());
break;
case kTfLiteBufferAttrKeyAlignment:
has_conflict |= !CheckMultiples(*l->second.Get<size_t>(),
*r->second.Get<size_t>());
break;
case kTfLiteBufferAttrKeyPadding:
has_conflict |=
!CheckSize(*l->second.Get<size_t>(), *r->second.Get<size_t>());
break;
default:
if (l->second != r->second) {
has_conflict = true;
}
}
} else {
if (l->second != r->second) {
has_conflict = true;
}
}
}
if (has_conflict) {
if (conflict != nullptr) conflict->insert_or_assign(k, r->second);
ret = false;
}
}
return ret;
}
}
} | #include "tensorflow/lite/core/async/interop/reconcile_fns.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <tuple>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite::interop {
namespace {
using ContainerT = AttributeMap::ContainerT;
template <typename ValT, typename KeyT>
void SetAttr(ContainerT* c, KeyT k, ValT v) {
c->insert_or_assign(static_cast<uint32_t>(k), v);
}
template <typename ValT, typename KeyT>
ValT GetAttr(const ContainerT& c, KeyT k) {
return *(c.at(static_cast<uint32_t>(k)).Get<ValT>());
}
TEST(ReconcileTest, NullCheck) {
ContainerT m1, m2;
  EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &m1, &m2,
                                             /*merged=*/nullptr,
                                             /*conflict=*/nullptr));
  EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer,
                                             /*lhs=*/nullptr, &m1, &m2,
                                             /*conflict=*/nullptr));
  EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &m1,
                                             /*rhs=*/nullptr, &m2,
                                             /*conflict=*/nullptr));
  EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer,
                                                 /*lhs=*/nullptr, &m1, &m2));
  EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &m1,
                                                 /*rhs=*/nullptr, &m2));
}
TEST(ReconcileTest, MissingAttributeTest) {
{
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(4, GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment));
}
{
ContainerT lhs, rhs, merged;
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(4, GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment));
}
{
ContainerT lhs, rhs, merged;
const char value[] = "string";
SetAttr(&rhs, kTfLiteSynchronizationAttrKeyObjectTypeName, value);
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeSync, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(value, GetAttr<const char*>(
merged, kTfLiteSynchronizationAttrKeyObjectTypeName));
}
}
TEST(CheckCoverageTest, MissingAttributeTest) {
{
ContainerT lhs, rhs;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer,
&lhs, &rhs, nullptr));
}
{
ContainerT lhs, rhs, merged;
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer,
&lhs, &rhs, nullptr));
}
}
class ReconcileAlignmentTest
: public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {};
TEST_P(ReconcileAlignmentTest, Test) {
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, std::get<1>(GetParam()));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(std::get<2>(GetParam()),
GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment));
}
INSTANTIATE_TEST_SUITE_P(ReconcileAlignmentTest, ReconcileAlignmentTest,
testing::Values(std::make_tuple(4, 4, 4),
std::make_tuple(1, 4, 4),
std::make_tuple(8, 4, 8),
std::make_tuple(8, 3, 24)));
class CheckAlignmentTest
: public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {};
TEST_P(CheckAlignmentTest, Test) {
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, std::get<1>(GetParam()));
EXPECT_EQ(std::get<2>(GetParam()),
CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &conflict));
EXPECT_EQ(
!std::get<2>(GetParam()),
conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeyAlignment)));
}
INSTANTIATE_TEST_SUITE_P(CheckAlignmentTest, CheckAlignmentTest,
testing::Values(std::make_tuple(4, 4, true),
std::make_tuple(4, 1, true),
std::make_tuple(1, 4, false)));
class ReconcilePaddingTest
: public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {};
TEST_P(ReconcilePaddingTest, Test) {
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeyPadding, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyPadding, std::get<1>(GetParam()));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(std::get<2>(GetParam()),
GetAttr<size_t>(merged, kTfLiteBufferAttrKeyPadding));
}
INSTANTIATE_TEST_SUITE_P(ReconcilePaddingTest, ReconcilePaddingTest,
testing::Values(std::make_tuple(4, 4, 4),
std::make_tuple(1, 4, 4),
std::make_tuple(8, 4, 8),
std::make_tuple(8, 3, 24)));
class CheckPaddingTest
: public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {};
TEST_P(CheckPaddingTest, Test) {
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, kTfLiteBufferAttrKeyPadding, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyPadding, std::get<1>(GetParam()));
EXPECT_EQ(std::get<2>(GetParam()),
CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &conflict));
EXPECT_EQ(!std::get<2>(GetParam()),
conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeyPadding)));
}
INSTANTIATE_TEST_SUITE_P(CheckPaddingTest, CheckPaddingTest,
testing::Values(std::make_tuple(4, 4, true),
std::make_tuple(4, 1, true),
std::make_tuple(1, 4, false)));
class ReconcileSizeTest
: public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {};
TEST_P(ReconcileSizeTest, Test) {
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeySize, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeySize, std::get<1>(GetParam()));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(std::get<2>(GetParam()),
GetAttr<size_t>(merged, kTfLiteBufferAttrKeySize));
}
INSTANTIATE_TEST_SUITE_P(ReconcileSizeTest, ReconcileSizeTest,
testing::Values(std::make_tuple(4, 4, 4),
std::make_tuple(1, 4, 4),
std::make_tuple(8, 4, 8),
std::make_tuple(8, 3, 8)));
class CheckSizeTest
: public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {};
TEST_P(CheckSizeTest, Test) {
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, kTfLiteBufferAttrKeySize, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeySize, std::get<1>(GetParam()));
EXPECT_EQ(std::get<2>(GetParam()),
CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &conflict));
EXPECT_EQ(!std::get<2>(GetParam()),
conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeySize)));
}
INSTANTIATE_TEST_SUITE_P(CheckSizeTest, CheckSizeTest,
testing::Values(std::make_tuple(4, 4, true),
std::make_tuple(4, 1, true),
std::make_tuple(1, 4, false)));
class ReconcileNameTest
: public testing::TestWithParam<std::tuple<TfLiteAttrMapType, uint32_t>> {};
TEST_P(ReconcileNameTest, Test) {
constexpr char name_string1[] = "string1";
std::string name_string1_1 = "string1";
constexpr char name_string2[] = "string2";
{
ContainerT lhs, rhs, merged;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string1_1.c_str());
EXPECT_TRUE(ReconcileGeneralAttributeKeys(std::get<0>(GetParam()), &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(0, strcmp(GetAttr<const char*>(merged, std::get<1>(GetParam())),
name_string1));
}
{
ContainerT lhs, rhs, merged, conflict;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string2);
EXPECT_FALSE(ReconcileGeneralAttributeKeys(std::get<0>(GetParam()), &lhs,
&rhs, &merged, &conflict));
EXPECT_TRUE(conflict.count(std::get<1>(GetParam())));
}
}
INSTANTIATE_TEST_SUITE_P(
ReconcileNameTest, ReconcileNameTest,
testing::Values(
std::make_tuple(
kTfLiteAttrMapTypeBuffer,
static_cast<uint32_t>(kTfLiteBufferAttrKeyResourceTypeName)),
std::make_tuple(kTfLiteAttrMapTypeSync,
static_cast<uint32_t>(
kTfLiteSynchronizationAttrKeyObjectTypeName))));
class CheckNameTest
: public testing::TestWithParam<std::tuple<TfLiteAttrMapType, uint32_t>> {};
TEST_P(CheckNameTest, Test) {
constexpr char name_string1[] = "string1";
std::string name_string1_1 = "string1";
constexpr char name_string2[] = "string2";
{
ContainerT lhs, rhs;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string1_1.c_str());
EXPECT_TRUE(CheckGeneralAttributeKeysCoverage(std::get<0>(GetParam()), &lhs,
&rhs, nullptr));
}
{
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string2);
EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(std::get<0>(GetParam()),
&lhs, &rhs, &conflict));
EXPECT_TRUE(conflict.count(std::get<1>(GetParam())));
}
}
INSTANTIATE_TEST_SUITE_P(
CheckNameTest, CheckNameTest,
testing::Values(
std::make_tuple(
kTfLiteAttrMapTypeBuffer,
static_cast<uint32_t>(kTfLiteBufferAttrKeyResourceTypeName)),
std::make_tuple(kTfLiteAttrMapTypeSync,
static_cast<uint32_t>(
kTfLiteSynchronizationAttrKeyObjectTypeName))));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/reconcile_fns.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/reconcile_fns_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3fc1de5f-22d4-46d6-9a7d-6cf17b61d645 | cpp | tensorflow/tensorflow | attribute_map_internal | tensorflow/lite/core/async/interop/attribute_map_internal.cc | tensorflow/lite/core/async/interop/attribute_map_internal_test.cc | #include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/reconcile_fns.h"
namespace tflite {
namespace interop {
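// Merges this map's attributes with `other` into `merged`; irreconcilable
// keys are recorded in `conflict` when it is non-null. Fails fast on null
// arguments or mismatched map types (buffer vs. synchronization).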
bool AttributeMap::ReconcileAttributes(const AttributeMap* other,
AttributeMap* merged,
AttributeMap* conflict) const {
if (other == nullptr || merged == nullptr) return false;
if (type_ != other->type_) return false;
merged->type_ = type_;
if (conflict) conflict->type_ = type_;
return tflite::interop::ReconcileGeneralAttributeKeys(
type_, &attrs_, &other->attrs_, &merged->attrs_,
conflict ? &conflict->attrs_ : nullptr);
}
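// Returns true if this map's attributes cover (i.e. satisfy) `other`'s
// requirements; uncovered keys are written to `conflict` when provided.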
bool AttributeMap::CheckAttributeCoverage(const AttributeMap* other,
AttributeMap* conflict) const {
if (other == nullptr) return false;
if (type_ != other->type_) return false;
if (conflict) conflict->type_ = type_;
return tflite::interop::CheckGeneralAttributeKeysCoverage(
type_, &attrs_, &other->attrs_, conflict ? &conflict->attrs_ : nullptr);
}
}
} | #include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
namespace {
TEST(AttributeMapTest, TypeTest) {
{
auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_TRUE(attrs.IsBufferAttributeMap());
EXPECT_FALSE(attrs.IsSyncAttributeMap());
}
{
auto attrs = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs.IsSyncAttributeMap());
EXPECT_FALSE(attrs.IsBufferAttributeMap());
}
}
TEST(AttributeMapTest, AccessorTest) {
auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
{
attrs.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
size_t result;
EXPECT_TRUE(attrs.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(8, result);
}
{
attrs.SetCustomAttr("Foo", 12);
int result;
EXPECT_FALSE(attrs.GetCustomAttr("Bar", &result));
EXPECT_TRUE(attrs.GetCustomAttr("Foo", &result));
EXPECT_EQ(12, result);
}
}
TEST(AttributeMapTest, ReconcileFailDifferentTypes) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, &attrs3, nullptr));
EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
}
TEST(AttributeMapTest, NullptrTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(nullptr, &attrs2, nullptr));
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, nullptr, nullptr));
  EXPECT_FALSE(attrs1.CheckAttributeCoverage(nullptr, nullptr));
}
TEST(AttributeMapTest, ReconcileDifferentTypes) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, &attrs3, nullptr));
}
TEST(AttributeMapTest, ReconcileTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs4 = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs1.ReconcileAttributes(&attrs2, &attrs3, &attrs4));
EXPECT_TRUE(attrs3.IsBufferAttributeMap());
EXPECT_TRUE(attrs4.IsBufferAttributeMap());
size_t result;
EXPECT_TRUE(attrs3.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(8, result);
}
TEST(AttributeMapTest, CoverageTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
EXPECT_TRUE(attrs3.IsBufferAttributeMap());
}
TEST(AttributeMapTest, CoverageFailedTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(10));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto conflict = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &conflict));
EXPECT_TRUE(conflict.IsBufferAttributeMap());
size_t result;
EXPECT_TRUE(conflict.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(4, result);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/attribute_map_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/attribute_map_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2dfa250a-7737-404a-b9d5-dbff2dca84a2 | cpp | tensorflow/tensorflow | task | tensorflow/lite/core/async/c/task.cc | tensorflow/lite/core/async/c/task_test.cc | #include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
extern "C" {
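// All entry points below are thin null-checked wrappers that forward to the
// underlying tflite::async::ExecutionTask.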
TfLiteStatus TfLiteExecutionTaskSetBuffer(TfLiteExecutionTask* task,
TfLiteIoType io_type,
const char* tensor_signature_name,
TfLiteBufferHandle handle) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteError;
return task->task->SetBufferHandle(io_type, tensor_signature_name, handle);
}
TfLiteStatus TfLiteExecutionTaskSetBufferByIndex(TfLiteExecutionTask* task,
int tensor_index,
TfLiteBufferHandle handle) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->SetBufferHandle(tensor_index, handle);
}
TfLiteStatus TfLiteExecutionTaskSetSync(TfLiteExecutionTask* task,
TfLiteIoType io_type,
const char* tensor_signature_name,
TfLiteSynchronization* sync) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteError;
return task->task->SetSynchronization(io_type, tensor_signature_name, sync);
}
TfLiteStatus TfLiteExecutionTaskSetSyncByIndex(TfLiteExecutionTask* task,
int tensor_index,
TfLiteSynchronization* sync) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->SetSynchronization(tensor_index, sync);
}
TfLiteBufferHandle TfLiteExecutionTaskGetBufferByName(
const TfLiteExecutionTask* task, TfLiteIoType io_type,
const char* tensor_signature_name) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteNullBufferHandle;
return task->task->GetBufferHandle(io_type, tensor_signature_name);
}
TfLiteSynchronization* TfLiteExecutionTaskGetSyncByName(
const TfLiteExecutionTask* task, TfLiteIoType io_type,
const char* tensor_signature_name) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return nullptr;
return task->task->GetSynchronization(io_type, tensor_signature_name);
}
TfLiteBufferHandle TfLiteExecutionTaskGetBufferByIndex(
const TfLiteExecutionTask* task, int tensor_index) {
if (task == nullptr || task->task == nullptr) return kTfLiteNullBufferHandle;
return task->task->GetBufferHandle(tensor_index);
}
TfLiteSynchronization* TfLiteExecutionTaskGetSyncByIndex(
const TfLiteExecutionTask* task, int tensor_index) {
if (task == nullptr || task->task == nullptr) return nullptr;
return task->task->GetSynchronization(tensor_index);
}
void* TfLiteExecutionTaskGetDelegateExecutionData(
const TfLiteExecutionTask* task, TfLiteAsyncKernel* kernel) {
if (task == nullptr || task->task == nullptr) return nullptr;
return task->task->GetDelegateExecutionData(kernel);
}
void TfLiteExecutionTaskSetDelegateExecutionData(TfLiteExecutionTask* task,
TfLiteAsyncKernel* kernel,
void* data) {
if (task == nullptr || task->task == nullptr) return;
task->task->SetDelegateExecutionData(kernel, data);
}
TfLiteStatus TfLiteExecutionTaskGetStatus(const TfLiteExecutionTask* task) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->Status();
}
void TfLiteExecutionTaskSetStatus(TfLiteExecutionTask* task,
TfLiteStatus status) {
if (task == nullptr || task->task == nullptr) return;
task->task->SetStatus(status);
}
} | #include "tensorflow/lite/core/async/c/task.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/common.h"
namespace {
class TfLiteExecutionTaskTest : public ::testing::Test {
protected:
void SetUp() override {
input_names_["x"] = 1;
input_names_["y"] = 2;
output_names_["a"] = 3;
task_.task->SetInputNameMap(&input_names_);
task_.task->SetOutputNameMap(&output_names_);
}
TfLiteExecutionTask* task() { return &task_; }
protected:
tflite::async::ExecutionTask::TensorNameMapT input_names_;
tflite::async::ExecutionTask::TensorNameMapT output_names_;
TfLiteExecutionTask task_;
};
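// The fixture maps signature names x/y to input tensor indices 1/2 and a to
// output tensor index 3, so the name-based and index-based accessors below
// address the same slots.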
TEST_F(TfLiteExecutionTaskTest, BasicTest) {
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetSync(task(), kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(
42, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeInput, "x"));
EXPECT_EQ(
43, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeInput, "y"));
EXPECT_EQ(
44, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(sync,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeInput, "x"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeInput, "y"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeOutput, "a"));
TfLiteSynchronizationDelete(sync);
}
TEST_F(TfLiteExecutionTaskTest, BasicTestByTensorIndex) {
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetSync(task(), kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(42, TfLiteExecutionTaskGetBufferByIndex(task(), 1));
EXPECT_EQ(43, TfLiteExecutionTaskGetBufferByIndex(task(), 2));
EXPECT_EQ(44, TfLiteExecutionTaskGetBufferByIndex(task(), 3));
EXPECT_EQ(sync, TfLiteExecutionTaskGetSyncByIndex(task(), 1));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(task(), 2));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(task(), 3));
TfLiteSynchronizationDelete(sync);
}
TEST_F(TfLiteExecutionTaskTest, NullTest) {
EXPECT_EQ(kTfLiteError,
TfLiteExecutionTaskSetBuffer(nullptr, kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskSetSync(
nullptr, kTfLiteIoTypeInput, "x", nullptr));
EXPECT_EQ(kTfLiteNullBufferHandle, TfLiteExecutionTaskGetBufferByName(
nullptr, kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(nullptr, kTfLiteIoTypeInput, "x"));
EXPECT_EQ(kTfLiteNullBufferHandle,
TfLiteExecutionTaskGetBufferByIndex(nullptr, 3));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(nullptr, 3));
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskGetStatus(nullptr));
TfLiteExecutionTaskSetStatus(nullptr, kTfLiteOk);
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskSetBufferByIndex(nullptr, 0, 0));
EXPECT_EQ(kTfLiteError,
TfLiteExecutionTaskSetSyncByIndex(nullptr, 0, nullptr));
}
TEST_F(TfLiteExecutionTaskTest, StatusTest) {
EXPECT_EQ(kTfLiteOk, TfLiteExecutionTaskGetStatus(task()));
TfLiteExecutionTaskSetStatus(task(), kTfLiteError);
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskGetStatus(task()));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/c/task.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/c/task_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c1f8f33e-1147-4fa1-b5d6-6457a9b6e08e | cpp | tensorflow/tensorflow | flatbuffer_conversions | tensorflow/lite/core/api/flatbuffer_conversions.cc | tensorflow/lite/core/api/flatbuffer_conversions_test.cc | #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
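// Wraps a BuiltinDataAllocator so allocations come back as unique_ptrs and
// are automatically deallocated if parsing bails out before release().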
class SafeBuiltinDataAllocator {
public:
class BuiltinDataDeleter {
public:
explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
: allocator_(allocator) {}
void operator()(void* data) { allocator_->Deallocate(data); }
private:
BuiltinDataAllocator* allocator_;
};
template <typename T>
using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;
explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
: allocator_(allocator) {}
template <typename T>
BuiltinDataPtr<T> Allocate() {
return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
BuiltinDataDeleter(allocator_));
}
private:
BuiltinDataAllocator* allocator_;
};
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
TFLITE_DCHECK(op != nullptr);
TFLITE_DCHECK(error_reporter != nullptr);
TFLITE_DCHECK(allocator != nullptr);
TFLITE_DCHECK(builtin_data != nullptr);
}
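// Copies a flatbuffer int vector into a caller-provided fixed-size buffer,
// reporting an error if the vector is missing or longer than the buffer.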
template <typename DataType = int32_t>
static TfLiteStatus FlatBufferIntVectorToArray(
int max_size_of_buffer, const flatbuffers::Vector<DataType>* flat_vector,
DataType* buffer, ErrorReporter* error_reporter, const char* op_name) {
if (!flat_vector) {
TF_LITE_REPORT_ERROR(error_reporter,
"Input array not provided for operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
size_t num_dimensions = flat_vector->size();
if (num_dimensions > max_size_of_buffer / sizeof(DataType)) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Found too many dimensions in the input array of operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
for (size_t i = 0; i < num_dimensions; ++i) {
buffer[i] = flat_vector->Get(i);
}
}
}
return kTfLiteOk;
}
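// Schema-enum to runtime-enum converters. Unrecognized values fall back to a
// conservative default (none/unknown) rather than erroring.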
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
switch (activation) {
case ActivationFunctionType_NONE:
return kTfLiteActNone;
case ActivationFunctionType_RELU:
return kTfLiteActRelu;
case ActivationFunctionType_RELU_N1_TO_1:
return kTfLiteActReluN1To1;
case ActivationFunctionType_RELU6:
return kTfLiteActRelu6;
case ActivationFunctionType_TANH:
return kTfLiteActTanh;
case ActivationFunctionType_SIGN_BIT:
return kTfLiteActSignBit;
}
return kTfLiteActNone;
}
TfLitePadding ConvertPadding(Padding padding) {
switch (padding) {
case Padding_SAME:
return kTfLitePaddingSame;
case Padding_VALID:
return kTfLitePaddingValid;
}
return kTfLitePaddingUnknown;
}
TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
switch (padding) {
case MirrorPadMode_REFLECT:
return kTfLiteMirrorPaddingReflect;
case MirrorPadMode_SYMMETRIC:
return kTfLiteMirrorPaddingSymmetric;
}
return kTfLiteMirrorPaddingUnknown;
}
TfLiteRngAlgorithm ConvertRngAlgorithm(RngAlgorithm algorithm) {
switch (algorithm) {
case RngAlgorithm_THREEFRY:
return kTfLiteRngAlgorithmThreefry;
case RngAlgorithm_PHILOX:
return kTfLiteRngAlgorithmPhilox;
case RngAlgorithm_DEFAULT:
return kTfLiteRngAlgorithmDefault;
}
return kTfLiteRngAlgorithmUnknown;
}
#ifndef TF_LITE_STATIC_MEMORY
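// Central dispatch: routes each builtin operator to its dedicated Parse*()
// helper, or parses the options inline for ops without a standalone parser.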
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
auto parseLSHProjectionType = [](LSHProjectionType type) {
switch (type) {
case LSHProjectionType_SPARSE:
return kTfLiteLshProjectionSparse;
case LSHProjectionType_DENSE:
return kTfLiteLshProjectionDense;
default:
return kTfLiteLshProjectionUnknown;
}
};
auto parseCombinerType = [](CombinerType type) {
switch (type) {
case CombinerType_MEAN:
return kTfLiteCombinerTypeMean;
case CombinerType_SQRTN:
return kTfLiteCombinerTypeSqrtn;
case CombinerType_SUM:
default:
return kTfLiteCombinerTypeSum;
}
};
SafeBuiltinDataAllocator safe_allocator(allocator);
*builtin_data = nullptr;
switch (op_type) {
case BuiltinOperator_ABS: {
return ParseAbs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD: {
return ParseAdd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD_N: {
return ParseAddN(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MAX: {
return ParseArgMax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MIN: {
return ParseArgMin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ASSIGN_VARIABLE: {
return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_AVERAGE_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_MATMUL: {
return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_TO_SPACE_ND: {
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BROADCAST_ARGS: {
return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BROADCAST_TO: {
return ParseBroadcastTo(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CALL_ONCE: {
return ParseCallOnce(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONCATENATION: {
return ParseConcatenation(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONV_2D: {
return ParseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CUMSUM: {
return ParseCumsum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTH_TO_SPACE: {
return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEQUANTIZE: {
return ParseDequantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DIV: {
return ParseDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ELU: {
return ParseElu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EMBEDDING_LOOKUP: {
return ParseEmbeddingLookup(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXP: {
return ParseExp(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXPAND_DIMS: {
return ParseExpandDims(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FILL: {
return ParseFill(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR: {
return ParseFloor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_DIV: {
return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_MOD: {
return ParseFloorMod(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FULLY_CONNECTED: {
return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GATHER_ND: {
return ParseGatherNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER: {
return ParseGreater(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER_EQUAL: {
return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_HARD_SWISH: {
return ParseHardSwish(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_NORMALIZATION: {
return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LEAKY_RELU: {
return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS: {
return ParseLess(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS_EQUAL: {
return ParseLessEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG: {
return ParseLog(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_AND: {
return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_NOT: {
return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_OR: {
return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGISTIC: {
return ParseLogistic(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG_SOFTMAX: {
return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LSTM: {
return ParseLSTM(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAXIMUM: {
return ParseMaximum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAX_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MIRROR_PAD: {
return ParseMirrorPad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MEAN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MINIMUM: {
return ParseMinimum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MUL: {
return ParseMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_NEG: {
return ParseNeg(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_NOT_EQUAL: {
return ParseNotEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PACK: {
return ParsePack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PAD: {
return ParsePad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PADV2: {
return ParsePadV2(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_POW: {
return ParsePow(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PRELU: {
return ParsePrelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_QUANTIZE: {
return ParseQuantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_READ_VARIABLE: {
return ParseReadVariable(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ANY: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ALL: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MAX: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MIN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_PROD: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RELU: {
return ParseRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RELU6: {
return ParseRelu6(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESHAPE: {
return ParseReshape(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESIZE_BILINEAR: {
return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
return ParseResizeNearestNeighbor(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_ROUND: {
return ParseRound(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RSQRT: {
return ParseRsqrt(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SELECT_V2: {
return ParseSelectV2(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SHAPE: {
return ParseShape(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SIN: {
return ParseSin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SOFTMAX: {
return ParseSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_BATCH_ND: {
return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_DEPTH: {
return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT: {
return ParseSplit(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT_V: {
return ParseSplitV(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQRT: {
return ParseSqrt(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUARE: {
return ParseSquare(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUARED_DIFFERENCE: {
return ParseSquaredDifference(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_SQUEEZE: {
return ParseSqueeze(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STRIDED_SLICE: {
return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SUB: {
return ParseSub(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SUM: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SVDF: {
return ParseSvdf(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TANH: {
return ParseTanh(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TRANSPOSE_CONV: {
return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_UNPACK: {
return ParseUnpack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_VAR_HANDLE: {
return ParseVarHandle(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ZEROS_LIKE: {
return ParseZerosLike(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BITWISE_XOR: {
return ParseBitwiseXor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RIGHT_SHIFT: {
return ParseRightShift(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CAST: {
return ParseCast(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LSH_PROJECTION: {
auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lshParams =
op->builtin_options_as_LSHProjectionOptions()) {
params->type = parseLSHProjectionType(lshParams->type());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sequence_rnn_params =
op->builtin_options_as_SequenceRNNOptions()) {
params->activation =
ConvertActivation(sequence_rnn_params->fused_activation_function());
params->time_major = sequence_rnn_params->time_major();
params->asymmetric_quantize_inputs =
sequence_rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
auto params =
safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_sequence_rnn_params =
op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
params->activation = ConvertActivation(
bidi_sequence_rnn_params->fused_activation_function());
params->time_major = bidi_sequence_rnn_params->time_major();
params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
params->asymmetric_quantize_inputs =
bidi_sequence_rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RNN: {
auto params = safe_allocator.Allocate<TfLiteRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
params->activation =
ConvertActivation(rnn_params->fused_activation_function());
params->asymmetric_quantize_inputs =
rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
auto params =
safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* embedding_params =
op->builtin_options_as_EmbeddingLookupSparseOptions()) {
params->combiner = parseCombinerType(embedding_params->combiner());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE_LOOKUP:
return kTfLiteOk;
case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_LocalResponseNormalizationOptions()) {
params->radius = schema_params->radius();
params->bias = schema_params->bias();
params->alpha = schema_params->alpha();
params->beta = schema_params->beta();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_lstm_params =
op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(bidi_lstm_params->fused_activation_function());
params->cell_clip = bidi_lstm_params->cell_clip();
params->proj_clip = bidi_lstm_params->proj_clip();
params->merge_outputs = bidi_lstm_params->merge_outputs();
params->time_major = bidi_lstm_params->time_major();
params->asymmetric_quantize_inputs =
bidi_lstm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_SKIP_GRAM: {
auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* skip_gram_params =
op->builtin_options_as_SkipGramOptions()) {
params->ngram_size = skip_gram_params->ngram_size();
params->max_skip_size = skip_gram_params->max_skip_size();
params->include_all_ngrams = skip_gram_params->include_all_ngrams();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GATHER: {
return ParseGather(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPARSE_TO_DENSE: {
auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sparse_to_dense_params =
op->builtin_options_as_SparseToDenseOptions()) {
params->validate_indices = sparse_to_dense_params->validate_indices();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_DELEGATE: {
TF_LITE_REPORT_ERROR(error_reporter,
"DELEGATE op shouldn't exist in model.");
return kTfLiteError;
}
case BuiltinOperator_FAKE_QUANT: {
auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_FakeQuantOptions()) {
params->min = schema_params->min();
params->max = schema_params->max();
params->num_bits = schema_params->num_bits();
params->narrow_range = schema_params->narrow_range();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_ONE_HOT: {
auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
params->axis = schema_params->axis();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIQUE: {
auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* unique_params = op->builtin_options_as_UniqueOptions();
if (unique_params != nullptr) {
params->index_out_type =
unique_params->idx_out_type() == tflite::TensorType_INT64
? TfLiteType::kTfLiteInt64
: TfLiteType::kTfLiteInt32;
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_REVERSE_SEQUENCE: {
auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* reverse_seq_params =
op->builtin_options_as_ReverseSequenceOptions()) {
params->seq_dim = reverse_seq_params->seq_dim();
params->batch_dim = reverse_seq_params->batch_dim();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_IF: {
auto params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* if_params = op->builtin_options_as_IfOptions()) {
params->then_subgraph_index = if_params->then_subgraph_index();
params->else_subgraph_index = if_params->else_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_WHILE: {
auto params = safe_allocator.Allocate<TfLiteWhileParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
params->cond_subgraph_index = while_params->cond_subgraph_index();
params->body_subgraph_index = while_params->body_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CONV_3D:
case BuiltinOperator_CONV_3D_TRANSPOSE: {
auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
params->padding = ConvertPadding(conv3d_params->padding());
params->activation =
ConvertActivation(conv3d_params->fused_activation_function());
params->stride_depth = conv3d_params->stride_d();
params->stride_height = conv3d_params->stride_h();
params->stride_width = conv3d_params->stride_w();
params->dilation_depth_factor = conv3d_params->dilation_d_factor();
params->dilation_height_factor = conv3d_params->dilation_h_factor();
params->dilation_width_factor = conv3d_params->dilation_w_factor();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE: {
auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* hashtable_params =
op->builtin_options_as_HashtableOptions()) {
params->table_id = hashtable_params->table_id();
TF_LITE_ENSURE_STATUS(ConvertTensorType(
            hashtable_params->key_dtype(), &params->key_dtype, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
                                                &params->value_dtype,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_MULTINOMIAL: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* multinomial_params =
op->builtin_options_as_RandomOptions()) {
params->seed = multinomial_params->seed();
params->seed2 = multinomial_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RANDOM_STANDARD_NORMAL: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* random_std_normal_params =
op->builtin_options_as_RandomOptions()) {
params->seed = random_std_normal_params->seed();
params->seed2 = random_std_normal_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
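    // BUCKETIZE keeps a raw pointer into the flatbuffer's boundaries vector,
    // so both the vector and its backing data must be validated up front.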
case BuiltinOperator_BUCKETIZE: {
auto params = safe_allocator.Allocate<TfLiteBucketizeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bucketize_params =
op->builtin_options_as_BucketizeOptions()) {
const flatbuffers::Vector<float>* boundaries =
bucketize_params->boundaries();
if (boundaries == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"boundaries array not provided for operation 'bucketize'.\n");
return kTfLiteError;
}
params->num_boundaries = boundaries->size();
if (boundaries->data() == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter,
"boundaries.data() returned nullptr for "
"operation 'bucketize'.\n");
return kTfLiteError;
}
params->boundaries = boundaries->data();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RANDOM_UNIFORM: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* random_uniform_params =
op->builtin_options_as_RandomOptions()) {
params->seed = random_uniform_params->seed();
params->seed2 = random_uniform_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GELU: {
auto params = safe_allocator.Allocate<TfLiteGeluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) {
params->approximate = gelu_params->approximate();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_STABLEHLO_SCATTER: {
return ParseStablehloScatter(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR: {
return ParseStablehloRngBitGenerator(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_STABLEHLO_GATHER: {
return ParseStablehloGather(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STABLEHLO_REDUCE_WINDOW: {
return ParseStablehloReduceWindow(op, error_reporter, allocator,
builtin_data);
}
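    // REDUCE_WINDOW carries its options in builtin_options_2; an unsupported
    // reduce function is a hard parse error.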
case BuiltinOperator_REDUCE_WINDOW: {
auto params = safe_allocator.Allocate<TfLiteReduceWindowParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* reduce_params =
op->builtin_options_2_as_ReduceWindowOptions()) {
switch (reduce_params->reduce_function()) {
case ReduceWindowFunction_ADD:
params->reduce_function = TfLiteReduceWindowFunctionAdd;
break;
case ReduceWindowFunction_MUL:
params->reduce_function = TfLiteReduceWindowFunctionMul;
break;
case ReduceWindowFunction_MINIMUM:
params->reduce_function = TfLiteReduceWindowFunctionMin;
break;
case ReduceWindowFunction_MAXIMUM:
params->reduce_function = TfLiteReduceWindowFunctionMax;
break;
case ReduceWindowFunction_ALL:
params->reduce_function = TfLiteReduceWindowFunctionAll;
break;
case ReduceWindowFunction_ANY:
params->reduce_function = TfLiteReduceWindowFunctionAny;
break;
case ReduceWindowFunction_UNSUPPORTED:
default:
return kTfLiteError;
}
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_STABLEHLO_PAD: {
return ParseStablehloPad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STABLEHLO_COMPOSITE: {
return ParseStablehloComposite(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_STABLEHLO_SHIFT_LEFT: {
return ParseStablehloShiftLeft(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_STABLEHLO_SLICE:
case BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM:
case BuiltinOperator_STABLEHLO_CONVOLUTION:
case BuiltinOperator_STABLEHLO_LOGISTIC:
case BuiltinOperator_STABLEHLO_ADD:
case BuiltinOperator_STABLEHLO_DIVIDE:
case BuiltinOperator_STABLEHLO_MULTIPLY:
case BuiltinOperator_STABLEHLO_MAXIMUM:
case BuiltinOperator_STABLEHLO_RESHAPE:
case BuiltinOperator_STABLEHLO_CLAMP:
case BuiltinOperator_STABLEHLO_CONCATENATE:
case BuiltinOperator_STABLEHLO_CUSTOM_CALL:
case BuiltinOperator_STABLEHLO_REDUCE:
case BuiltinOperator_STABLEHLO_ABS:
case BuiltinOperator_STABLEHLO_AND:
case BuiltinOperator_STABLEHLO_COSINE:
case BuiltinOperator_STABLEHLO_EXPONENTIAL:
case BuiltinOperator_STABLEHLO_FLOOR:
case BuiltinOperator_STABLEHLO_LOG:
case BuiltinOperator_STABLEHLO_MINIMUM:
case BuiltinOperator_STABLEHLO_NEGATE:
case BuiltinOperator_STABLEHLO_OR:
case BuiltinOperator_STABLEHLO_POWER:
case BuiltinOperator_STABLEHLO_REMAINDER:
case BuiltinOperator_STABLEHLO_RSQRT:
case BuiltinOperator_STABLEHLO_SELECT:
case BuiltinOperator_STABLEHLO_SUBTRACT:
case BuiltinOperator_STABLEHLO_TANH:
case BuiltinOperator_STABLEHLO_DYNAMIC_SLICE:
case BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE:
case BuiltinOperator_STABLEHLO_IOTA:
case BuiltinOperator_STABLEHLO_COMPARE:
case BuiltinOperator_STABLEHLO_CONVERT:
case BuiltinOperator_STABLEHLO_DOT_GENERAL:
case BuiltinOperator_STABLEHLO_SORT:
case BuiltinOperator_STABLEHLO_WHILE:
case BuiltinOperator_STABLEHLO_TRANSPOSE:
case BuiltinOperator_STABLEHLO_CBRT:
case BuiltinOperator_CALL:
case BuiltinOperator_COMPLEX_ABS:
case BuiltinOperator_CONCAT_EMBEDDINGS:
case BuiltinOperator_COS:
case BuiltinOperator_CUSTOM:
case BuiltinOperator_DENSIFY:
case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
case BuiltinOperator_EQUAL:
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
case BuiltinOperator_IMAG:
case BuiltinOperator_MATRIX_DIAG:
case BuiltinOperator_MATRIX_SET_DIAG:
case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_RELU_0_TO_1:
case BuiltinOperator_SCATTER_ND:
case BuiltinOperator_SELECT:
case BuiltinOperator_SLICE:
case BuiltinOperator_TILE:
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_RANGE:
case BuiltinOperator_RANK:
case BuiltinOperator_REAL:
case BuiltinOperator_RFFT2D:
case BuiltinOperator_SEGMENT_SUM:
case BuiltinOperator_REVERSE_V2:
case BuiltinOperator_UNSORTED_SEGMENT_MAX:
case BuiltinOperator_UNSORTED_SEGMENT_MIN:
case BuiltinOperator_UNSORTED_SEGMENT_PROD:
case BuiltinOperator_UNSORTED_SEGMENT_SUM:
case BuiltinOperator_ATAN2:
case BuiltinOperator_SIGN:
case BuiltinOperator_BITCAST:
case BuiltinOperator_WHERE:
case BuiltinOperator_DILATE:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
return kTfLiteError;
}
return kTfLiteError;
}
#endif
}
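// Maps a schema TensorType onto the corresponding TfLiteType, reporting an
// error for types the runtime does not know about.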
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
ErrorReporter* error_reporter) {
switch (tensor_type) {
case TensorType_FLOAT16:
*type = kTfLiteFloat16;
return kTfLiteOk;
case TensorType_BFLOAT16:
*type = kTfLiteBFloat16;
return kTfLiteOk;
case TensorType_FLOAT32:
*type = kTfLiteFloat32;
return kTfLiteOk;
case TensorType_FLOAT64:
*type = kTfLiteFloat64;
return kTfLiteOk;
case TensorType_INT16:
*type = kTfLiteInt16;
return kTfLiteOk;
case TensorType_UINT16:
*type = kTfLiteUInt16;
return kTfLiteOk;
case TensorType_INT32:
*type = kTfLiteInt32;
return kTfLiteOk;
case TensorType_UINT32:
*type = kTfLiteUInt32;
return kTfLiteOk;
case TensorType_UINT8:
*type = kTfLiteUInt8;
return kTfLiteOk;
case TensorType_INT8:
*type = kTfLiteInt8;
return kTfLiteOk;
case TensorType_INT64:
*type = kTfLiteInt64;
return kTfLiteOk;
case TensorType_UINT64:
*type = kTfLiteUInt64;
return kTfLiteOk;
case TensorType_STRING:
*type = kTfLiteString;
return kTfLiteOk;
case TensorType_BOOL:
*type = kTfLiteBool;
return kTfLiteOk;
case TensorType_COMPLEX64:
*type = kTfLiteComplex64;
return kTfLiteOk;
case TensorType_COMPLEX128:
*type = kTfLiteComplex128;
return kTfLiteOk;
case TensorType_RESOURCE:
*type = kTfLiteResource;
return kTfLiteOk;
case TensorType_VARIANT:
*type = kTfLiteVariant;
return kTfLiteOk;
case TensorType_INT4:
*type = kTfLiteInt4;
return kTfLiteOk;
default:
*type = kTfLiteNoType;
TF_LITE_REPORT_ERROR(error_reporter,
"Unsupported data type %d in tensor\n", tensor_type);
return kTfLiteError;
}
}
TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteAddParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const AddOptions* schema_params = op->builtin_options_as_AddOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
} else {
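    // No AddOptions table in the model. As in the other Parse*() helpers,
    // the freshly allocated params are left at their defaults instead of
    // treating the missing table as an error (legacy permissive behavior).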
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
return kTfLiteOk;
}
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteArgMaxParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteArgMaxParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteArgMinParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteArgMinParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
params->adj_x = bmm_params->adj_x();
params->adj_y = bmm_params->adj_y();
params->asymmetric_quantize_inputs =
bmm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteCallOnceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const CallOnceOptions* schema_params =
op->builtin_options_as_CallOnceOptions();
if (schema_params != nullptr) {
params->init_subgraph_index = schema_params->init_subgraph_index();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteCastParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->in_data_type(), &params->in_data_type, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
                                            &params->out_data_type,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseConcatenation(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteConcatenationParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteConcatenationParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ConcatenationOptions* schema_params =
op->builtin_options_as_ConcatenationOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->axis = schema_params->axis();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
if (schema_params != nullptr) {
params->padding = ConvertPadding(schema_params->padding());
params->stride_width = schema_params->stride_w();
params->stride_height = schema_params->stride_h();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->dilation_width_factor = schema_params->dilation_w_factor();
params->dilation_height_factor = schema_params->dilation_h_factor();
TF_LITE_ENSURE_STATUS(
ConvertTensorType(schema_params->quantized_bias_type(),
                          &params->quantized_bias_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
params->exclusive = cumsum_params->exclusive();
params->reverse = cumsum_params->reverse();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseDepthToSpace(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteDepthToSpaceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
if (schema_params != nullptr) {
params->block_size = schema_params->block_size();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteDepthwiseConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const DepthwiseConv2DOptions* schema_params =
op->builtin_options_as_DepthwiseConv2DOptions();
if (schema_params != nullptr) {
params->padding = ConvertPadding(schema_params->padding());
params->stride_width = schema_params->stride_w();
params->stride_height = schema_params->stride_h();
params->depth_multiplier = schema_params->depth_multiplier();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->dilation_width_factor = schema_params->dilation_w_factor();
params->dilation_height_factor = schema_params->dilation_h_factor();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteDivParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseEmbeddingLookup(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFullyConnected(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteFullyConnectedParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const FullyConnectedOptions* schema_params =
op->builtin_options_as_FullyConnectedOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->keep_num_dims = schema_params->keep_num_dims();
params->asymmetric_quantize_inputs =
schema_params->asymmetric_quantize_inputs();
TF_LITE_ENSURE_STATUS(
ConvertTensorType(schema_params->quantized_bias_type(),
                          &params->quantized_bias_type, error_reporter));
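    // Only the default and shuffled-4x16-int8 packed weight layouts are
    // recognized; any other weights_format is a parse error.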
switch (schema_params->weights_format()) {
case FullyConnectedOptionsWeightsFormat_DEFAULT:
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
break;
case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
params->weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
break;
default:
TF_LITE_REPORT_ERROR(error_reporter,
"Unhandled fully-connected weights format.");
return kTfLiteError;
}
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteGatherParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
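  // GatherOptions may be absent from the model, so set explicit defaults
  // before reading the optional table.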
params->axis = 0;
params->batch_dims = 0;
if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
params->axis = gather_params->axis();
params->batch_dims = gather_params->batch_dims();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const IfOptions* schema_params = op->builtin_options_as_IfOptions();
if (schema_params != nullptr) {
params->then_subgraph_index = schema_params->then_subgraph_index();
params->else_subgraph_index = schema_params->else_subgraph_index();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteL2NormParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteL2NormParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* leaky_relu_params =
op->builtin_options_as_LeakyReluOptions()) {
params->alpha = leaky_relu_params->alpha();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
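// Parses LSTMOptions. Unlike most parsers above, missing options are treated
// as a hard error here because the kernel cannot run without a kernel type.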
TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
params->activation =
ConvertActivation(lstm_params->fused_activation_function());
params->cell_clip = lstm_params->cell_clip();
params->proj_clip = lstm_params->proj_clip();
switch (lstm_params->kernel_type()) {
case LSTMKernelType_FULL:
params->kernel_type = kTfLiteLSTMFullKernel;
break;
case LSTMKernelType_BASIC:
params->kernel_type = kTfLiteLSTMBasicKernel;
break;
default:
TF_LITE_REPORT_ERROR(error_reporter, "Unhandled LSTM kernel type: %d",
lstm_params->kernel_type());
return kTfLiteError;
}
params->asymmetric_quantize_inputs =
lstm_params->asymmetric_quantize_inputs();
} else {
TF_LITE_REPORT_ERROR(error_reporter, "No valid LSTM builtin options exist");
return kTfLiteError;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteMirrorPaddingParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const MirrorPadOptions* schema_params =
op->builtin_options_as_MirrorPadOptions();
if (schema_params != nullptr) {
params->mode = ConvertMirrorPadding(schema_params->mode());
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteMulParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const MulOptions* schema_params = op->builtin_options_as_MulOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLitePackParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLitePackParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const PackOptions* schema_params = op->builtin_options_as_PackOptions();
if (schema_params != nullptr) {
params->values_count = schema_params->values_count();
params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLitePoolParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLitePoolParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
if (schema_params != nullptr) {
params->padding = ConvertPadding(schema_params->padding());
params->stride_width = schema_params->stride_w();
params->stride_height = schema_params->stride_h();
params->filter_width = schema_params->filter_width();
params->filter_height = schema_params->filter_height();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteReducerParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteReducerParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
if (schema_params != nullptr) {
params->keep_dims = schema_params->keep_dims();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteReshapeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteReshapeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
if (schema_params != nullptr) {
const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
if (new_shape != nullptr) {
TF_LITE_ENSURE_STATUS(
FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
params->shape, error_reporter, "reshape"));
params->num_dimensions = new_shape->size();
    } else {
      // Dynamic reshape: no new_shape in the options, so the output shape is
      // taken from the shape input tensor and num_dimensions stays 0.
    }
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseResizeBilinear(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteResizeBilinearParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ResizeBilinearOptions* schema_params =
op->builtin_options_as_ResizeBilinearOptions();
if (schema_params != nullptr) {
params->align_corners = schema_params->align_corners();
params->half_pixel_centers = schema_params->half_pixel_centers();
} else {
params->align_corners = false;
params->half_pixel_centers = false;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteResizeNearestNeighborParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ResizeNearestNeighborOptions* schema_params =
op->builtin_options_as_ResizeNearestNeighborOptions();
if (schema_params != nullptr) {
params->align_corners = schema_params->align_corners();
params->half_pixel_centers = schema_params->half_pixel_centers();
} else {
params->align_corners = false;
params->half_pixel_centers = false;
}
*builtin_data = params.release();
return kTfLiteOk;
}
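// Parses StablehloReduceWindowOptions. Only window_dimensions is mandatory;
// the remaining attributes default to 1 (strides/dilations) or 0 (padding)
// when absent.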
TfLiteStatus ParseStablehloReduceWindow(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteStablehloReduceWindowParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloReduceWindowOptions* schema_params =
op->builtin_options_2_as_StablehloReduceWindowOptions();
if (schema_params) {
if (!schema_params->window_dimensions() ||
schema_params->window_dimensions()->size() == 0) {
TF_LITE_REPORT_ERROR(error_reporter,
"'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty.");
return kTfLiteError;
}
const size_t rank = schema_params->window_dimensions()->size();
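    // LoadAttr copies a flatbuffer int64 vector into the fixed-size params
    // array, checking its length against expected_size; a missing or empty
    // vector is replaced by fill_value in every slot instead.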
auto LoadAttr = [&error_reporter](
int64_t* params_array, size_t params_array_size_bytes,
const flatbuffers::Vector<int64_t>* flatbuffer_vector,
const char* attr_name, const size_t expected_size,
const int64_t fill_value) -> TfLiteStatus {
if (flatbuffer_vector && flatbuffer_vector->size()) {
if (expected_size != 0 && flatbuffer_vector->size() != expected_size) {
        TF_LITE_REPORT_ERROR(
            error_reporter,
            "'%s' attribute of 'stablehlo.reduce_window' does not have the "
            "expected size (%llu != %llu).",
            attr_name,
            static_cast<unsigned long long>(flatbuffer_vector->size()),
            static_cast<unsigned long long>(expected_size));
return kTfLiteError;
}
TfLiteStatus status = FlatBufferIntVectorToArray(
params_array_size_bytes, flatbuffer_vector, params_array,
error_reporter, "stablehlo.reduce_window");
if (status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.",
attr_name);
return status;
}
} else {
std::fill_n(params_array, params_array_size_bytes / sizeof(int64_t),
fill_value);
}
return kTfLiteOk;
};
TF_LITE_ENSURE_STATUS(
LoadAttr(params->window_dimensions, sizeof(params->window_dimensions),
schema_params->window_dimensions(), "window_dimensions",
rank, 1));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->window_strides, sizeof(params->window_strides),
schema_params->window_strides(), "window_strides",
rank, 1));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->base_dilations, sizeof(params->base_dilations),
schema_params->base_dilations(), "base_dilations",
rank, 1));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->window_dilations, sizeof(params->window_dilations),
schema_params->window_dilations(), "window_dilations",
rank, 1));
TF_LITE_ENSURE_STATUS(LoadAttr(params->padding, sizeof(params->padding),
schema_params->padding(), "padding",
2 * rank,
0));
params->body_subgraph_index = schema_params->body_subgraph_index();
*builtin_data = params.release();
return kTfLiteOk;
}
TF_LITE_REPORT_ERROR(
error_reporter,
"Could not get 'stablehlo.reduce_window' operation parameters.");
return kTfLiteError;
}
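// Parses StablehloScatterOptions. The three dimension arrays are variable
// length, so each copy also records its element count in the params struct.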
TfLiteStatus ParseStablehloScatter(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStablehloScatterParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStablehloScatterParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloScatterOptions* schema_params =
op->builtin_options_2_as_StablehloScatterOptions();
if (schema_params) {
params->indices_are_sorted = schema_params->indices_are_sorted();
if (schema_params->update_window_dims()) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->update_window_dims()->size() * sizeof(int64_t),
schema_params->update_window_dims(), params->update_window_dims,
error_reporter, "stablehlo_scatter"));
params->num_update_window_dims =
schema_params->update_window_dims()->size();
}
if (schema_params->inserted_window_dims()) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->inserted_window_dims()->size() * sizeof(int64_t),
schema_params->inserted_window_dims(), params->inserted_window_dims,
error_reporter, "stablehlo_scatter"));
params->num_inserted_window_dims =
schema_params->inserted_window_dims()->size();
}
if (schema_params->scatter_dims_to_operand_dims()) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->scatter_dims_to_operand_dims()->size() *
sizeof(int64_t),
schema_params->scatter_dims_to_operand_dims(),
params->scatter_dims_to_operand_dims, error_reporter,
"stablehlo_scatter"));
params->num_scatter_dims_to_operand_dims =
schema_params->scatter_dims_to_operand_dims()->size();
}
params->index_vector_dim = schema_params->index_vector_dim();
params->unique_indices = schema_params->unique_indices();
params->update_computation_subgraph_index =
schema_params->update_computation_subgraph_index();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStablehloRngBitGeneratorParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStablehloRngBitGeneratorParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloRngBitGeneratorOptions* schema_params =
op->builtin_options_2_as_StablehloRngBitGeneratorOptions();
if (schema_params != nullptr) {
params->algorithm = ConvertRngAlgorithm(schema_params->algorithm());
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseStablehloGather(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStablehloGatherParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStablehloGatherParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloGatherOptions* schema_params =
op->builtin_options_2_as_StablehloGatherOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->offset_dims()->size() *
sizeof(int64_t),
schema_params->offset_dims(),
params->offset_dims, error_reporter,
"stablehlo_gather"));
params->num_offset_dims = schema_params->offset_dims()->size();
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->collapsed_slice_dims()->size() * sizeof(int64_t),
schema_params->collapsed_slice_dims(), params->collapsed_slice_dims,
error_reporter, "stablehlo_gather"));
params->num_collapsed_slice_dims =
schema_params->collapsed_slice_dims()->size();
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->start_index_map()->size() * sizeof(int64_t),
schema_params->start_index_map(), params->start_index_map,
error_reporter, "stablehlo_gather"));
params->num_start_index_map = schema_params->start_index_map()->size();
params->index_vector_dim = schema_params->index_vector_dim();
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->slice_sizes()->size() * sizeof(int64_t),
schema_params->slice_sizes(), params->slice_sizes, error_reporter,
"stablehlo_gather"));
params->num_slice_sizes = schema_params->slice_sizes()->size();
params->indices_are_sorted = schema_params->indices_are_sorted();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
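// Parses StablehloPadOptions. All three padding arrays are required and must
// have the same length; a mismatch is reported as an error.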
TfLiteStatus ParseStablehloPad(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteStablehloPadParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloPadOptions* schema_params =
op->builtin_options_2_as_StablehloPadOptions();
if (schema_params) {
auto LoadAttr =
[&error_reporter](
int64_t* params_array, const size_t params_array_size_bytes,
const flatbuffers::Vector<int64_t>* const flatbuffer_vector,
const char* const attr_name) -> TfLiteStatus {
TfLiteStatus status = FlatBufferIntVectorToArray(
params_array_size_bytes, flatbuffer_vector, params_array,
error_reporter, "stablehlo.pad");
if (status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.",
attr_name);
}
return status;
};
TF_LITE_ENSURE_STATUS(
LoadAttr(params->edge_padding_low, sizeof(params->edge_padding_low),
schema_params->edge_padding_low(), "edge_padding_low"));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->edge_padding_high, sizeof(params->edge_padding_high),
schema_params->edge_padding_high(), "edge_padding_high"));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->interior_padding, sizeof(params->interior_padding),
schema_params->interior_padding(), "interior_padding"));
if (schema_params->edge_padding_low()->size() !=
schema_params->edge_padding_high()->size() ||
schema_params->edge_padding_low()->size() !=
schema_params->interior_padding()->size()) {
TF_LITE_REPORT_ERROR(error_reporter,
"'stablehlo.pad' operation parameter array sizes "
"are not consistent.");
return kTfLiteError;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TF_LITE_REPORT_ERROR(error_reporter,
"Could not get 'stablehlo.pad' operation parameters.");
return kTfLiteError;
}
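// Parses StableHLOCompositeOptions. Note that params->name and
// params->attributes point directly into the flatbuffer, so they are only
// valid while the model buffer stays alive.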
TfLiteStatus ParseStablehloComposite(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteStablehloCompositeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
const StableHLOCompositeOptions* schema_params =
op->builtin_options_2_as_StableHLOCompositeOptions();
if (schema_params) {
params->name = schema_params->name()->c_str();
params->version = schema_params->version();
params->subgraph_index = schema_params->decomposition_subgraph_index();
params->attributes = schema_params->composite_attributes()->data();
params->attributes_size = schema_params->composite_attributes()->size();
*builtin_data = params.release();
return kTfLiteOk;
}
TF_LITE_REPORT_ERROR(
error_reporter,
"Could not get 'stablehlo.composite' operation parameters.");
return kTfLiteError;
}
TfLiteStatus ParseStablehloShiftLeft(const Operator*, ErrorReporter*,
                                     BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteShapeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteShapeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
¶ms->out_type, error_reporter));
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSoftmaxParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
if (schema_params != nullptr) {
params->beta = schema_params->beta();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSpaceToDepth(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSpaceToDepthParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
if (schema_params != nullptr) {
params->block_size = schema_params->block_size();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSplitParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSplitParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
if (schema_params != nullptr) {
params->num_splits = schema_params->num_splits();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSplitVParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSplitVParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();
if (schema_params != nullptr) {
params->num_splits = schema_params->num_splits();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params =
safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* seq_lstm_params =
op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(seq_lstm_params->fused_activation_function());
params->cell_clip = seq_lstm_params->cell_clip();
params->proj_clip = seq_lstm_params->proj_clip();
params->time_major = seq_lstm_params->time_major();
params->asymmetric_quantize_inputs =
seq_lstm_params->asymmetric_quantize_inputs();
params->diagonal_recurrent_tensors =
seq_lstm_params->diagonal_recurrent_tensors();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSqueezeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSqueezeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();
if (schema_params != nullptr) {
const auto* squeeze_dims = schema_params->squeeze_dims();
if (squeeze_dims != nullptr) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
error_reporter, "squeeze"));
params->num_squeeze_dims = squeeze_dims->size();
} else {
params->num_squeeze_dims = 0;
}
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseStridedSlice(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStridedSliceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StridedSliceOptions* schema_params =
op->builtin_options_as_StridedSliceOptions();
if (schema_params != nullptr) {
params->begin_mask = schema_params->begin_mask();
params->end_mask = schema_params->end_mask();
params->ellipsis_mask = schema_params->ellipsis_mask();
params->new_axis_mask = schema_params->new_axis_mask();
params->shrink_axis_mask = schema_params->shrink_axis_mask();
params->offset = schema_params->offset();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSubParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SubOptions* schema_params = op->builtin_options_as_SubOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSVDFParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSVDFParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
if (schema_params != nullptr) {
params->rank = schema_params->rank();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->asymmetric_quantize_inputs =
schema_params->asymmetric_quantize_inputs();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseTransposeConv(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteTransposeConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const TransposeConvOptions* transpose_conv_params =
op->builtin_options_as_TransposeConvOptions();
if (transpose_conv_params != nullptr) {
params->padding = ConvertPadding(transpose_conv_params->padding());
params->stride_width = transpose_conv_params->stride_w();
params->stride_height = transpose_conv_params->stride_h();
params->activation =
ConvertActivation(transpose_conv_params->fused_activation_function());
TF_LITE_ENSURE_STATUS(
ConvertTensorType(transpose_conv_params->quantized_bias_type(),
¶ms->quantized_bias_type, error_reporter));
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteUnpackParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteUnpackParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
if (schema_params != nullptr) {
params->num = schema_params->num();
params->axis = schema_params->axis();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteVarHandleParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteVarHandleParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const VarHandleOptions* schema_params =
op->builtin_options_as_VarHandleOptions();
if (schema_params != nullptr) {
if (schema_params->container()) {
params->container = schema_params->container()->c_str();
}
if (schema_params->shared_name()) {
params->shared_name = schema_params->shared_name()->c_str();
}
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteWhileParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteWhileParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const WhileOptions* schema_params = op->builtin_options_as_WhileOptions();
if (schema_params != nullptr) {
params->cond_subgraph_index = schema_params->cond_subgraph_index();
params->body_subgraph_index = schema_params->body_subgraph_index();
  } else {
    // TODO(b/157480169): Return kTfLiteError or fill in explicit defaults;
    // missing options currently keep the zero-initialized values.
  }
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBitwiseXor(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseRightShift(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
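// Top-level dispatch used by the (non-micro) interpreter. On TF Lite Micro
// (TF_LITE_STATIC_MEMORY) the table-driven path is compiled out and callers
// must use the per-operator Parse* functions instead.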
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
#ifdef TF_LITE_STATIC_MEMORY
TF_LITE_REPORT_ERROR(
error_reporter,
"ParseOpData is unsupported on TfLiteMicro, please use the operator "
"specific parse functions (e.g. ParseAdd etc.).\n");
return kTfLiteError;
#else
return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
builtin_data);
#endif
}
}  // namespace tflite
|
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
using testing::AllOf;
using testing::Each;
using testing::ElementsAre;
using testing::Eq;
using testing::HasSubstr;
using testing::StrEq;
namespace tflite {
namespace {
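// Error reporter that captures formatted messages into a fixed buffer so the
// tests below can assert on their contents.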
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ += vsnprintf(buffer_ + buffer_size_,
kBufferSize - buffer_size_, format, args);
return buffer_size_;
}
const char* GetBuffer() const { return buffer_; }
int GetBufferSize() const { return buffer_size_; }
bool IsEmpty() const { return !buffer_size_; }
string GetString() const { return string(buffer_, buffer_size_); }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
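// Allocator backed by a single static buffer; it also verifies that each test
// performs at most one allocation at a time and never exceeds the buffer.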
class MockDataAllocator : public BuiltinDataAllocator {
public:
MockDataAllocator() : is_allocated_(false) {}
void* Allocate(size_t size, size_t alignment_hint) override {
EXPECT_FALSE(is_allocated_);
const int max_size = kBufferSize;
EXPECT_LE(size, max_size);
is_allocated_ = true;
return buffer_;
}
void Deallocate(void* data) override { is_allocated_ = false; }
private:
static constexpr int kBufferSize = 1024;
char buffer_[kBufferSize];
bool is_allocated_;
};
}  // namespace
class FlatbufferConversionsTest : public ::testing::Test {
public:
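  // Builds a minimal Operator flatbuffer carrying the given builtin options.
  // The two overloads cover the BuiltinOptions and BuiltinOptions2 unions.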
const Operator* BuildTestOperator(BuiltinOptions op_type,
flatbuffers::Offset<void> options) {
flatbuffers::Offset<Operator> offset =
CreateOperatorDirect(builder_, 0, nullptr, nullptr, op_type, options,
nullptr, CustomOptionsFormat_FLEXBUFFERS, nullptr);
builder_.Finish(offset);
void* pointer = builder_.GetBufferPointer();
return flatbuffers::GetRoot<Operator>(pointer);
}
const Operator* BuildTestOperator(BuiltinOptions2 op_type,
flatbuffers::Offset<void> options) {
flatbuffers::Offset<Operator> offset = CreateOperatorDirect(
builder_, 0, nullptr, nullptr,
tflite::BuiltinOptions_NONE,
0, nullptr,
tflite::CustomOptionsFormat_FLEXBUFFERS,
nullptr, nullptr,
0, 0,
op_type,
options);
builder_.Finish(offset);
void* pointer = builder_.GetBufferPointer();
return flatbuffers::GetRoot<Operator>(pointer);
}
protected:
MockErrorReporter mock_reporter_;
MockDataAllocator mock_allocator_;
flatbuffers::FlatBufferBuilder builder_;
};
TEST_F(FlatbufferConversionsTest, ParseSqueezeAll) {
const Operator* op = BuildTestOperator(
BuiltinOptions_SqueezeOptions, CreateSqueezeOptions(builder_).Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_SQUEEZE, &mock_reporter_,
&mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, ParseDynamicReshape) {
const Operator* op = BuildTestOperator(
BuiltinOptions_ReshapeOptions, CreateReshapeOptions(builder_).Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_RESHAPE, &mock_reporter_,
&mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, TestParseOpDataConv) {
const Operator* conv_op =
BuildTestOperator(BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(builder_, Padding_SAME, 1, 2,
ActivationFunctionType_RELU, 3, 4)
.Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk,
ParseOpData(conv_op, BuiltinOperator_CONV_2D, &mock_reporter_,
&mock_allocator_, &output_data));
EXPECT_NE(nullptr, output_data);
TfLiteConvParams* params = reinterpret_cast<TfLiteConvParams*>(output_data);
EXPECT_EQ(kTfLitePaddingSame, params->padding);
EXPECT_EQ(1, params->stride_width);
EXPECT_EQ(2, params->stride_height);
EXPECT_EQ(kTfLiteActRelu, params->activation);
EXPECT_EQ(3, params->dilation_width_factor);
EXPECT_EQ(4, params->dilation_height_factor);
}
TEST_F(FlatbufferConversionsTest, ParseBadFullyConnected) {
const Operator* conv_op = BuildTestOperator(
BuiltinOptions_FullyConnectedOptions,
CreateFullyConnectedOptions(
builder_, ActivationFunctionType_RELU,
static_cast<FullyConnectedOptionsWeightsFormat>(-1), true)
.Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteError,
ParseOpData(conv_op, BuiltinOperator_FULLY_CONNECTED,
&mock_reporter_, &mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, TestParseOpDataCustom) {
const Operator* custom_op =
BuildTestOperator(BuiltinOptions_NONE, flatbuffers::Offset<void>());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk,
ParseOpData(custom_op, BuiltinOperator_CUSTOM, &mock_reporter_,
&mock_allocator_, &output_data));
EXPECT_EQ(nullptr, output_data);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorType) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_FLOAT32, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteFloat32, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeFloat16) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_FLOAT16, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteFloat16, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeBFloat16) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_BFLOAT16, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteBFloat16, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeInt4) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_INT4, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteInt4, type);
}
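// Fixture helpers: ValidAttr()/ValidPaddingAttr() build attribute vectors at
// the maximum supported rank, the Invalid*() variants exceed it by one, and
// EmptyAttr() builds a zero-length vector.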
class StablehloReduceWindowFlatbufferConversionsTest
: public FlatbufferConversionsTest {
public:
static constexpr int kMaxDims =
TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT;
static constexpr int64_t kValidValue = 5;
auto ValidAttr() {
return builder_.CreateVector(std::vector<int64_t>(kMaxDims, kValidValue));
}
auto InvalidAttr() {
return builder_.CreateVector(
std::vector<int64_t>(kMaxDims + 1, kValidValue));
}
auto ValidPaddingAttr() {
return builder_.CreateVector(
std::vector<int64_t>(2 * kMaxDims, kValidValue));
}
auto InvalidPaddingAttr() {
return builder_.CreateVector(
std::vector<int64_t>(2 * kMaxDims + 1, kValidValue));
}
auto EmptyAttr() { return builder_.CreateVector<int64_t>({}); }
};
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, Succeeds) {
const Operator* stablehlo_reduce_window_op = BuildTestOperator(
BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
builder_.CreateVector<int64_t>({1, 2}),
builder_.CreateVector<int64_t>({3, 4}),
builder_.CreateVector<int64_t>({5, 6}),
builder_.CreateVector<int64_t>({7, 8}),
builder_.CreateVector<int64_t>({9, 10, 11, 12}),
13)
.Union());
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, 2),
ElementsAre(1, 2));
EXPECT_THAT(std::make_tuple(output_data->window_strides, 2),
ElementsAre(3, 4));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, 2),
ElementsAre(5, 6));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, 2),
ElementsAre(7, 8));
EXPECT_THAT(std::make_tuple(output_data->padding, 4),
ElementsAre(9, 10, 11, 12));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWithNoWindowDimensions) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
0,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoWindowStrides) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
0,
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoBaseDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
0,
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoWindowDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
0,
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(1));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, SucceedsWithNoPadding) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
0,
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWithEmptyWindowDimensions) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
EmptyAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyWindowStrides) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
EmptyAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyBaseDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
EmptyAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyWindowDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
EmptyAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(1));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyPadding) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
EmptyAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithParamsAtMaxDims) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowDimensionsHasMoreThanMaxDims) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
InvalidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
AllOf(HasSubstr("Found too many dimensions in the input array of "
"operation 'stablehlo.reduce_window'."),
HasSubstr("Check the 'window_dimensions' attribute.")));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowStridesHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
InvalidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr("'window_strides' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenBaseDilationsHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
InvalidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr("'base_dilations' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowDilationsHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
InvalidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr(
"'window_dilations' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenPaddingHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
InvalidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'padding' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, FailsWithWrongOptions) {
const Operator* stablehlo_reduce_window_op =
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions, 0);
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr(
"Could not get 'stablehlo.reduce_window' operation parameters."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, DeathTests) {
const Operator* stablehlo_reduce_window_op = BuildTestOperator(
BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_, ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(), 13)
.Union());
TfLiteStablehloReduceWindowParams* output_data = nullptr;
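  // The null-argument checks exercised below are debug-only assertions, so
  // these death tests are skipped in optimized (NDEBUG) builds.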
#ifdef NDEBUG
GTEST_SKIP();
#endif
EXPECT_DEATH(
ParseOpData(nullptr, BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, nullptr,
&mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
&mock_reporter_, nullptr, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
&mock_reporter_, &mock_allocator_, nullptr),
"");
}
class StablehloPadFlatbufferConversionsTest : public FlatbufferConversionsTest {
public:
static constexpr int kMaxDims =
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT;
static constexpr int64_t kValidValue = 5;
};
TEST_F(StablehloPadFlatbufferConversionsTest, Succeeds) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
builder_.CreateVector<int64_t>({2, 0, -2}),
builder_.CreateVector<int64_t>({3, 0, 3}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteOk);
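  // gMock accepts a (pointer, length) std::tuple as a container view, letting
  // ElementsAre match the raw C arrays in the output params struct.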
EXPECT_THAT(std::make_tuple(output_data->edge_padding_low, 3),
ElementsAre(1, 0, -1));
EXPECT_THAT(std::make_tuple(output_data->edge_padding_high, 3),
ElementsAre(2, 0, -2));
EXPECT_THAT(std::make_tuple(output_data->interior_padding, 3),
ElementsAre(3, 0, 3));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingLowPadding) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
0,
builder_.CreateVector<int64_t>({2, 0, -2}),
builder_.CreateVector<int64_t>({3, 0, 3}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
AllOf(
HasSubstr("Input array not provided for operation 'stablehlo.pad'."),
HasSubstr("Check the 'edge_padding_low' attribute.")));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingHighPadding) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
0,
builder_.CreateVector<int64_t>({3, 0, 3}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
AllOf(
HasSubstr("Input array not provided for operation 'stablehlo.pad'."),
HasSubstr("Check the 'edge_padding_high' attribute.")));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingInteriorPadding) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
builder_.CreateVector<int64_t>({2, 0, -2}),
0)
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
AllOf(
HasSubstr("Input array not provided for operation 'stablehlo.pad'."),
HasSubstr("Check the 'interior_padding' attribute.")));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsInconsistentSizes) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
builder_.CreateVector<int64_t>({2, 0, -2}),
builder_.CreateVector<int64_t>({3, 0, -3, 5}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'stablehlo.pad' operation parameter array sizes are "
"not consistent."));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithWrongOptions) {
const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("Could not get 'stablehlo.pad' operation parameters."));
}
TEST_F(StablehloPadFlatbufferConversionsTest, DeathTests) {
const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);
TfLiteStablehloPadParams* output_data = nullptr;
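  // As above, the null-argument assertions only fire in debug builds, hence
  // the NDEBUG skip.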
#ifdef NDEBUG
GTEST_SKIP();
#endif
EXPECT_DEATH(
ParseOpData(nullptr, BuiltinOperator_STABLEHLO_PAD, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
nullptr, &mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, nullptr, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, nullptr),
"");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/flatbuffer_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/flatbuffer_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03aad0e0-543f-4086-af9c-38f3b63304f7 | cpp | tensorflow/tensorflow | op_resolver | tensorflow/lite/core/api/op_resolver.cc | tensorflow/lite/core/api/op_resolver_test.cc | #include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
TfLiteStatus GetRegistrationFromOpCode(
const OperatorCode* opcode, const OpResolver& op_resolver,
ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
TfLiteStatus status = kTfLiteOk;
*registration = nullptr;
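  // GetBuiltinCode reconciles the legacy int8 builtin code field with the
  // newer int32 field, so models written by old and new converters both
  // resolve correctly.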
auto builtin_code = GetBuiltinCode(opcode);
int version = opcode->version();
if (builtin_code > BuiltinOperator_MAX) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Op builtin_code out of range: %d. Are you using old TFLite binary "
"with newer model?",
builtin_code);
status = kTfLiteError;
} else if (builtin_code != BuiltinOperator_CUSTOM) {
*registration = op_resolver.FindOp(builtin_code, version);
if (*registration == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Didn't find op for builtin opcode '%s' version '%d'. "
"An older version of this builtin might be supported. "
"Are you using an old TFLite binary with a newer model?\n",
EnumNameBuiltinOperator(builtin_code), version);
status = kTfLiteError;
}
} else if (!opcode->custom_code()) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Operator with CUSTOM builtin_code has no custom_code.\n");
status = kTfLiteError;
} else {
const char* name = opcode->custom_code()->c_str();
*registration = op_resolver.FindOp(name, version);
if (*registration == nullptr) {
status = kTfLiteError;
}
}
return status;
}
} | #include "tensorflow/lite/core/api/op_resolver.h"
#include <cstring>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
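// No-op kernel callbacks used to assemble stub TfLiteRegistration entries for
// the mock resolver below.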
void* MockInit(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void MockFree(TfLiteContext* context, void* buffer) {
}
TfLiteStatus MockPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus MockInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
class MockOpResolver : public OpResolver {
public:
const TfLiteRegistration* FindOp(BuiltinOperator op,
int version) const override {
if (op == BuiltinOperator_CONV_2D) {
static TfLiteRegistration r = {MockInit, MockFree, MockPrepare,
MockInvoke};
return &r;
} else {
return nullptr;
}
}
const TfLiteRegistration* FindOp(const char* op, int version) const override {
if (strcmp(op, "mock_custom") == 0) {
static TfLiteRegistration r = {MockInit, MockFree, MockPrepare,
MockInvoke};
return &r;
} else {
return nullptr;
}
}
};
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
return buffer_size_;
}
char* GetBuffer() { return buffer_; }
int GetBufferSize() { return buffer_size_; }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
}
TEST(OpResolver, TestResolver) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
const TfLiteRegistration* registration =
resolver->FindOp(BuiltinOperator_CONV_2D, 0);
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
registration = resolver->FindOp(BuiltinOperator_CAST, 0);
EXPECT_EQ(nullptr, registration);
registration = resolver->FindOp("mock_custom", 0);
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
registration = resolver->FindOp("nonexistent_custom", 0);
EXPECT_EQ(nullptr, registration);
}
TEST(OpResolver, TestGetRegistrationFromOpCodeConv) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset =
CreateOperatorCodeDirect(builder, BuiltinOperator_CONV_2D, nullptr, 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteOk, GetRegistrationFromOpCode(conv_code, *resolver, reporter,
®istration));
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeCast) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> cast_offset =
      CreateOperatorCodeDirect(builder, BuiltinOperator_CAST, nullptr, 0);
  builder.Finish(cast_offset);
  void* cast_pointer = builder.GetBufferPointer();
  const OperatorCode* cast_code =
      flatbuffers::GetRoot<OperatorCode>(cast_pointer);
  const TfLiteRegistration* registration = nullptr;
  EXPECT_EQ(kTfLiteError, GetRegistrationFromOpCode(cast_code, *resolver,
                                                    reporter, &registration));
EXPECT_EQ(nullptr, registration);
EXPECT_NE(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeCustom) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> custom_offset = CreateOperatorCodeDirect(
      builder, BuiltinOperator_CUSTOM, "mock_custom", 0);
  builder.Finish(custom_offset);
  void* custom_pointer = builder.GetBufferPointer();
  const OperatorCode* custom_code =
      flatbuffers::GetRoot<OperatorCode>(custom_pointer);
  const TfLiteRegistration* registration = nullptr;
  EXPECT_EQ(kTfLiteOk, GetRegistrationFromOpCode(custom_code, *resolver,
                                                 reporter, &registration));
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeNonexistentCustom) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
  flatbuffers::Offset<OperatorCode> custom_offset = CreateOperatorCodeDirect(
      builder, BuiltinOperator_CUSTOM, "nonexistent_custom", 0);
  builder.Finish(custom_offset);
  void* custom_pointer = builder.GetBufferPointer();
  const OperatorCode* custom_code =
      flatbuffers::GetRoot<OperatorCode>(custom_pointer);
  const TfLiteRegistration* registration = nullptr;
  EXPECT_EQ(kTfLiteError, GetRegistrationFromOpCode(custom_code, *resolver,
                                                    reporter, &registration));
EXPECT_EQ(nullptr, registration);
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f090185f-76b0-4a65-9cb4-8dd42979e2c4 | cpp | tensorflow/tensorflow | operator | tensorflow/lite/toco/tflite/operator.cc | tensorflow/lite/toco/tflite/operator_test.cc | #include "tensorflow/lite/toco/tflite/operator.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/graph_transformations/lstm_utils.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tflite/builtin_operator.h"
#include "tensorflow/lite/toco/tflite/custom_operator.h"
#include "tensorflow/lite/toco/tflite/simple_operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
#include "tensorflow/lite/tools/versioning/op_version.h"
namespace toco {
namespace tflite {
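// Maps a toco ArrayDataType onto the corresponding TfLiteType, falling back to
// kTfLiteNoType for types without a TFLite equivalent.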
TfLiteType GetTensorType(const ArrayDataType type) {
const std::map<ArrayDataType, TfLiteType> tensor_type_map = {
{ArrayDataType::kBool, kTfLiteBool},
{ArrayDataType::kFloat, kTfLiteFloat32},
{ArrayDataType::kInt8, kTfLiteInt8},
{ArrayDataType::kUint8, kTfLiteUInt8},
{ArrayDataType::kInt16, kTfLiteInt16},
{ArrayDataType::kUint16, kTfLiteUInt16},
{ArrayDataType::kInt32, kTfLiteInt32},
{ArrayDataType::kUint32, kTfLiteUInt32},
{ArrayDataType::kInt64, kTfLiteInt64},
{ArrayDataType::kUint64, kTfLiteUInt64},
{ArrayDataType::kString, kTfLiteString},
{ArrayDataType::kComplex64, kTfLiteComplex64},
{ArrayDataType::kComplex128, kTfLiteComplex128},
{ArrayDataType::kFloat16, kTfLiteFloat16},
{ArrayDataType::kFloat64, kTfLiteFloat64}};
auto it = tensor_type_map.find(type);
if (it != tensor_type_map.end()) {
return it->second;
}
return kTfLiteNoType;
}
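// Collects the type and shape of every input and output array into an
// ::tflite::OpSignature, which the per-operator GetVersion() overrides feed to
// the TFLite versioning logic.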
::tflite::OpSignature GetVersioningOpSig(
const ::tflite::BuiltinOperator op, const OperatorSignature& op_signature) {
std::vector<::tflite::OpSignatureTensorSpec> inputs, outputs;
for (const auto& input_name : op_signature.op->inputs) {
::tflite::OpSignatureTensorSpec tensor = {kTfLiteNoType};
if (op_signature.model->HasArray(input_name)) {
const Array& input_array = op_signature.model->GetArray(input_name);
tensor.type = GetTensorType(input_array.data_type);
if (input_array.has_shape()) {
tensor.dims = input_array.shape().dims();
}
}
inputs.push_back(tensor);
}
for (const auto& output_name : op_signature.op->outputs) {
::tflite::OpSignatureTensorSpec tensor = {kTfLiteNoType};
if (op_signature.model->HasArray(output_name)) {
const Array& output_array = op_signature.model->GetArray(output_name);
tensor.type = GetTensorType(output_array.data_type);
if (output_array.has_shape()) {
tensor.dims = output_array.shape().dims();
}
}
outputs.push_back(tensor);
}
return ::tflite::OpSignature{op, inputs, outputs};
}
class AveragePool
: public BuiltinOperator<AveragePoolOperator, ::tflite::Pool2DOptions,
::tflite::BuiltinOptions_Pool2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreatePool2DOptions(*builder, padding, op.stride_width,
op.stride_height, op.kwidth,
op.kheight, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->kwidth = options.filter_width();
op->kheight = options.filter_height();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class Convolution
: public BuiltinOperator<ConvOperator, ::tflite::Conv2DOptions,
::tflite::BuiltinOptions_Conv2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateConv2DOptions(*builder, padding, op.stride_width,
op.stride_height, activation_function,
op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class DepthwiseConvolution
: public BuiltinOperator<DepthwiseConvOperator,
::tflite::DepthwiseConv2DOptions,
::tflite::BuiltinOptions_DepthwiseConv2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateDepthwiseConv2DOptions(
*builder, padding, op.stride_width, op.stride_height,
op.depth_multiplier, activation_function, op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->depth_multiplier = options.depth_multiplier();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
}
int GetVersion(const OperatorSignature& op_signature) const override {
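    // Dilated depthwise convolutions require a newer op version, so the
    // dilation factors are threaded through the op signature.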
const auto& conv_op =
static_cast<const DepthwiseConvOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteDepthwiseConvParams depthwise_conv_params = {};
depthwise_conv_params.dilation_width_factor = conv_op.dilation_width_factor;
depthwise_conv_params.dilation_height_factor =
conv_op.dilation_height_factor;
op_sig.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Add : public BuiltinOperator<AddOperator, ::tflite::AddOptions,
::tflite::BuiltinOptions_AddOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateAddOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class AddN : public BuiltinOperator<AddNOperator, ::tflite::AddNOptions,
::tflite::BuiltinOptions_AddNOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateAddNOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class SpaceToBatchND
: public BuiltinOperator<SpaceToBatchNDOperator,
::tflite::SpaceToBatchNDOptions,
::tflite::BuiltinOptions_SpaceToBatchNDOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSpaceToBatchNDOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Sub : public BuiltinOperator<SubOperator, ::tflite::SubOptions,
::tflite::BuiltinOptions_SubOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateSubOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Div : public BuiltinOperator<DivOperator, ::tflite::DivOptions,
::tflite::BuiltinOptions_DivOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateDivOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class BatchToSpaceND
: public BuiltinOperator<BatchToSpaceNDOperator,
::tflite::BatchToSpaceNDOptions,
::tflite::BuiltinOptions_BatchToSpaceNDOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateBatchToSpaceNDOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Cast : public BuiltinOperator<CastOperator, ::tflite::CastOptions,
::tflite::BuiltinOptions_CastOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateCastOptions(*builder,
DataType::Serialize(op.src_data_type),
DataType::Serialize(op.dst_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->src_data_type = DataType::Deserialize(options.in_data_type());
op->dst_data_type = DataType::Deserialize(options.out_data_type());
}
};
class Concatenation
: public BuiltinOperator<ConcatenationOperator,
::tflite::ConcatenationOptions,
::tflite::BuiltinOptions_ConcatenationOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateConcatenationOptions(*builder, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->axis = options.axis();
}
};
class DepthToSpace
: public BuiltinOperator<DepthToSpaceOperator,
::tflite::DepthToSpaceOptions,
::tflite::BuiltinOptions_DepthToSpaceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateDepthToSpaceOptions(*builder, op.block_size);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->block_size = options.block_size();
}
};
class FakeQuant
: public BuiltinOperator<FakeQuantOperator, ::tflite::FakeQuantOptions,
::tflite::BuiltinOptions_FakeQuantOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateFakeQuantOptions(
*builder, op.minmax->min, op.minmax->max, op.num_bits, op.narrow_range);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
auto* minmax = new MinMax;
minmax->min = options.min();
minmax->max = options.max();
op->minmax.reset(minmax);
op->num_bits = options.num_bits();
op->narrow_range = options.narrow_range();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& fq_op = static_cast<const FakeQuantOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteFakeQuantParams fake_quant_params = {};
fake_quant_params.narrow_range = fq_op.narrow_range;
op_sig.builtin_data = reinterpret_cast<void*>(&fake_quant_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class FullyConnected
: public BuiltinOperator<FullyConnectedOperator,
::tflite::FullyConnectedOptions,
::tflite::BuiltinOptions_FullyConnectedOptions> {
public:
using BuiltinOperator::BuiltinOperator;
::tflite::FullyConnectedOptionsWeightsFormat GetWeightFormat(
FullyConnectedWeightsFormat fmt) const {
switch (fmt) {
case FullyConnectedWeightsFormat::kDefault:
return ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
case FullyConnectedWeightsFormat::kShuffled4x16Int8:
return ::tflite::FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8;
default:
LOG(ERROR) << "Unhandled FC weights format";
return ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
}
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateFullyConnectedOptions(
*builder, activation_function, GetWeightFormat(op.weights_format));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
switch (options.weights_format()) {
case ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT:
op->weights_format = FullyConnectedWeightsFormat::kDefault;
break;
case ::tflite::FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
op->weights_format = FullyConnectedWeightsFormat::kShuffled4x16Int8;
break;
default:
LOG(ERROR) << "Unhandled FC weights format";
op->weights_format = FullyConnectedWeightsFormat::kDefault;
}
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& fc_op =
static_cast<const FullyConnectedOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteFullyConnectedParams fully_connected_params = {};
fully_connected_params.keep_num_dims = fc_op.keep_num_dims;
fully_connected_params.weights_format =
static_cast<TfLiteFullyConnectedWeightsFormat>(
GetWeightFormat(fc_op.weights_format));
op_sig.builtin_data = reinterpret_cast<void*>(&fully_connected_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Gather : public BuiltinOperator<GatherOperator, ::tflite::GatherOptions,
::tflite::BuiltinOptions_GatherOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
int axis = op.axis ? op.axis.value() : 0;
return ::tflite::CreateGatherOptions(*builder, axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->axis = {options.axis()};
}
};
class GatherNd
: public BuiltinOperator<GatherNdOperator, ::tflite::GatherNdOptions,
::tflite::BuiltinOptions_GatherNdOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateGatherNdOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Svdf : public BuiltinOperator<SvdfOperator, ::tflite::SVDFOptions,
::tflite::BuiltinOptions_SVDFOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateSVDFOptions(*builder, op.rank, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
op->rank = options.rank();
}
};
class L2Normalization
: public BuiltinOperator<L2NormalizationOperator, ::tflite::L2NormOptions,
::tflite::BuiltinOptions_L2NormOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateL2NormOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class L2Pool : public BuiltinOperator<L2PoolOperator, ::tflite::Pool2DOptions,
::tflite::BuiltinOptions_Pool2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreatePool2DOptions(*builder, padding, op.stride_width,
op.stride_height, op.kwidth,
op.kheight, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->kwidth = options.filter_width();
op->kheight = options.filter_height();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class LocalResponseNormalization
: public BuiltinOperator<
LocalResponseNormalizationOperator,
::tflite::LocalResponseNormalizationOptions,
::tflite::BuiltinOptions_LocalResponseNormalizationOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateLocalResponseNormalizationOptions(
*builder, op.range, op.bias, op.alpha, op.beta);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->range = options.radius();
op->bias = options.bias();
op->alpha = options.alpha();
op->beta = options.beta();
}
};
class MaxPool : public BuiltinOperator<MaxPoolOperator, ::tflite::Pool2DOptions,
::tflite::BuiltinOptions_Pool2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreatePool2DOptions(*builder, padding, op.stride_width,
op.stride_height, op.kwidth,
op.kheight, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->kwidth = options.filter_width();
op->kheight = options.filter_height();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class Mul : public BuiltinOperator<MulOperator, ::tflite::MulOptions,
::tflite::BuiltinOptions_MulOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateMulOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
int GetVersion(const OperatorSignature& op_signature) const override {
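    // The minimum MUL version depends on whether the operands are quantized
    // and on the relationship between the input and output scales, so collect
    // them for the versioning logic.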
const std::string& input1_name = op_signature.op->inputs[0];
const std::string& input2_name = op_signature.op->inputs[1];
const std::string& output_name = op_signature.op->outputs[0];
const Array& input1_array = op_signature.model->GetArray(input1_name);
const Array& input2_array = op_signature.model->GetArray(input2_name);
const Array& output_array = op_signature.model->GetArray(output_name);
const auto& input1_quant = input1_array.quantization_params;
const auto& input2_quant = input2_array.quantization_params;
const auto& output_quant = output_array.quantization_params;
const float input1_scale = input1_quant ? input1_quant->scale : 0.0f;
const float input2_scale = input2_quant ? input2_quant->scale : 0.0f;
const float output_scale = output_quant ? output_quant->scale : 0.0f;
const bool input_quantized = input1_quant || input2_quant;
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
op_sig.ext_options.mul.input1_scale = input1_scale;
op_sig.ext_options.mul.input2_scale = input2_scale;
op_sig.ext_options.mul.output_scale = output_scale;
op_sig.ext_options.mul.input_quantized = input_quantized;
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Pad : public BuiltinOperator<PadOperator, ::tflite::PadOptions,
::tflite::BuiltinOptions_PadOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreatePadOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Tile
: public BuiltinOperator<TensorFlowTileOperator, ::tflite::TileOptions,
::tflite::BuiltinOptions_TileOptions> {
 public:
  using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateTileOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class PadV2 : public BuiltinOperator<PadV2Operator, ::tflite::PadV2Options,
::tflite::BuiltinOptions_PadV2Options> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreatePadV2Options(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Reshape
: public BuiltinOperator<TensorFlowReshapeOperator,
::tflite::ReshapeOptions,
::tflite::BuiltinOptions_ReshapeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReshapeOptions(*builder,
builder->CreateVector(op.shape));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->shape.insert(op->shape.end(), options.new_shape()->begin(),
options.new_shape()->end());
}
};
class Softmax
: public BuiltinOperator<SoftmaxOperator, ::tflite::SoftmaxOptions,
::tflite::BuiltinOptions_SoftmaxOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSoftmaxOptions(*builder, op.beta);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->beta = options.beta();
}
};
class SpaceToDepth
: public BuiltinOperator<SpaceToDepthOperator,
::tflite::SpaceToDepthOptions,
::tflite::BuiltinOptions_SpaceToDepthOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSpaceToDepthOptions(*builder, op.block_size);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->block_size = options.block_size();
}
};
class Transpose
: public BuiltinOperator<TransposeOperator, ::tflite::TransposeOptions,
::tflite::BuiltinOptions_TransposeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateTransposeOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Lstm : public BuiltinOperator<LstmCellOperator, ::tflite::LSTMOptions,
::tflite::BuiltinOptions_LSTMOptions> {
public:
using BuiltinOperator::BuiltinOperator;
::tflite::LSTMKernelType GetKernelType(
LstmCellOperator::KernelType type) const {
switch (type) {
      case LstmCellOperator::KERNEL_BASIC:
        return ::tflite::LSTMKernelType_BASIC;
      case LstmCellOperator::KERNEL_FULL:
        return ::tflite::LSTMKernelType_FULL;
default:
LOG(ERROR) << "Unhandled Kernel Type";
return static_cast<::tflite::LSTMKernelType>(-1);
}
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
::tflite::LSTMKernelType kernel_type = GetKernelType(op.kernel_type);
return ::tflite::CreateLSTMOptions(*builder,
::tflite::ActivationFunctionType_TANH,
                                       /*cell_clip=*/0.0,
                                       /*proj_clip=*/0.0, kernel_type);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
CHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
switch (options.kernel_type()) {
case ::tflite::LSTMKernelType_BASIC:
op->kernel_type = LstmCellOperator::KERNEL_BASIC;
break;
case ::tflite::LSTMKernelType_FULL:
op->kernel_type = LstmCellOperator::KERNEL_FULL;
break;
}
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& lstm_op =
static_cast<const LstmCellOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteLSTMParams lstm_params = {};
lstm_params.kernel_type =
static_cast<TfLiteLSTMKernelType>(GetKernelType(lstm_op.kernel_type));
op_sig.builtin_data = reinterpret_cast<void*>(&lstm_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
const auto& lstm_op = static_cast<const LstmCellOperator&>(op);
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
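    // State tensors are read and written in place, so they must be flagged as
    // mutating ("variable") inputs.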
switch (lstm_op.kernel_type) {
case LstmCellOperator::KERNEL_FULL: {
mutating_input_variables[kInputActivationStateTensor] = true;
mutating_input_variables[kInputCellStateTensor] = true;
break;
}
case LstmCellOperator::KERNEL_BASIC: {
mutating_input_variables[LstmCellOperator::PREV_ACTIV_INPUT] = true;
mutating_input_variables[LstmCellOperator::PREV_STATE_INPUT] = true;
break;
}
}
return mutating_input_variables;
}
};
class UnidirectionalSequenceLstm
: public BuiltinOperator<
UnidirectionalSequenceLstmOperator,
::tflite::UnidirectionalSequenceLSTMOptions,
::tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateUnidirectionalSequenceLSTMOptions(
*builder,
::tflite::ActivationFunctionType_TANH,
        /*cell_clip=*/0.0,
        /*proj_clip=*/0.0,
        /*time_major=*/true);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
mutating_input_variables[kInputActivationStateTensor] = true;
mutating_input_variables[kInputCellStateTensor] = true;
return mutating_input_variables;
}
};
class BidirectionalSequenceLstm
: public BuiltinOperator<
BidirectionalSequenceLstmOperator,
::tflite::BidirectionalSequenceLSTMOptions,
::tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateBidirectionalSequenceLSTMOptions(
*builder,
::tflite::ActivationFunctionType_TANH,
        /*cell_clip=*/0.0,
        /*proj_clip=*/0.0,
        op.merge_outputs,
        /*time_major=*/true);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
op->merge_outputs = options.merge_outputs();
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
    mutating_input_variables[35] = true;  // Forward activation state.
    mutating_input_variables[36] = true;  // Forward cell state.
    mutating_input_variables[37] = true;  // Backward activation state.
    mutating_input_variables[38] = true;  // Backward cell state.
return mutating_input_variables;
}
};
class BidirectionalSequenceRnn
: public BuiltinOperator<
BidirectionalSequenceRnnOperator,
::tflite::BidirectionalSequenceRNNOptions,
::tflite::BuiltinOptions_BidirectionalSequenceRNNOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateBidirectionalSequenceRNNOptions(
        *builder, /*time_major=*/true,
::tflite::ActivationFunctionType_TANH,
op.merge_outputs);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
op->merge_outputs = options.merge_outputs();
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
    mutating_input_variables[4] = true;  // Forward hidden state.
    mutating_input_variables[8] = true;  // Backward hidden state.
return mutating_input_variables;
}
};
class Mean : public BuiltinOperator<MeanOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class Sum
: public BuiltinOperator<TensorFlowSumOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceMax
: public BuiltinOperator<TensorFlowMaxOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceMin
: public BuiltinOperator<TensorFlowMinOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceProd
: public BuiltinOperator<TensorFlowProdOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceAny
: public BuiltinOperator<TensorFlowAnyOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ResizeBilinear
: public BuiltinOperator<ResizeBilinearOperator,
::tflite::ResizeBilinearOptions,
::tflite::BuiltinOptions_ResizeBilinearOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateResizeBilinearOptions(*builder, op.align_corners,
op.half_pixel_centers);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->align_corners = options.align_corners();
op->half_pixel_centers = options.half_pixel_centers();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& resize_bilinear_op =
static_cast<const ResizeBilinearOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteResizeBilinearParams resize_bilinear_params = {};
resize_bilinear_params.half_pixel_centers =
resize_bilinear_op.half_pixel_centers;
resize_bilinear_params.align_corners = resize_bilinear_op.align_corners;
op_sig.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class ResizeNearestNeighbor
: public BuiltinOperator<
ResizeNearestNeighborOperator, ::tflite::ResizeNearestNeighborOptions,
::tflite::BuiltinOptions_ResizeNearestNeighborOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateResizeNearestNeighborOptions(
*builder, op.align_corners, op.half_pixel_centers);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->align_corners = options.align_corners();
op->half_pixel_centers = options.half_pixel_centers();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& resize_nn_op =
static_cast<const ResizeNearestNeighborOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteResizeNearestNeighborParams resize_nearest_neighbor_params = {};
resize_nearest_neighbor_params.half_pixel_centers =
resize_nn_op.half_pixel_centers;
resize_nearest_neighbor_params.align_corners = resize_nn_op.align_corners;
op_sig.builtin_data =
reinterpret_cast<void*>(&resize_nearest_neighbor_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Squeeze
: public BuiltinOperator<SqueezeOperator, ::tflite::SqueezeOptions,
::tflite::BuiltinOptions_SqueezeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto squeeze_dims = builder->CreateVector(op.squeeze_dims);
return ::tflite::CreateSqueezeOptions(*builder, squeeze_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->squeeze_dims.insert(op->squeeze_dims.end(),
options.squeeze_dims()->begin(),
options.squeeze_dims()->end());
}
};
class Split
: public BuiltinOperator<TensorFlowSplitOperator, ::tflite::SplitOptions,
::tflite::BuiltinOptions_SplitOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSplitOptions(*builder, op.num_split);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->num_split = options.num_splits();
}
};
class SplitV
: public BuiltinOperator<TensorFlowSplitVOperator, ::tflite::SplitVOptions,
::tflite::BuiltinOptions_SplitVOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSplitVOptions(*builder, op.num_split);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->num_split = options.num_splits();
}
};
class StridedSlice
: public BuiltinOperator<StridedSliceOperator,
::tflite::StridedSliceOptions,
::tflite::BuiltinOptions_StridedSliceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateStridedSliceOptions(
*builder, op.begin_mask, op.end_mask, op.ellipsis_mask,
op.new_axis_mask, op.shrink_axis_mask);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->begin_mask = options.begin_mask();
op->end_mask = options.end_mask();
op->ellipsis_mask = options.ellipsis_mask();
op->new_axis_mask = options.new_axis_mask();
op->shrink_axis_mask = options.shrink_axis_mask();
}
int GetVersion(const OperatorSignature& op_signature) const override {
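    // The slice rank and the ellipsis/new-axis masks determine the minimum
    // STRIDED_SLICE version.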
const auto& ss_op =
static_cast<const StridedSliceOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
op_sig.ext_options.strided_slice.num_dims = ss_op.start_indices.size();
TfLiteStridedSliceParams strided_slice_params = {};
strided_slice_params.ellipsis_mask = ss_op.ellipsis_mask;
strided_slice_params.new_axis_mask = ss_op.new_axis_mask;
op_sig.builtin_data = reinterpret_cast<void*>(&strided_slice_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class TopK_V2 : public BuiltinOperator<TopKV2Operator, ::tflite::TopKV2Options,
::tflite::BuiltinOptions_TopKV2Options> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateTopKV2Options(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class ArgMax : public BuiltinOperator<ArgMaxOperator, ::tflite::ArgMaxOptions,
::tflite::BuiltinOptions_ArgMaxOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateArgMaxOptions(
*builder, DataType::Serialize(op.output_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->output_data_type = DataType::Deserialize(options.output_type());
}
};
class ArgMin : public BuiltinOperator<ArgMinOperator, ::tflite::ArgMinOptions,
::tflite::BuiltinOptions_ArgMinOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateArgMinOptions(
*builder, DataType::Serialize(op.output_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->output_data_type = DataType::Deserialize(options.output_type());
}
};
class TransposeConv
: public BuiltinOperator<TransposeConvOperator,
::tflite::TransposeConvOptions,
::tflite::BuiltinOptions_TransposeConvOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
return ::tflite::CreateTransposeConvOptions(
*builder, padding, op.stride_width, op.stride_height);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
}
};
class SparseToDense
: public BuiltinOperator<SparseToDenseOperator,
::tflite::SparseToDenseOptions,
::tflite::BuiltinOptions_SparseToDenseOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSparseToDenseOptions(*builder, op.validate_indices);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->validate_indices = options.validate_indices();
}
};
class ExpandDims
: public BuiltinOperator<ExpandDimsOperator, ::tflite::ExpandDimsOptions,
::tflite::BuiltinOptions_ExpandDimsOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateExpandDimsOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Pack : public BuiltinOperator<PackOperator, ::tflite::PackOptions,
::tflite::BuiltinOptions_PackOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreatePackOptions(*builder, op.values_count, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->values_count = options.values_count();
op->axis = options.axis();
}
};
class Shape
: public BuiltinOperator<TensorFlowShapeOperator, ::tflite::ShapeOptions,
::tflite::BuiltinOptions_ShapeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateShapeOptions(
*builder, DataType::Serialize(op.output_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->output_data_type = DataType::Deserialize(options.out_type());
}
};
class OneHot : public BuiltinOperator<OneHotOperator, ::tflite::OneHotOptions,
::tflite::BuiltinOptions_OneHotOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateOneHotOptions(*builder, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->axis = options.axis();
}
};
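// CTC_BEAM_SEARCH_DECODER has no builtin options table in the TFLite schema,
// so its parameters are serialized as a FlexBuffers map via CustomOperator.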
class CTCBeamSearchDecoder
: public CustomOperator<CTCBeamSearchDecoderOperator> {
public:
using CustomOperator::CustomOperator;
void WriteOptions(const TocoOperator& op,
flexbuffers::Builder* fbb) const override {
fbb->Int("beam_width", op.beam_width);
fbb->Int("top_paths", op.top_paths);
fbb->Bool("merge_repeated", op.merge_repeated);
}
void ReadOptions(const flexbuffers::Map& m, TocoOperator* op) const override {
op->beam_width = m["beam_width"].AsInt32();
op->top_paths = m["top_paths"].AsInt32();
op->merge_repeated = m["merge_repeated"].AsBool();
}
int GetVersion(const OperatorSignature& op_signature) const override {
return 1;
}
};
class Unpack : public BuiltinOperator<UnpackOperator, ::tflite::UnpackOptions,
::tflite::BuiltinOptions_UnpackOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateUnpackOptions(*builder, op.num, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->num = options.num();
op->axis = options.axis();
}
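  // The op version is keyed off the input element type: int8/uint8 inputs
  // require version 2 and bool inputs version 3; everything else is version 1.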
int GetVersion(const OperatorSignature& op_signature) const override {
const std::string& input_name = op_signature.op->inputs[0];
const Array& input_array = op_signature.model->GetArray(input_name);
if (input_array.data_type == ArrayDataType::kInt8 ||
input_array.data_type == ArrayDataType::kUint8) {
return 2;
}
if (input_array.data_type == ArrayDataType::kBool) {
return 3;
}
return 1;
}
};
class LeakyRelu
: public BuiltinOperator<LeakyReluOperator, ::tflite::LeakyReluOptions,
::tflite::BuiltinOptions_LeakyReluOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateLeakyReluOptions(*builder, op.alpha);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->alpha = options.alpha();
}
};
class SquaredDifference
: public BuiltinOperator<
SquaredDifferenceOperator, ::tflite::SquaredDifferenceOptions,
::tflite::BuiltinOptions_SquaredDifferenceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSquaredDifferenceOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class MirrorPad
: public BuiltinOperator<MirrorPadOperator, ::tflite::MirrorPadOptions,
::tflite::BuiltinOptions_MirrorPadOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateMirrorPadOptions(
*builder, op.mode == MirrorPadMode::kReflect
? ::tflite::MirrorPadMode::MirrorPadMode_REFLECT
: ::tflite::MirrorPadMode::MirrorPadMode_SYMMETRIC);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->mode = options.mode() == ::tflite::MirrorPadMode::MirrorPadMode_REFLECT
? MirrorPadMode::kReflect
: MirrorPadMode::kSymmetric;
}
};
class Unique : public BuiltinOperator<UniqueOperator, ::tflite::UniqueOptions,
::tflite::BuiltinOptions_UniqueOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
const UniqueOperator& unique_op = static_cast<const UniqueOperator&>(op);
    return ::tflite::CreateUniqueOptions(
        *builder, unique_op.idx_out_type == toco::ArrayDataType::kInt64
                      ? ::tflite::TensorType_INT64
                      : ::tflite::TensorType_INT32);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
UniqueOperator* unique_op = static_cast<UniqueOperator*>(op);
unique_op->idx_out_type =
options.idx_out_type() == ::tflite::TensorType_INT64
? toco::ArrayDataType::kInt64
: toco::ArrayDataType::kInt32;
}
};
class UnidirectionalSequenceRnn
: public BuiltinOperator<UnidirectionalSequenceRnnOperator,
::tflite::SequenceRNNOptions,
::tflite::BuiltinOptions_SequenceRNNOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
    return ::tflite::CreateSequenceRNNOptions(
        *builder, /*time_major=*/true,
        ::tflite::ActivationFunctionType_TANH);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
}
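  // Input index 4 of UNIDIRECTIONAL_SEQUENCE_RNN is the recurrent
  // hidden-state tensor, which the kernel updates in place, so it is flagged
  // as a mutating input variable.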
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
mutating_input_variables[4] = true;
return mutating_input_variables;
}
};
class Where : public BuiltinOperator<WhereOperator, ::tflite::WhereOptions,
::tflite::BuiltinOptions_WhereOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateWhereOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
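// Serializes the options of a flex op as a FlexBuffers vector of two entries:
// the TensorFlow op name followed by the serialized NodeDef. Returns an empty
// pointer if the NodeDef cannot be parsed.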
std::unique_ptr<flexbuffers::Builder> WriteFlexOpOptions(
const std::string& tensorflow_node_def) {
auto fbb = std::make_unique<flexbuffers::Builder>();
::tensorflow::NodeDef node_def;
if (!node_def.ParseFromString(tensorflow_node_def)) {
LOG(ERROR) << "Failed to parse TensorFlow NodeDef";
return {};
}
fbb->Vector([&]() {
fbb->String(node_def.op());
fbb->String(tensorflow_node_def);
});
fbb->Finish();
LOG(INFO) << "Writing flex op: " << node_def.op();
return std::unique_ptr<flexbuffers::Builder>(fbb.release());
}
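// Fallback serializer for TensorFlow ops that have no TFLite builtin. With
// select TF ops enabled, the NodeDef is exported verbatim for the flex
// delegate; otherwise its scalar and list attributes are packed into a
// FlexBuffers map.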
class TensorFlowUnsupported : public BaseOperator {
public:
TensorFlowUnsupported(const std::string& name, OperatorType type,
bool enable_select_tf_ops)
: BaseOperator(name, type), enable_select_tf_ops_(enable_select_tf_ops) {}
Options Serialize(const Operator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto fbb =
WriteOptions(static_cast<const TensorFlowUnsupportedOperator&>(op));
if (fbb) {
return Options::Custom(builder->CreateVector(fbb->GetBuffer()));
} else {
return Options::Custom(0);
}
}
std::unique_ptr<Operator> Deserialize(
const BuiltinOptions* builtin_options,
const CustomOptions* custom_options) const override {
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
if (custom_options) {
auto flexbuffer_map =
flexbuffers::GetRoot(custom_options->data(), custom_options->size())
.AsMap();
ReadOptions(flexbuffer_map, op.get());
}
return std::unique_ptr<Operator>(op.release());
}
std::unique_ptr<flexbuffers::Builder> WriteOptions(
const TensorFlowUnsupportedOperator& op) const {
if (enable_select_tf_ops_) {
return WriteFlexOpOptions(op.tensorflow_node_def);
}
auto fbb = std::make_unique<flexbuffers::Builder>();
::tensorflow::NodeDef node_def;
if (!node_def.ParseFromString(op.tensorflow_node_def)) {
LOG(ERROR) << "Failed to parse TensorFlow NodeDef";
return std::unique_ptr<flexbuffers::Builder>();
}
if (ShouldExportAsFlexOp(enable_select_tf_ops_, node_def.op())) {
fbb->Vector([&]() {
fbb->String(node_def.op());
fbb->String(op.tensorflow_node_def);
});
fbb->Finish();
LOG(INFO) << "Writing flex op: " << node_def.op();
return std::unique_ptr<flexbuffers::Builder>(fbb.release());
}
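    // Non-flex path: copy each supported NodeDef attribute into a FlexBuffers
    // map. If no attribute survives the filtering below, no custom options
    // are emitted at all.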
bool has_valid_attr = false;
size_t map_start = fbb->StartMap();
for (const auto& pair : node_def.attr()) {
const char* key = pair.first.c_str();
const auto& attr = pair.second;
switch (attr.value_case()) {
case ::tensorflow::AttrValue::kS:
fbb->String(key, attr.s());
has_valid_attr = true;
break;
case ::tensorflow::AttrValue::kI:
fbb->Int(key, attr.i());
has_valid_attr = true;
break;
case ::tensorflow::AttrValue::kF:
fbb->Float(key, attr.f());
has_valid_attr = true;
break;
case ::tensorflow::AttrValue::kB:
fbb->Bool(key, attr.b());
has_valid_attr = true;
break;
case tensorflow::AttrValue::kList:
if (attr.list().s_size() > 0) {
auto start = fbb->StartVector(key);
for (const std::string& v : attr.list().s()) {
fbb->Add(v);
}
fbb->EndVector(start, true, false);
has_valid_attr = true;
} else if (attr.list().i_size() > 0) {
auto start = fbb->StartVector(key);
for (const int64_t v : attr.list().i()) {
fbb->Add(v);
}
fbb->EndVector(start, true, false);
has_valid_attr = true;
} else if (attr.list().f_size() > 0) {
auto start = fbb->StartVector(key);
for (const float v : attr.list().f()) {
fbb->Add(v);
}
fbb->EndVector(start, true, false);
has_valid_attr = true;
} else {
LOG(WARNING)
<< "Ignoring unsupported type in list attribute with key '"
<< key << "'";
}
break;
default:
LOG(WARNING) << "Ignoring unsupported attribute type with key '"
<< key << "'";
break;
}
}
if (!has_valid_attr) {
return std::unique_ptr<flexbuffers::Builder>();
}
fbb->EndMap(map_start);
fbb->Finish();
return std::unique_ptr<flexbuffers::Builder>(fbb.release());
}
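  // Rebuilds a NodeDef from the FlexBuffers map written above. The reserved
  // keys "_output_quantized" and "_support_output_type_float_in_quantized_op"
  // additionally toggle the corresponding flags on the operator itself.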
void ReadOptions(const flexbuffers::Map& m,
TensorFlowUnsupportedOperator* op) const {
::tensorflow::NodeDef node_def;
auto attr = node_def.mutable_attr();
const auto& keys = m.Keys();
for (size_t i = 0; i < keys.size(); ++i) {
const auto key = keys[i].AsKey();
const auto& value = m[key];
switch (value.GetType()) {
case flexbuffers::FBT_STRING:
(*attr)[key].set_s(value.AsString().c_str());
break;
case flexbuffers::FBT_INT:
(*attr)[key].set_i(value.AsInt64());
break;
case flexbuffers::FBT_FLOAT:
(*attr)[key].set_f(value.AsFloat());
break;
case flexbuffers::FBT_BOOL:
(*attr)[key].set_b(value.AsBool());
if (std::string(key) == "_output_quantized") {
op->quantized = value.AsBool();
}
if (std::string(key) ==
"_support_output_type_float_in_quantized_op") {
op->support_output_type_float_in_quantized_op = value.AsBool();
}
break;
case flexbuffers::FBT_VECTOR_INT: {
auto* list = (*attr)[key].mutable_list();
const auto& vector = value.AsTypedVector();
for (size_t i = 0; i < vector.size(); i++) {
list->add_i(vector[i].AsInt64());
}
break;
}
case flexbuffers::FBT_VECTOR_FLOAT: {
auto* list = (*attr)[key].mutable_list();
const auto& vector = value.AsTypedVector();
for (size_t i = 0; i < vector.size(); i++) {
list->add_f(vector[i].AsFloat());
}
break;
}
      case 15 /* flexbuffers::FBT_VECTOR_STRING_DEPRECATED */: {
auto* list = (*attr)[key].mutable_list();
const auto& vector = value.AsTypedVector();
for (size_t i = 0; i < vector.size(); i++) {
list->add_s(vector[i].AsString().str());
}
break;
}
default:
LOG(WARNING) << "Ignoring unsupported attribute type with key '"
<< key << "'";
break;
}
}
node_def.SerializeToString(&op->tensorflow_node_def);
}
int GetVersion(const OperatorSignature& op_signature) const override {
return 1;
}
private:
const bool enable_select_tf_ops_;
};
class Dequantize
: public BuiltinOperator<DequantizeOperator, ::tflite::DequantizeOptions,
::tflite::BuiltinOptions_DequantizeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateDequantizeOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class ReverseSequence
: public BuiltinOperator<ReverseSequenceOperator,
::tflite::ReverseSequenceOptions,
::tflite::BuiltinOptions_ReverseSequenceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReverseSequenceOptions(*builder, op.seq_dim,
op.batch_dim);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->seq_dim = options.seq_dim();
op->batch_dim = options.batch_dim();
}
};
namespace {
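// Constructs one serializer instance per supported op. Ops with dedicated
// option tables get their own class above, trivial ops reuse SimpleOperator,
// and anything else falls through to TensorFlowUnsupported.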
std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList(
bool enable_select_tf_ops = false) {
std::vector<std::unique_ptr<BaseOperator>> ops;
ops.push_back(
std::make_unique<Add>(::tflite::BuiltinOperator_ADD, OperatorType::kAdd));
ops.push_back(std::make_unique<AddN>(::tflite::BuiltinOperator_ADD_N,
OperatorType::kAddN));
ops.push_back(
std::make_unique<Div>(::tflite::BuiltinOperator_DIV, OperatorType::kDiv));
ops.push_back(
std::make_unique<Sub>(::tflite::BuiltinOperator_SUB, OperatorType::kSub));
ops.push_back(std::make_unique<AveragePool>(
::tflite::BuiltinOperator_AVERAGE_POOL_2D, OperatorType::kAveragePool));
ops.push_back(std::make_unique<SpaceToBatchND>(
::tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
OperatorType::kSpaceToBatchND));
ops.push_back(std::make_unique<BatchToSpaceND>(
::tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
OperatorType::kBatchToSpaceND));
ops.push_back(std::make_unique<Concatenation>(
::tflite::BuiltinOperator_CONCATENATION, OperatorType::kConcatenation));
ops.push_back(std::make_unique<Convolution>(::tflite::BuiltinOperator_CONV_2D,
OperatorType::kConv));
ops.push_back(std::make_unique<DepthwiseConvolution>(
::tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
OperatorType::kDepthwiseConv));
ops.push_back(std::make_unique<Dequantize>(
::tflite::BuiltinOperator_DEQUANTIZE, OperatorType::kDequantize));
ops.push_back(std::make_unique<FullyConnected>(
::tflite::BuiltinOperator_FULLY_CONNECTED,
OperatorType::kFullyConnected));
ops.push_back(std::make_unique<Gather>(::tflite::BuiltinOperator_GATHER,
OperatorType::kGather));
ops.push_back(std::make_unique<GatherNd>(::tflite::BuiltinOperator_GATHER_ND,
OperatorType::kGatherNd));
ops.push_back(std::make_unique<L2Normalization>(
::tflite::BuiltinOperator_L2_NORMALIZATION,
OperatorType::kL2Normalization));
ops.push_back(std::make_unique<L2Pool>(::tflite::BuiltinOperator_L2_POOL_2D,
OperatorType::kL2Pool));
ops.push_back(std::make_unique<LocalResponseNormalization>(
::tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
OperatorType::kLocalResponseNormalization));
ops.push_back(std::make_unique<MaxPool>(::tflite::BuiltinOperator_MAX_POOL_2D,
OperatorType::kMaxPool));
ops.push_back(
std::make_unique<Mul>(::tflite::BuiltinOperator_MUL, OperatorType::kMul));
ops.push_back(
std::make_unique<Pad>(::tflite::BuiltinOperator_PAD, OperatorType::kPad));
ops.push_back(std::make_unique<PadV2>(::tflite::BuiltinOperator_PADV2,
OperatorType::kPadV2));
ops.push_back(std::make_unique<Reshape>(::tflite::BuiltinOperator_RESHAPE,
OperatorType::kReshape));
ops.push_back(std::make_unique<Softmax>(::tflite::BuiltinOperator_SOFTMAX,
OperatorType::kSoftmax));
ops.push_back(std::make_unique<SpaceToDepth>(
::tflite::BuiltinOperator_SPACE_TO_DEPTH, OperatorType::kSpaceToDepth));
ops.push_back(std::make_unique<DepthToSpace>(
::tflite::BuiltinOperator_DEPTH_TO_SPACE, OperatorType::kDepthToSpace));
ops.push_back(std::make_unique<Svdf>(::tflite::BuiltinOperator_SVDF,
OperatorType::kSvdf));
ops.push_back(std::make_unique<Transpose>(::tflite::BuiltinOperator_TRANSPOSE,
OperatorType::kTranspose));
ops.push_back(std::make_unique<Mean>(::tflite::BuiltinOperator_MEAN,
OperatorType::kMean));
ops.push_back(
std::make_unique<Sum>(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
ops.push_back(std::make_unique<ReduceProd>(
::tflite::BuiltinOperator_REDUCE_PROD, OperatorType::kReduceProd));
ops.push_back(std::make_unique<ReduceMax>(
::tflite::BuiltinOperator_REDUCE_MAX, OperatorType::kReduceMax));
ops.push_back(std::make_unique<ReduceMin>(
::tflite::BuiltinOperator_REDUCE_MIN, OperatorType::kReduceMin));
ops.push_back(std::make_unique<ReduceAny>(
::tflite::BuiltinOperator_REDUCE_ANY, OperatorType::kAny));
ops.push_back(std::make_unique<ResizeBilinear>(
::tflite::BuiltinOperator_RESIZE_BILINEAR,
OperatorType::kResizeBilinear));
ops.push_back(std::make_unique<ResizeNearestNeighbor>(
::tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
OperatorType::kResizeNearestNeighbor));
ops.push_back(std::make_unique<Squeeze>(::tflite::BuiltinOperator_SQUEEZE,
OperatorType::kSqueeze));
ops.push_back(std::make_unique<Split>(::tflite::BuiltinOperator_SPLIT,
OperatorType::kSplit));
ops.push_back(std::make_unique<SplitV>(::tflite::BuiltinOperator_SPLIT_V,
OperatorType::kSplitV));
ops.push_back(std::make_unique<StridedSlice>(
::tflite::BuiltinOperator_STRIDED_SLICE, OperatorType::kStridedSlice));
ops.push_back(std::make_unique<TopK_V2>(::tflite::BuiltinOperator_TOPK_V2,
OperatorType::kTopK_V2));
ops.push_back(std::make_unique<Lstm>(::tflite::BuiltinOperator_LSTM,
OperatorType::kLstmCell));
ops.push_back(std::make_unique<Cast>(::tflite::BuiltinOperator_CAST,
OperatorType::kCast));
ops.push_back(std::make_unique<ArgMax>(::tflite::BuiltinOperator_ARG_MAX,
OperatorType::kArgMax));
ops.push_back(std::make_unique<ArgMin>(::tflite::BuiltinOperator_ARG_MIN,
OperatorType::kArgMin));
ops.push_back(std::make_unique<Tile>(::tflite::BuiltinOperator_TILE,
OperatorType::kTile));
ops.push_back(std::make_unique<ExpandDims>(
::tflite::BuiltinOperator_EXPAND_DIMS, OperatorType::kExpandDims));
ops.push_back(std::make_unique<TransposeConv>(
::tflite::BuiltinOperator_TRANSPOSE_CONV, OperatorType::kTransposeConv));
ops.push_back(std::make_unique<SparseToDense>(
::tflite::BuiltinOperator_SPARSE_TO_DENSE, OperatorType::kSparseToDense));
ops.push_back(std::make_unique<Shape>(::tflite::BuiltinOperator_SHAPE,
OperatorType::kShape));
ops.push_back(std::make_unique<FakeQuant>(
::tflite::BuiltinOperator_FAKE_QUANT, OperatorType::kFakeQuant));
ops.push_back(std::make_unique<Pack>(::tflite::BuiltinOperator_PACK,
OperatorType::kPack));
ops.emplace_back(std::make_unique<UnidirectionalSequenceLstm>(
::tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
OperatorType::kUnidirectionalSequenceLstm));
ops.emplace_back(std::make_unique<BidirectionalSequenceLstm>(
::tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
OperatorType::kBidirectionalSequenceLstm));
ops.emplace_back(std::make_unique<BidirectionalSequenceRnn>(
::tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
OperatorType::kBidirectionalSequenceRnn));
ops.push_back(std::make_unique<OneHot>(::tflite::BuiltinOperator_ONE_HOT,
OperatorType::kOneHot));
ops.push_back(std::make_unique<Unpack>(::tflite::BuiltinOperator_UNPACK,
OperatorType::kUnpack));
ops.push_back(std::make_unique<LeakyRelu>(
::tflite::BuiltinOperator_LEAKY_RELU, OperatorType::kLeakyRelu));
ops.push_back(std::make_unique<SquaredDifference>(
::tflite::BuiltinOperator_SQUARED_DIFFERENCE,
OperatorType::kSquaredDifference));
ops.push_back(std::make_unique<MirrorPad>(
::tflite::BuiltinOperator_MIRROR_PAD, OperatorType::kMirrorPad));
ops.push_back(std::make_unique<Unique>(::tflite::BuiltinOperator_UNIQUE,
OperatorType::kUnique));
ops.push_back(std::make_unique<UnidirectionalSequenceRnn>(
::tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
OperatorType::kUnidirectionalSequenceRnn));
ops.push_back(std::make_unique<Where>(::tflite::BuiltinOperator_WHERE,
OperatorType::kWhere));
ops.push_back(std::make_unique<ReverseSequence>(
::tflite::BuiltinOperator_REVERSE_SEQUENCE,
OperatorType::kReverseSequence));
ops.push_back(std::make_unique<SimpleOperator<MatrixDiagOperator>>(
::tflite::BuiltinOperator_MATRIX_DIAG, OperatorType::kMatrixDiag));
ops.push_back(std::make_unique<SimpleOperator<MatrixSetDiagOperator>>(
::tflite::BuiltinOperator_MATRIX_SET_DIAG, OperatorType::kMatrixSetDiag));
ops.push_back(std::make_unique<CTCBeamSearchDecoder>(
"CTC_BEAM_SEARCH_DECODER", OperatorType::kCTCBeamSearchDecoder));
ops.push_back(std::make_unique<TensorFlowUnsupported>(
"TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported,
enable_select_tf_ops));
ops.push_back(std::make_unique<SimpleOperator<FloorOperator>>(
::tflite::BuiltinOperator_FLOOR, OperatorType::kFloor));
ops.push_back(std::make_unique<SimpleOperator<CeilOperator>>(
::tflite::BuiltinOperator_CEIL, OperatorType::kCeil));
ops.push_back(std::make_unique<SimpleOperator<EluOperator>>(
::tflite::BuiltinOperator_ELU, OperatorType::kElu));
ops.push_back(std::make_unique<SimpleOperator<RoundOperator>>(
::tflite::BuiltinOperator_ROUND, OperatorType::kRound));
ops.push_back(std::make_unique<SimpleOperator<ReluOperator>>(
::tflite::BuiltinOperator_RELU, OperatorType::kRelu));
ops.push_back(std::make_unique<SimpleOperator<Relu1Operator>>(
::tflite::BuiltinOperator_RELU_N1_TO_1, OperatorType::kRelu1));
ops.push_back(std::make_unique<SimpleOperator<Relu6Operator>>(
::tflite::BuiltinOperator_RELU6, OperatorType::kRelu6));
ops.push_back(std::make_unique<SimpleOperator<PReluOperator>>(
::tflite::BuiltinOperator_PRELU, OperatorType::kPRelu));
ops.push_back(std::make_unique<SimpleOperator<LogisticOperator>>(
::tflite::BuiltinOperator_LOGISTIC, OperatorType::kLogistic));
ops.push_back(std::make_unique<SimpleOperator<TanhOperator>>(
::tflite::BuiltinOperator_TANH, OperatorType::kTanh));
ops.push_back(std::make_unique<SimpleOperator<ExpOperator>>(
::tflite::BuiltinOperator_EXP, OperatorType::kExp));
ops.push_back(std::make_unique<SimpleOperator<CosOperator>>(
::tflite::BuiltinOperator_COS, OperatorType::kCos));
ops.push_back(std::make_unique<SimpleOperator<LogSoftmaxOperator>>(
::tflite::BuiltinOperator_LOG_SOFTMAX, OperatorType::kLogSoftmax));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowMaximumOperator>>(
::tflite::BuiltinOperator_MAXIMUM, OperatorType::kMaximum));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowMinimumOperator>>(
::tflite::BuiltinOperator_MINIMUM, OperatorType::kMinimum));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowGreaterOperator>>(
::tflite::BuiltinOperator_GREATER, OperatorType::kGreater));
ops.push_back(
std::make_unique<SimpleOperator<TensorFlowGreaterEqualOperator>>(
::tflite::BuiltinOperator_GREATER_EQUAL,
OperatorType::kGreaterEqual));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowLessOperator>>(
::tflite::BuiltinOperator_LESS, OperatorType::kLess));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowLessEqualOperator>>(
::tflite::BuiltinOperator_LESS_EQUAL, OperatorType::kLessEqual));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowEqualOperator>>(
::tflite::BuiltinOperator_EQUAL, OperatorType::kEqual));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowNotEqualOperator>>(
::tflite::BuiltinOperator_NOT_EQUAL, OperatorType::kNotEqual));
ops.push_back(std::make_unique<SimpleOperator<NegOperator>>(
::tflite::BuiltinOperator_NEG, OperatorType::kNeg));
ops.push_back(std::make_unique<SimpleOperator<SelectOperator>>(
::tflite::BuiltinOperator_SELECT, OperatorType::kSelect));
ops.push_back(std::make_unique<SimpleOperator<SliceOperator>>(
::tflite::BuiltinOperator_SLICE, OperatorType::kSlice));
ops.push_back(std::make_unique<SimpleOperator<PowOperator>>(
::tflite::BuiltinOperator_POW, OperatorType::kPow));
ops.push_back(std::make_unique<SimpleOperator<LogicalOrOperator>>(
::tflite::BuiltinOperator_LOGICAL_OR, OperatorType::kLogicalOr));
  ops.push_back(std::make_unique<SimpleOperator<LogicalAndOperator>>(
      ::tflite::BuiltinOperator_LOGICAL_AND, OperatorType::kLogicalAnd));
  ops.push_back(std::make_unique<SimpleOperator<LogicalNotOperator>>(
      ::tflite::BuiltinOperator_LOGICAL_NOT, OperatorType::kLogicalNot));
  ops.push_back(std::make_unique<SimpleOperator<FloorDivOperator>>(
      ::tflite::BuiltinOperator_FLOOR_DIV, OperatorType::kFloorDiv));
  ops.push_back(std::make_unique<SimpleOperator<FloorModOperator>>(
      ::tflite::BuiltinOperator_FLOOR_MOD, OperatorType::kFloorMod));
  ops.push_back(std::make_unique<SimpleOperator<RangeOperator>>(
      ::tflite::BuiltinOperator_RANGE, OperatorType::kRange));
ops.push_back(std::make_unique<SimpleOperator<SinOperator>>(
::tflite::BuiltinOperator_SIN, OperatorType::kSin));
ops.push_back(std::make_unique<SimpleOperator<LogOperator>>(
::tflite::BuiltinOperator_LOG, OperatorType::kLog));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowSqrtOperator>>(
::tflite::BuiltinOperator_SQRT, OperatorType::kSqrt));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowRsqrtOperator>>(
::tflite::BuiltinOperator_RSQRT, OperatorType::kRsqrt));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowSquareOperator>>(
::tflite::BuiltinOperator_SQUARE, OperatorType::kSquare));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowZerosLikeOperator>>(
::tflite::BuiltinOperator_ZEROS_LIKE, OperatorType::kZerosLike));
ops.push_back(std::make_unique<SimpleOperator<AbsOperator>>(
::tflite::BuiltinOperator_ABS, OperatorType::kAbs));
ops.push_back(std::make_unique<SimpleOperator<HardSwishOperator>>(
::tflite::BuiltinOperator_HARD_SWISH, OperatorType::kHardSwish));
ops.push_back(std::make_unique<SimpleOperator<FillOperator>>(
::tflite::BuiltinOperator_FILL, OperatorType::kFill));
ops.push_back(std::make_unique<SimpleOperator<ReverseV2Operator>>(
::tflite::BuiltinOperator_REVERSE_V2, OperatorType::kReverseV2));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowRankOperator>>(
::tflite::BuiltinOperator_RANK, OperatorType::kRank));
  ops.push_back(std::make_unique<SimpleOperator<SegmentSumOperator>>(
      ::tflite::BuiltinOperator_SEGMENT_SUM, OperatorType::kSegmentSum));
  ops.push_back(std::make_unique<SimpleOperator<ScatterNdOperator>>(
      ::tflite::BuiltinOperator_SCATTER_ND, OperatorType::kScatterNd));
return ops;
}
}  // namespace
std::map<OperatorType, std::unique_ptr<BaseOperator>> BuildOperatorByTypeMap(
bool enable_select_tf_ops) {
std::map<OperatorType, std::unique_ptr<BaseOperator>> result;
std::vector<std::unique_ptr<BaseOperator>> ops =
BuildOperatorList(enable_select_tf_ops);
for (auto& op : ops) {
result[op->type()] = std::move(op);
}
return result;
}
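// Minimal usage sketch (hypothetical caller, for illustration only): export
// code can look a serializer up by type and delegate to it, e.g.
//   auto ops_by_type = BuildOperatorByTypeMap(/*enable_select_tf_ops=*/false);
//   const BaseOperator& serializer = *ops_by_type.at(OperatorType::kAdd);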
std::map<std::string, std::unique_ptr<BaseOperator>> BuildOperatorByNameMap(
bool enable_select_tf_ops) {
std::map<std::string, std::unique_ptr<BaseOperator>> result;
std::vector<std::unique_ptr<BaseOperator>> ops =
BuildOperatorList(enable_select_tf_ops);
for (auto& op : ops) {
result[op->name()] = std::move(op);
}
return result;
}
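// An op is exported as a flex op only if select TF ops are enabled, the op is
// registered in the global TensorFlow op registry, and it appears on the
// flex-delegate allowlist.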
bool ShouldExportAsFlexOp(bool enable_select_tf_ops,
const std::string& tensorflow_op_name) {
if (!enable_select_tf_ops) {
return false;
}
const tensorflow::OpDef* op_def = nullptr;
if (!tensorflow::OpRegistry::Global()
->LookUpOpDef(tensorflow_op_name, &op_def)
.ok()) {
return false;
}
if (!::tflite::flex::IsAllowlistedFlexOp(tensorflow_op_name)) {
LOG(WARNING) << "Op " << tensorflow_op_name
<< " is a valid TensorFlow op but has not been allowlisted for"
" the TensorFlow Lite flex op set.";
return false;
}
return true;
}
}  // namespace tflite
} | #include "tensorflow/lite/toco/tflite/operator.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace tflite {
namespace {
class OperatorTest : public ::testing::Test {
protected:
const BaseOperator& GetOperator(const std::string& name, OperatorType type) {
using OpsByName = std::map<std::string, std::unique_ptr<BaseOperator>>;
using OpsByType = std::map<OperatorType, std::unique_ptr<BaseOperator>>;
static auto* by_name = new OpsByName(BuildOperatorByNameMap());
static auto* by_type = new OpsByType(BuildOperatorByTypeMap());
CHECK(by_name->count(name)) << "No operator for '" << name << "'.";
BaseOperator* op1 = by_name->at(name).get();
CHECK(op1->type() == type) << "while verifying '" << name << "'.";
CHECK(by_type->count(type))
<< "No operator for '" << OperatorTypeName(type) << "'.";
BaseOperator* op2 = by_type->at(type).get();
CHECK(op2->name() == name)
<< "while verifying '" << OperatorTypeName(type) << "'.";
return *op1;
}
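  // Round-trips an operator through a FlatBuffer: serializes the toco op,
  // finishes a ::tflite::Operator table around the resulting options, then
  // deserializes it back so tests can compare fields with the original.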
template <typename T>
std::unique_ptr<T> SerializeAndDeserialize(const BaseOperator& op,
const T& toco_op,
Options* options = nullptr) {
flatbuffers::FlatBufferBuilder builder;
Options input_options = op.Serialize(toco_op, &builder);
if (options) {
*options = input_options;
}
    builder.Finish(CreateOperator(builder, /*opcode_index=*/0, /*inputs=*/0,
                                  /*outputs=*/0, input_options.type,
                                  input_options.builtin, input_options.custom,
                                  ::tflite::CustomOptionsFormat_FLEXBUFFERS));
auto* output_options =
flatbuffers::GetRoot<::tflite::Operator>(builder.GetBufferPointer());
auto new_toco_op = op.Deserialize(output_options->builtin_options(),
output_options->custom_options());
    CHECK(new_toco_op->type == toco_op.type)
        << "The type of the serialized and deserialized "
        << HelpfulOperatorTypeName(*new_toco_op)
        << " does not match the type of the original "
        << HelpfulOperatorTypeName(toco_op);
return std::unique_ptr<T>(dynamic_cast<T*>(new_toco_op.release()));
}
template <typename T>
void CheckSimpleOperator(const std::string& name, OperatorType type) {
Options options;
auto output_toco_op =
SerializeAndDeserialize(GetOperator(name, type), T(), &options);
ASSERT_EQ(0, options.builtin.o);
ASSERT_EQ(0, options.custom.o);
ASSERT_EQ(::tflite::BuiltinOptions_NONE, options.type);
ASSERT_NE(nullptr, output_toco_op.get());
}
template <typename T>
void CheckReducerOperator(const std::string& name, OperatorType type) {
T op;
op.keep_dims = false;
auto output_toco_op = SerializeAndDeserialize(GetOperator(name, type), op);
EXPECT_EQ(op.keep_dims, output_toco_op->keep_dims);
}
};
TEST_F(OperatorTest, SimpleOperators) {
CheckSimpleOperator<FloorOperator>("FLOOR", OperatorType::kFloor);
CheckSimpleOperator<CeilOperator>("CEIL", OperatorType::kCeil);
CheckSimpleOperator<EluOperator>("ELU", OperatorType::kElu);
CheckSimpleOperator<RoundOperator>("ROUND", OperatorType::kRound);
CheckSimpleOperator<ReluOperator>("RELU", OperatorType::kRelu);
CheckSimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1);
CheckSimpleOperator<Relu6Operator>("RELU6", OperatorType::kRelu6);
CheckSimpleOperator<LogisticOperator>("LOGISTIC", OperatorType::kLogistic);
CheckSimpleOperator<TanhOperator>("TANH", OperatorType::kTanh);
CheckSimpleOperator<ExpOperator>("EXP", OperatorType::kExp);
CheckSimpleOperator<CosOperator>("COS", OperatorType::kCos);
CheckSimpleOperator<LogSoftmaxOperator>("LOG_SOFTMAX",
OperatorType::kLogSoftmax);
CheckSimpleOperator<TensorFlowMaximumOperator>(
"MAXIMUM", OperatorType::kMaximum);
CheckSimpleOperator<TensorFlowMinimumOperator>(
"MINIMUM", OperatorType::kMinimum);
CheckSimpleOperator<TensorFlowLessOperator>("LESS", OperatorType::kLess);
CheckSimpleOperator<NegOperator>("NEG", OperatorType::kNeg);
CheckSimpleOperator<SelectOperator>("SELECT", OperatorType::kSelect);
CheckSimpleOperator<SliceOperator>("SLICE", OperatorType::kSlice);
CheckSimpleOperator<SinOperator>("SIN", OperatorType::kSin);
CheckSimpleOperator<TensorFlowEqualOperator>("EQUAL", OperatorType::kEqual);
CheckSimpleOperator<TensorFlowNotEqualOperator>("NOT_EQUAL",
OperatorType::kNotEqual);
CheckSimpleOperator<LogOperator>("LOG", OperatorType::kLog);
CheckSimpleOperator<TensorFlowSqrtOperator>("SQRT", OperatorType::kSqrt);
CheckSimpleOperator<TensorFlowRsqrtOperator>("RSQRT", OperatorType::kRsqrt);
CheckSimpleOperator<PowOperator>("POW", OperatorType::kPow);
CheckSimpleOperator<LogicalOrOperator>("LOGICAL_OR",
OperatorType::kLogicalOr);
CheckSimpleOperator<LogicalAndOperator>("LOGICAL_AND",
OperatorType::kLogicalAnd);
CheckSimpleOperator<LogicalNotOperator>("LOGICAL_NOT",
OperatorType::kLogicalNot);
CheckSimpleOperator<FloorDivOperator>("FLOOR_DIV", OperatorType::kFloorDiv);
CheckSimpleOperator<TensorFlowSquareOperator>("SQUARE",
OperatorType::kSquare);
CheckSimpleOperator<TensorFlowZerosLikeOperator>("ZEROS_LIKE",
OperatorType::kZerosLike);
CheckSimpleOperator<FloorModOperator>("FLOOR_MOD", OperatorType::kFloorMod);
CheckSimpleOperator<RangeOperator>("RANGE", OperatorType::kRange);
CheckSimpleOperator<FillOperator>("FILL", OperatorType::kFill);
CheckSimpleOperator<ReverseV2Operator>("REVERSE_V2",
OperatorType::kReverseV2);
CheckSimpleOperator<TensorFlowRankOperator>("RANK", OperatorType::kRank);
}
TEST_F(OperatorTest, BuiltinAdd) {
AddOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("ADD", OperatorType::kAdd), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinAddN) {
AddNOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("ADD_N", OperatorType::kAddN), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinReducerOps) {
CheckReducerOperator<MeanOperator>("MEAN", OperatorType::kMean);
CheckReducerOperator<TensorFlowSumOperator>("SUM", OperatorType::kSum);
CheckReducerOperator<TensorFlowProdOperator>("REDUCE_PROD",
OperatorType::kReduceProd);
CheckReducerOperator<TensorFlowMaxOperator>("REDUCE_MAX",
OperatorType::kReduceMax);
CheckReducerOperator<TensorFlowMinOperator>("REDUCE_MIN",
OperatorType::kReduceMin);
CheckReducerOperator<TensorFlowAnyOperator>("REDUCE_ANY", OperatorType::kAny);
}
TEST_F(OperatorTest, BuiltinCast) {
CastOperator op;
op.src_data_type = ArrayDataType::kFloat;
op.dst_data_type = ArrayDataType::kUint8;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("CAST", OperatorType::kCast), op);
EXPECT_EQ(op.src_data_type, output_toco_op->src_data_type);
EXPECT_EQ(op.dst_data_type, output_toco_op->dst_data_type);
}
TEST_F(OperatorTest, CustomConcatenation) {
ConcatenationOperator op;
op.axis = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("CONCATENATION", OperatorType::kConcatenation), op);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, CustomDepthToSpace) {
DepthToSpaceOperator op;
op.block_size = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEPTH_TO_SPACE", OperatorType::kDepthToSpace), op);
EXPECT_EQ(op.block_size, output_toco_op->block_size);
}
TEST_F(OperatorTest, CustomFakeQuant) {
FakeQuantOperator op;
auto* minmax = new MinMax;
minmax->min = -10;
minmax->max = 200;
op.minmax.reset(minmax);
op.num_bits = 16;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("FAKE_QUANT", OperatorType::kFakeQuant), op);
EXPECT_EQ(op.minmax->min, output_toco_op->minmax->min);
EXPECT_EQ(op.minmax->max, output_toco_op->minmax->max);
EXPECT_EQ(op.num_bits, output_toco_op->num_bits);
}
TEST_F(OperatorTest, CustomFullyConnected) {
FullyConnectedOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("FULLY_CONNECTED", OperatorType::kFullyConnected), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinGather) {
GatherOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("GATHER", OperatorType::kGather), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinGatherNd) {
GatherNdOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("GATHER_ND", OperatorType::kGatherNd), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinWhere) {
WhereOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("WHERE", OperatorType::kWhere), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinL2Pool) {
L2PoolOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("L2_POOL_2D", OperatorType::kL2Pool), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinLocalResponseNormalization) {
LocalResponseNormalizationOperator op;
op.range = 123;
op.bias = 1.23;
op.alpha = 12.3;
op.beta = .123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("LOCAL_RESPONSE_NORMALIZATION",
OperatorType::kLocalResponseNormalization),
op);
EXPECT_EQ(op.range, output_toco_op->range);
EXPECT_EQ(op.bias, output_toco_op->bias);
EXPECT_EQ(op.alpha, output_toco_op->alpha);
EXPECT_EQ(op.beta, output_toco_op->beta);
}
TEST_F(OperatorTest, BuiltinMaxPool) {
MaxPoolOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("MAX_POOL_2D", OperatorType::kMaxPool), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinReshape) {
TensorFlowReshapeOperator op;
op.shape = {1, 2, 4, 5, 8};
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESHAPE", OperatorType::kReshape), op);
EXPECT_EQ(op.shape, output_toco_op->shape);
}
TEST_F(OperatorTest, CustomSoftmax) {
SoftmaxOperator op;
op.beta = 123.1;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SOFTMAX", OperatorType::kSoftmax), op);
EXPECT_EQ(op.beta, output_toco_op->beta);
}
TEST_F(OperatorTest, BuiltinSpaceToDepth) {
SpaceToDepthOperator op;
op.block_size = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SPACE_TO_DEPTH", OperatorType::kSpaceToDepth), op);
EXPECT_EQ(op.block_size, output_toco_op->block_size);
}
TEST_F(OperatorTest, CustomSplit) {
TensorFlowSplitOperator op;
op.num_split = 123;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SPLIT", OperatorType::kSplit), op);
EXPECT_EQ(op.num_split, output_toco_op->num_split);
}
TEST_F(OperatorTest, CustomSplitV) {
TensorFlowSplitVOperator op;
op.num_split = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SPLIT_V", OperatorType::kSplitV), op);
EXPECT_EQ(op.num_split, output_toco_op->num_split);
}
TEST_F(OperatorTest, BuiltinAveragePool) {
AveragePoolOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("AVERAGE_POOL_2D", OperatorType::kAveragePool), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinConvolution) {
ConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("CONV_2D", OperatorType::kConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinDepthwiseConvolution) {
DepthwiseConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.depth_multiplier = 6;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEPTHWISE_CONV_2D", OperatorType::kDepthwiseConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.depth_multiplier, output_toco_op->depth_multiplier);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinL2Norm) {
L2NormalizationOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("L2_NORMALIZATION", OperatorType::kL2Normalization), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinMul) {
MulOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("MUL", OperatorType::kMul), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, ResizeBilinear) {
ResizeBilinearOperator op;
op.align_corners = true;
op.half_pixel_centers = false;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear), op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeBilinear_HalfPixelCenters) {
ResizeBilinearOperator op;
op.align_corners = true;
op.half_pixel_centers = true;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear), op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeNearestNeighbor) {
ResizeNearestNeighborOperator op;
op.align_corners = true;
op.half_pixel_centers = false;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("RESIZE_NEAREST_NEIGHBOR",
OperatorType::kResizeNearestNeighbor),
op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeNearestNeighbor_HalfPixelCenters) {
ResizeNearestNeighborOperator op;
op.align_corners = true;
op.half_pixel_centers = true;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("RESIZE_NEAREST_NEIGHBOR",
OperatorType::kResizeNearestNeighbor),
op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, Svdf) {
SvdfOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu;
op.rank = 1;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SVDF", OperatorType::kSvdf), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
EXPECT_EQ(op.rank, output_toco_op->rank);
}
TEST_F(OperatorTest, Squeeze) {
SqueezeOperator op;
op.squeeze_dims = {-2, -3, 4, 1, 4};
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SQUEEZE", OperatorType::kSqueeze), op);
EXPECT_EQ(op.squeeze_dims, output_toco_op->squeeze_dims);
}
TEST_F(OperatorTest, StridedSlice) {
StridedSliceOperator op;
op.begin_mask = 1;
op.end_mask = 2;
op.ellipsis_mask = 1;
op.new_axis_mask = 1;
op.shrink_axis_mask = 2;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("STRIDED_SLICE", OperatorType::kStridedSlice), op);
EXPECT_EQ(op.start_indices, output_toco_op->start_indices);
EXPECT_EQ(op.stop_indices, output_toco_op->stop_indices);
EXPECT_EQ(op.strides, output_toco_op->strides);
EXPECT_EQ(op.begin_mask, output_toco_op->begin_mask);
EXPECT_EQ(op.end_mask, output_toco_op->end_mask);
EXPECT_EQ(op.ellipsis_mask, output_toco_op->ellipsis_mask);
EXPECT_EQ(op.new_axis_mask, output_toco_op->new_axis_mask);
EXPECT_EQ(op.shrink_axis_mask, output_toco_op->shrink_axis_mask);
}
TEST_F(OperatorTest, BuiltinTopKV2) {
TopKV2Operator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TOPK_V2", OperatorType::kTopK_V2), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinArgMax) {
ArgMaxOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ARG_MAX", OperatorType::kArgMax), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinArgMin) {
ArgMinOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ARG_MIN", OperatorType::kArgMin), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinDequantize) {
  DequantizeOperator op;
  auto output_toco_op = SerializeAndDeserialize(
      GetOperator("DEQUANTIZE", OperatorType::kDequantize), op);
  ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinTransposeConv) {
TransposeConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TRANSPOSE_CONV", OperatorType::kTransposeConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
}
TEST_F(OperatorTest, BuiltinShape) {
TensorFlowShapeOperator op;
op.output_data_type = ArrayDataType::kInt64;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SHAPE", OperatorType::kShape), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinSparseToDense) {
SparseToDenseOperator op;
op.validate_indices = false;
std::unique_ptr<toco::SparseToDenseOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("SPARSE_TO_DENSE", OperatorType::kSparseToDense), op);
EXPECT_EQ(op.validate_indices, output_toco_op->validate_indices);
}
TEST_F(OperatorTest, VersioningSparseToDense) {
SparseToDenseOperator op;
op.inputs = {"indices", "output_shape", "input_values", "default_value"};
auto operator_by_type_map = BuildOperatorByTypeMap(false );
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model int32_model;
Array& int32_array = int32_model.GetOrCreateArray(op.inputs[2]);
int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &op, .model = &int32_model};
EXPECT_EQ(base_op->GetVersion(int32_signature), 1);
Model int64_model;
Array& int64_array = int64_model.GetOrCreateArray(op.inputs[2]);
int64_array.data_type = ArrayDataType::kInt64;
OperatorSignature int64_signature = {.op = &op, .model = &int64_model};
EXPECT_EQ(base_op->GetVersion(int64_signature), 2);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[2]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 3);
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[2]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 3);
}
TEST_F(OperatorTest, BuiltinPack) {
PackOperator op;
op.values_count = 3;
op.axis = 1;
std::unique_ptr<toco::PackOperator> output_toco_op =
SerializeAndDeserialize(GetOperator("PACK", OperatorType::kPack), op);
EXPECT_EQ(op.values_count, output_toco_op->values_count);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinOneHot) {
OneHotOperator op;
op.axis = 2;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ONE_HOT", OperatorType::kOneHot), op);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinUnpack) {
UnpackOperator op;
op.num = 5;
op.axis = 2;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("UNPACK", OperatorType::kUnpack), op);
EXPECT_EQ(op.num, output_toco_op->num);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinLeakyRelu) {
LeakyReluOperator op;
op.alpha = 3;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("LEAKY_RELU", OperatorType::kLeakyRelu), op);
EXPECT_EQ(op.alpha, output_toco_op->alpha);
}
TEST_F(OperatorTest, BuiltinSquaredDifference) {
SquaredDifferenceOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SQUARED_DIFFERENCE", OperatorType::kSquaredDifference), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinScatterNd) {
ScatterNdOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SCATTER_ND", OperatorType::kScatterNd), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, CustomCTCBeamSearchDecoder) {
CTCBeamSearchDecoderOperator op;
op.beam_width = 3;
op.top_paths = 2;
op.merge_repeated = false;
std::unique_ptr<toco::CTCBeamSearchDecoderOperator> output_toco_op =
SerializeAndDeserialize(GetOperator("CTC_BEAM_SEARCH_DECODER",
OperatorType::kCTCBeamSearchDecoder),
op);
EXPECT_EQ(op.beam_width, output_toco_op->beam_width);
EXPECT_EQ(op.top_paths, output_toco_op->top_paths);
EXPECT_EQ(op.merge_repeated, output_toco_op->merge_repeated);
}
TEST_F(OperatorTest, TensorFlowUnsupported) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
::tensorflow::NodeDef node_def;
auto attr = node_def.mutable_attr();
(*attr)["float_attr"].set_f(2.0);
(*attr)["str_attr"].set_s("Hello World");
(*attr)["int_attr"].set_i(17);
(*attr)["bool_attr"].set_b(true);
{
auto* list = (*attr)["list_string_attr"].mutable_list();
list->add_s("abcde");
list->add_s("1234");
list->add_s("");
list->add_s("zyxwv");
list->add_s("!-.");
}
{
auto* list = (*attr)["list_float_attr"].mutable_list();
list->add_f(std::numeric_limits<float>::min());
list->add_f(2.0);
list->add_f(-std::numeric_limits<float>::max());
}
{
auto* list = (*attr)["list_int_attr"].mutable_list();
list->add_i(1);
list->add_i(20);
list->add_i(1LL << 40);
list->add_i(-(1LL << 40));
}
node_def.SerializeToString(&op.tensorflow_node_def);
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported), op);
::tensorflow::NodeDef output_node_def;
output_node_def.ParseFromString(output_toco_op->tensorflow_node_def);
const auto& output_attr = output_node_def.attr();
EXPECT_EQ(2.0, output_attr.at("float_attr").f());
EXPECT_EQ("Hello World", output_attr.at("str_attr").s());
EXPECT_EQ(17, output_attr.at("int_attr").i());
EXPECT_EQ(true, output_attr.at("bool_attr").b());
{
const auto& list = output_attr.at("list_string_attr").list();
ASSERT_EQ(5, list.s_size());
EXPECT_EQ("abcde", list.s(0));
EXPECT_EQ("1234", list.s(1));
EXPECT_EQ("", list.s(2));
EXPECT_EQ("zyxwv", list.s(3));
EXPECT_EQ("!-.", list.s(4));
}
{
const auto& list = output_attr.at("list_float_attr").list();
ASSERT_EQ(3, list.f_size());
EXPECT_EQ(std::numeric_limits<float>::min(), list.f(0));
EXPECT_EQ(2.0, list.f(1));
EXPECT_EQ(-std::numeric_limits<float>::max(), list.f(2));
}
{
const auto& list = output_attr.at("list_int_attr").list();
ASSERT_EQ(4, list.i_size());
EXPECT_EQ(1, list.i(0));
EXPECT_EQ(20, list.i(1));
EXPECT_EQ(1LL << 40, list.i(2));
EXPECT_EQ(-(1LL << 40), list.i(3));
}
}
TEST_F(OperatorTest, TensorFlowUnsupportedWithoutAttr) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported), op);
::tensorflow::NodeDef output_node_def;
output_node_def.ParseFromString(output_toco_op->tensorflow_node_def);
EXPECT_TRUE(output_node_def.attr().empty());
}
TEST_F(OperatorTest, TestShouldExportAsFlexOp) {
EXPECT_FALSE(ShouldExportAsFlexOp(false, "Conv2D"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "Conv2D"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "EluGrad"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "RFFT"));
EXPECT_FALSE(ShouldExportAsFlexOp(true, "MyAwesomeCustomOp"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "RandomShuffle"));
}
TEST_F(OperatorTest, BuiltinMirrorPad) {
MirrorPadOperator op;
op.mode = MirrorPadMode::kReflect;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("MIRROR_PAD", OperatorType::kMirrorPad), op);
EXPECT_EQ(op.mode, output_toco_op->mode);
}
TEST_F(OperatorTest, BuiltinUnique) {
UniqueOperator op;
op.idx_out_type = ArrayDataType::kInt64;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("UNIQUE", OperatorType::kUnique), op);
ASSERT_NE(nullptr, output_toco_op.get());
EXPECT_EQ(output_toco_op->idx_out_type, op.idx_out_type);
}
TEST_F(OperatorTest, BuiltinSegmentSum) {
SegmentSumOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SEGMENT_SUM", OperatorType::kSegmentSum), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinReverseSequence) {
ReverseSequenceOperator op;
op.seq_dim = 3;
op.batch_dim = 1;
std::unique_ptr<toco::ReverseSequenceOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("REVERSE_SEQUENCE", OperatorType::kReverseSequence), op);
EXPECT_EQ(op.seq_dim, output_toco_op->seq_dim);
EXPECT_EQ(op.batch_dim, output_toco_op->batch_dim);
}
TEST_F(OperatorTest, BuiltinMatrixDiag) {
  MatrixDiagOperator op;
  std::unique_ptr<toco::MatrixDiagOperator> output_toco_op =
      SerializeAndDeserialize(
          GetOperator("MATRIX_DIAG", OperatorType::kMatrixDiag), op);
  ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinMatrixSetDiag) {
  MatrixSetDiagOperator op;
  std::unique_ptr<toco::MatrixSetDiagOperator> output_toco_op =
      SerializeAndDeserialize(
          GetOperator("MATRIX_SET_DIAG", OperatorType::kMatrixSetDiag), op);
  ASSERT_NE(nullptr, output_toco_op.get());
}
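// Shared check for ops whose version is bumped solely by int8 quantization:
// uint8 data should map to version 1 and int8 data to version 2.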
template <typename Op>
void SimpleVersioningTest() {
Op op;
op.inputs = {"input1"};
auto operator_by_type_map = BuildOperatorByTypeMap(false );
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
template <typename Op>
void SimpleOutputVersioningTest() {
Op op;
op.outputs = {"output1"};
auto operator_by_type_map = BuildOperatorByTypeMap(false );
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.outputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.outputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
TEST_F(OperatorTest, VersioningEqualTest) {
SimpleVersioningTest<TensorFlowEqualOperator>();
}
TEST_F(OperatorTest, VersioningNotEqualTest) {
SimpleVersioningTest<TensorFlowNotEqualOperator>();
}
TEST_F(OperatorTest, VersioningLessTest) {
SimpleVersioningTest<TensorFlowLessOperator>();
}
TEST_F(OperatorTest, VersioningLessEqualTest) {
SimpleVersioningTest<TensorFlowLessEqualOperator>();
}
TEST_F(OperatorTest, VersioningGreaterTest) {
SimpleVersioningTest<TensorFlowGreaterOperator>();
}
TEST_F(OperatorTest, VersioningGreaterEqualTest) {
SimpleVersioningTest<TensorFlowGreaterEqualOperator>();
}
TEST_F(OperatorTest, VersioningSpaceToBatchNDTest) {
SpaceToBatchNDOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.copy_shape({1, 2, 2, 2});
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.copy_shape({1, 2, 2, 2});
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
Model float_model;
Array& float_array = float_model.GetOrCreateArray(op.inputs[0]);
float_array.copy_shape({1, 2, 2});
float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &op, .model = &float_model};
EXPECT_EQ(base_op->GetVersion(float_signature), 3);
}
TEST_F(OperatorTest, VersioningLogSoftmaxTest) {
SimpleVersioningTest<LogSoftmaxOperator>();
}
TEST_F(OperatorTest, VersioningPackTest) {
SimpleVersioningTest<PackOperator>();
}
TEST_F(OperatorTest, VersioningUnpackTest) {
UnpackOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model int32_model;
Array& int32_array = int32_model.GetOrCreateArray(op.inputs[0]);
int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &op, .model = &int32_model};
EXPECT_EQ(base_op->GetVersion(int32_signature), 1);
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 2);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
TEST_F(OperatorTest, VersioningBatchToSpaceNDTest) {
BatchToSpaceNDOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
uint8_array.copy_shape({1, 2, 2, 2});
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
int8_array.copy_shape({1, 2, 2, 2});
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
Model float_model;
Array& float_array = float_model.GetOrCreateArray(op.inputs[0]);
float_array.copy_shape({1, 2, 2});
float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &op, .model = &float_model};
EXPECT_EQ(base_op->GetVersion(float_signature), 3);
}
TEST_F(OperatorTest, VersioningTanhTest) {
SimpleVersioningTest<TanhOperator>();
}
TEST_F(OperatorTest, VersioningStridedSliceTest) {
StridedSliceOperator op;
op.inputs = {"input1"};
op.ellipsis_mask = 0;
op.new_axis_mask = 0;
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
Model bool_model;
Array& bool_array = bool_model.GetOrCreateArray(op.inputs[0]);
bool_array.data_type = ArrayDataType::kBool;
OperatorSignature bool_signature = {.op = &op, .model = &bool_model};
EXPECT_EQ(base_op->GetVersion(bool_signature), 3);
op.start_indices = {0, 0, 0, 0, 0};
op.stop_indices = {1, 2, 2, 2, 2};
op.strides = {1, 1, 1, 1, 1};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 4);
EXPECT_EQ(base_op->GetVersion(int8_signature), 4);
EXPECT_EQ(base_op->GetVersion(bool_signature), 4);
}
TEST_F(OperatorTest, VersioningSpaceToDepthTest) {
SimpleVersioningTest<SpaceToDepthOperator>();
}
TEST_F(OperatorTest, VersioningSliceTest) {
SimpleVersioningTest<SliceOperator>();
SliceOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model string_model;
Array& string_array = string_model.GetOrCreateArray(op.inputs[0]);
string_array.data_type = ArrayDataType::kString;
OperatorSignature string_signature = {.op = &op, .model = &string_model};
EXPECT_EQ(base_op->GetVersion(string_signature), 3);
}
TEST_F(OperatorTest, VersioningLogisticTest) {
SimpleVersioningTest<LogisticOperator>();
}
TEST_F(OperatorTest, VersioningL2NormTest) {
SimpleOutputVersioningTest<L2NormalizationOperator>();
}
TEST_F(OperatorTest, VersioningMaxTest) {
SimpleVersioningTest<TensorFlowMaximumOperator>();
}
TEST_F(OperatorTest, VersioningMinTest) {
SimpleVersioningTest<TensorFlowMinimumOperator>();
}
TEST_F(OperatorTest, VersioningMeanTest) {
SimpleVersioningTest<MeanOperator>();
}
TEST_F(OperatorTest, VersioningSumTest) {
SimpleVersioningTest<TensorFlowSumOperator>();
}
TEST_F(OperatorTest, VersioningAddTest) { SimpleVersioningTest<AddOperator>(); }
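// MUL picks its version from both the tensor type and the effective output
// rescale (input1_scale * input2_scale / output_scale). As the cases below
// encode: uint8 stays at version 1, int8 uses version 2, and an int8 op whose
// rescale exceeds 1.0 (the 2.0f case) is bumped to version 3.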
void SimpleMulVersioningTest(ArrayDataType data_type, float multiplier,
int version) {
MulOperator op;
op.inputs = {"input1", "input2"};
op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model model;
Array& input0 = model.GetOrCreateArray(op.inputs[0]);
Array& input1 = model.GetOrCreateArray(op.inputs[1]);
Array& output = model.GetOrCreateArray(op.outputs[0]);
input0.data_type = data_type;
input0.GetOrCreateQuantizationParams().scale = 1.0f;
input1.data_type = data_type;
input1.GetOrCreateQuantizationParams().scale = 1.0f;
output.data_type = data_type;
output.GetOrCreateQuantizationParams().scale = 1.0f / multiplier;
OperatorSignature signature = {.op = &op, .model = &model};
EXPECT_EQ(base_op->GetVersion(signature), version);
}
TEST_F(OperatorTest, VersioningMulTest) {
SimpleMulVersioningTest(ArrayDataType::kUint8, 0.5f, 1);
SimpleMulVersioningTest(ArrayDataType::kInt8, 0.5f, 2);
SimpleMulVersioningTest(ArrayDataType::kInt8, 2.0f, 3);
}
template <typename OpType>
void SimpleTwoInputsVersioningTest(ArrayDataType data_type, Shape shape1,
Shape shape2, int version) {
OpType op;
op.inputs = {"input1", "input2"};
op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model model;
Array& input0 = model.GetOrCreateArray(op.inputs[0]);
Array& input1 = model.GetOrCreateArray(op.inputs[1]);
Array& output = model.GetOrCreateArray(op.outputs[0]);
input0.data_type = data_type;
input0.copy_shape(shape1);
input1.data_type = data_type;
input1.copy_shape(shape2);
output.data_type = data_type;
OperatorSignature signature = {.op = &op, .model = &model};
EXPECT_EQ(base_op->GetVersion(signature), version);
}
template <typename OpType>
void SimpleThreeInputsVersioningTest(ArrayDataType data_type, Shape shape1,
Shape shape2, Shape shape3, int version) {
OpType op;
op.inputs = {"input1", "input2", "input3"};
op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model model;
Array& input0 = model.GetOrCreateArray(op.inputs[0]);
Array& input1 = model.GetOrCreateArray(op.inputs[1]);
Array& input2 = model.GetOrCreateArray(op.inputs[2]);
Array& output = model.GetOrCreateArray(op.outputs[0]);
input0.data_type = data_type;
input0.copy_shape(shape1);
input1.data_type = data_type;
input1.copy_shape(shape2);
input2.data_type = data_type;
input2.copy_shape(shape3);
output.data_type = data_type;
OperatorSignature signature = {.op = &op, .model = &model};
EXPECT_EQ(base_op->GetVersion(signature), version);
}
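// SUB, DIV and SELECT gain a version when broadcasting over more than four
// dimensions is required, which is why the 5-D cases below expect a higher
// version than their otherwise identical 4-D counterparts.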
TEST_F(OperatorTest, VersioningSubTest) {
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 2}, 1);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kInt8, {1, 2, 2, 2},
{1, 2, 2, 2}, 2);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kUint8, {1, 2, 2},
{1, 2, 2}, 1);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kInt8, {1, 2, 2},
{1, 2, 2}, 2);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 1}, 1);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kInt8, {1, 2, 2, 2},
{1, 2, 2, 1}, 2);
SimpleTwoInputsVersioningTest<SubOperator>(
ArrayDataType::kUint8, {1, 2, 2, 2, 2}, {1, 2, 2, 2, 1}, 3);
SimpleTwoInputsVersioningTest<SubOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2, 2}, {1, 2, 2, 2, 1}, 3);
}
TEST_F(OperatorTest, VersioningDivTest) {
SimpleTwoInputsVersioningTest<DivOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 2}, 1);
SimpleTwoInputsVersioningTest<DivOperator>(ArrayDataType::kInt8, {1, 2, 2},
{1, 2, 2}, 1);
SimpleTwoInputsVersioningTest<DivOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 1}, 1);
SimpleTwoInputsVersioningTest<DivOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2, 2}, {1, 2, 2, 2, 1}, 2);
}
TEST_F(OperatorTest, VersioningPadTest) { SimpleVersioningTest<PadOperator>(); }
TEST_F(OperatorTest, VersioningPadV2Test) {
SimpleVersioningTest<PadV2Operator>();
}
TEST_F(OperatorTest, VersioningConcatenationTest) {
SimpleVersioningTest<ConcatenationOperator>();
}
TEST_F(OperatorTest, VersioningSelectTest) {
SimpleThreeInputsVersioningTest<SelectOperator>(
ArrayDataType::kUint8, {1, 2, 2, 2}, {1, 2, 2, 1}, {1, 2, 2, 1}, 1);
SimpleThreeInputsVersioningTest<SelectOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2}, {1, 2, 2, 1}, {1, 2, 2, 1}, 2);
SimpleThreeInputsVersioningTest<SelectOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2, 1}, {1, 2, 2, 1, 1}, {1, 2, 2, 1, 1},
3);
}
TEST_F(OperatorTest, VersioningRelu6Test) {
SimpleVersioningTest<Relu6Operator>();
}
TEST_F(OperatorTest, VersioningFullyConnectedTest) {
FullyConnectedOperator fully_connected_op;
fully_connected_op.inputs = {"input", "weight"};
fully_connected_op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op =
operator_by_type_map.at(fully_connected_op.type).get();
Model uint8_model;
Array& input_uint8_array =
uint8_model.GetOrCreateArray(fully_connected_op.inputs[0]);
input_uint8_array.data_type = ArrayDataType::kUint8;
Array& weight_uint8_array =
uint8_model.GetOrCreateArray(fully_connected_op.inputs[1]);
weight_uint8_array.data_type = ArrayDataType::kUint8;
Array& output_uint8_array =
uint8_model.GetOrCreateArray(fully_connected_op.outputs[0]);
output_uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &fully_connected_op,
.model = &uint8_model};
EXPECT_EQ(op->GetVersion(uint8_signature), 6);
Model int8_model;
Array& input_int8_array =
int8_model.GetOrCreateArray(fully_connected_op.inputs[0]);
input_int8_array.data_type = ArrayDataType::kInt8;
Array& weight_int8_array =
int8_model.GetOrCreateArray(fully_connected_op.inputs[1]);
weight_int8_array.data_type = ArrayDataType::kInt8;
Array& output_int8_array =
int8_model.GetOrCreateArray(fully_connected_op.outputs[0]);
output_int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &fully_connected_op,
.model = &int8_model};
EXPECT_EQ(op->GetVersion(int8_signature), 6);
}
TEST_F(OperatorTest, VersioningDequantizeTest) {
DequantizeOperator dequant_op;
dequant_op.inputs = {"input"};
dequant_op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op = operator_by_type_map.at(dequant_op.type).get();
Model int16_model;
Array& input_int16_array = int16_model.GetOrCreateArray(dequant_op.inputs[0]);
input_int16_array.data_type = ArrayDataType::kInt16;
OperatorSignature int16_signature = {.op = &dequant_op,
.model = &int16_model};
EXPECT_EQ(op->GetVersion(int16_signature), 3);
Model float16_model;
Array& input_float16_array =
float16_model.GetOrCreateArray(dequant_op.inputs[0]);
input_float16_array.data_type = ArrayDataType::kFloat16;
OperatorSignature float16_signature = {.op = &dequant_op,
.model = &float16_model};
EXPECT_EQ(op->GetVersion(float16_signature), 3);
Model int8_model;
Array& input_int8_array = int8_model.GetOrCreateArray(dequant_op.inputs[0]);
input_int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &dequant_op, .model = &int8_model};
EXPECT_EQ(op->GetVersion(int8_signature), 2);
Model float_model;
Array& input_float_array = float_model.GetOrCreateArray(dequant_op.inputs[0]);
input_float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &dequant_op,
.model = &float_model};
EXPECT_EQ(op->GetVersion(float_signature), 1);
}
TEST_F(OperatorTest, VersioningConv2DTest) {
ConvOperator conv_op;
conv_op.inputs = {"input", "filter"};
conv_op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op = operator_by_type_map.at(conv_op.type).get();
Model uint8_model;
Array& input_uint8_array = uint8_model.GetOrCreateArray(conv_op.inputs[0]);
input_uint8_array.data_type = ArrayDataType::kUint8;
Array& filter_uint8_array = uint8_model.GetOrCreateArray(conv_op.inputs[1]);
filter_uint8_array.data_type = ArrayDataType::kUint8;
Array& output_uint8_array = uint8_model.GetOrCreateArray(conv_op.outputs[0]);
output_uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &conv_op, .model = &uint8_model};
EXPECT_EQ(op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& input_int8_array = int8_model.GetOrCreateArray(conv_op.inputs[0]);
input_int8_array.data_type = ArrayDataType::kInt8;
Array& filter_int8_array = int8_model.GetOrCreateArray(conv_op.inputs[1]);
filter_int8_array.data_type = ArrayDataType::kInt8;
Array& output_int8_array = int8_model.GetOrCreateArray(conv_op.outputs[0]);
output_int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &conv_op, .model = &int8_model};
EXPECT_EQ(op->GetVersion(int8_signature), 3);
Model float_model;
Array& input_float_array = float_model.GetOrCreateArray(conv_op.inputs[0]);
input_float_array.data_type = ArrayDataType::kFloat;
Array& filter_int8_array1 = float_model.GetOrCreateArray(conv_op.inputs[1]);
filter_int8_array1.data_type = ArrayDataType::kInt8;
Array& output_float_array = float_model.GetOrCreateArray(conv_op.outputs[0]);
output_float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &conv_op, .model = &float_model};
EXPECT_EQ(op->GetVersion(float_signature), 2);
}
TEST_F(OperatorTest, VersioningFloorDivOperatorTest) {
FloorDivOperator floordiv_op;
floordiv_op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op = operator_by_type_map.at(floordiv_op.type).get();
Model int32_model;
Array& input_int32_array =
int32_model.GetOrCreateArray(floordiv_op.inputs[0]);
input_int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &floordiv_op,
.model = &int32_model};
EXPECT_EQ(op->GetVersion(int32_signature), 1);
Model float_model;
Array& input_float_array =
float_model.GetOrCreateArray(floordiv_op.inputs[0]);
input_float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &floordiv_op,
.model = &float_model};
EXPECT_EQ(op->GetVersion(float_signature), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/operator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/operator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05eda510-6916-41f3-b867-9622765d076c | cpp | tensorflow/tensorflow | c_api_opaque | tensorflow/lite/core/c/c_api_opaque.cc | tensorflow/lite/core/c/c_api_opaque_test.cc | #include "tensorflow/lite/core/c/c_api_opaque.h"
#include <stdarg.h>
#include <stdint.h>
#include <cstdio>
#include <vector>
#include "tensorflow/lite/c/c_api_opaque_internal.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/util.h"
namespace {
const TfLiteTensor* Convert(const TfLiteOpaqueTensor* opaque_tensor) {
return reinterpret_cast<const TfLiteTensor*>(opaque_tensor);
}
TfLiteTensor* Convert(TfLiteOpaqueTensor* opaque_tensor) {
return reinterpret_cast<TfLiteTensor*>(opaque_tensor);
}
TfLiteNode* Convert(TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<TfLiteNode*>(opaque_node);
}
const TfLiteNode* Convert(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node);
}
const TfLiteContext* Convert(const TfLiteOpaqueContext* opaque_context) {
return reinterpret_cast<const TfLiteContext*>(opaque_context);
}
TfLiteContext* Convert(TfLiteOpaqueContext* opaque_context) {
return reinterpret_cast<TfLiteContext*>(opaque_context);
}
TfLiteOpaqueContext* Convert(TfLiteContext* tflite_context) {
return reinterpret_cast<TfLiteOpaqueContext*>(tflite_context);
}
const ::tflite::Subgraph* GetSubgraph(
const TfLiteOpaqueContext* opaque_context) {
return reinterpret_cast<const ::tflite::Subgraph*>(
Convert(opaque_context)->impl_);
}
::tflite::Subgraph* GetSubgraph(TfLiteOpaqueContext* opaque_context) {
return reinterpret_cast<::tflite::Subgraph*>(Convert(opaque_context)->impl_);
}
}
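// Staging area for the properties of a tensor created through
// TfLiteOpaqueContextAddTensor().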
struct TfLiteOpaqueTensorBuilder {
TfLiteType type;
void* data;
TfLiteAllocationType allocation_type;
TfLiteQuantizationParams quantization_params;
TfLiteQuantization quantization;
};
TfLiteType TfLiteOpaqueTensorType(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorType(reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
int32_t TfLiteOpaqueTensorNumDims(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorNumDims(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
int32_t TfLiteOpaqueTensorDim(const TfLiteOpaqueTensor* opaque_tensor,
int32_t dim_index) {
return TfLiteTensorDim(reinterpret_cast<const TfLiteTensor*>(opaque_tensor),
dim_index);
}
TfLiteStatus TfLiteOpaqueTensorGetNumDimsSignature(
const TfLiteOpaqueTensor* opaque_tensor, int32_t* num_dims) {
const TfLiteTensor* tensor = Convert(opaque_tensor);
if (!tensor->dims_signature) {
*num_dims = -1;
return kTfLiteOk;
}
*num_dims = tensor->dims_signature->size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueTensorGetDimSignature(
const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index,
int32_t* dim_length) {
const TfLiteTensor* tensor = Convert(opaque_tensor);
if (tensor->dims_signature != nullptr && tensor->dims_signature->size != 0) {
*dim_length = tensor->dims_signature->data[dim_index];
} else {
*dim_length = tensor->dims->data[dim_index];
}
return kTfLiteOk;
}
int TfLiteOpaqueTensorIsVariable(const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->is_variable ? 1 : 0;
}
size_t TfLiteOpaqueTensorByteSize(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorByteSize(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
void* TfLiteOpaqueTensorData(const TfLiteOpaqueTensor* opaque_tensor) {
return opaque_tensor != nullptr
? TfLiteTensorData(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor))
: nullptr;
}
TfLiteAllocationType TfLiteOpaqueTensorGetAllocationType(
const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->allocation_type;
}
TfLiteAllocationStrategy TfLiteOpaqueTensorGetAllocationStrategy(
const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetAllocationStrategy(Convert(t));
}
TfLiteRunStability TfLiteOpaqueTensorGetBufferAddressStability(
const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetBufferAddressStability(Convert(t));
}
TfLiteRunStability TfLiteOpaqueTensorGetDataStability(
const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetDataStability(Convert(t));
}
TfLiteRunStep TfLiteOpaqueTensorGetDataKnownStep(const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetDataKnownStep(Convert(t));
}
TfLiteRunStep TfLiteOpaqueTensorGetShapeKnownStep(const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetShapeKnownStep(Convert(t));
}
const char* TfLiteOpaqueTensorName(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorName(reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
TfLiteQuantization TfLiteOpaqueTensorGetQuantization(
const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->quantization;
}
TfLiteQuantizationParams TfLiteOpaqueTensorGetQuantizationParams(
const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->params;
}
TfLiteStatus TfLiteOpaqueTensorCopyFromBuffer(TfLiteOpaqueTensor* opaque_tensor,
const void* input_data,
size_t input_data_size) {
return TfLiteTensorCopyFromBuffer(
reinterpret_cast<TfLiteTensor*>(opaque_tensor), input_data,
input_data_size);
}
TfLiteStatus TfLiteOpaqueTensorCopyToBuffer(
const TfLiteOpaqueTensor* opaque_tensor, void* output_data,
size_t output_data_size) {
return TfLiteTensorCopyToBuffer(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor), output_data,
output_data_size);
}
int TfLiteOpaqueTensorGetStringCount(const TfLiteOpaqueTensor* tensor) {
return tflite::GetStringCount(Convert(tensor));
}
TfLiteStatus TfLiteOpaqueTensorGetString(const TfLiteOpaqueTensor* tensor,
int index, const char** str,
int* len) {
tflite::StringRef str_ref = tflite::GetString(Convert(tensor), index);
*str = str_ref.str;
*len = str_ref.len;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueTensorWriteStrings(TfLiteOpaqueTensor* tensor,
const char* const* str_array,
int str_array_len,
const int* str_n_len) {
tflite::DynamicBuffer buf;
for (int i = 0; i < str_array_len; ++i) {
buf.AddString(str_array[i], str_n_len[i]);
}
buf.WriteToTensorAsVector(Convert(tensor));
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueTensorWriteString(TfLiteOpaqueTensor* tensor,
const char* str, const int len) {
  return TfLiteOpaqueTensorWriteStrings(tensor, &str, 1, &len);
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderCreate() {
return new TfLiteOpaqueTensorBuilder{};
}
void TfLiteOpaqueTensorBuilderDelete(TfLiteOpaqueTensorBuilder* builder) {
delete builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetType(
TfLiteOpaqueTensorBuilder* builder, TfLiteType type) {
builder->type = type;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetData(
TfLiteOpaqueTensorBuilder* builder, void* data) {
builder->data = data;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetAllocationType(
TfLiteOpaqueTensorBuilder* builder, TfLiteAllocationType allocation_type) {
builder->allocation_type = allocation_type;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetQuantizationParams(
TfLiteOpaqueTensorBuilder* builder, TfLiteQuantizationParams params) {
builder->quantization_params = params;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetQuantization(
TfLiteOpaqueTensorBuilder* builder, TfLiteQuantization quantization) {
builder->quantization = quantization;
return builder;
}
void TfLiteOpaqueTensorSetAllocationTypeToDynamic(TfLiteOpaqueTensor* tensor) {
tflite::SetTensorToDynamic(Convert(tensor));
}
const TfLiteOpaqueTensor* TfLiteOpaqueNodeGetInput(
const TfLiteOpaqueContext* opaque_context,
const TfLiteOpaqueNode* opaque_node, int index) {
const TfLiteTensor* tensor =
tflite::GetInput(reinterpret_cast<const TfLiteContext*>(opaque_context),
reinterpret_cast<const TfLiteNode*>(opaque_node), index);
return reinterpret_cast<const TfLiteOpaqueTensor*>(tensor);
}
TfLiteOpaqueTensor* TfLiteOpaqueNodeGetOutput(
TfLiteOpaqueContext* opaque_context, const TfLiteOpaqueNode* opaque_node,
int index) {
TfLiteTensor* tensor = tflite::GetOutput(
reinterpret_cast<TfLiteContext*>(opaque_context),
reinterpret_cast<const TfLiteNode*>(opaque_node), index);
return reinterpret_cast<TfLiteOpaqueTensor*>(tensor);
}
int TfLiteOpaqueNodeNumberOfInputs(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node)->inputs->size;
}
int TfLiteOpaqueNodeNumberOfOutputs(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node)->outputs->size;
}
void* TfLiteOpaqueNodeGetUserData(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node)->user_data;
}
void* TfLiteOpaqueNodeGetBuiltinData(const TfLiteOpaqueNode* opaque_node) {
return Convert(opaque_node)->builtin_data;
}
TfLiteStatus TfLiteOpaqueNodeGetCustomInitialData(
const TfLiteOpaqueNode* opaque_node, const void** init_data, int* size) {
*init_data = Convert(opaque_node)->custom_initial_data;
*size = Convert(opaque_node)->custom_initial_data_size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeInputs(const TfLiteOpaqueNode* opaque_node,
const int** inputs, int* num_inputs) {
const TfLiteNode* node = Convert(opaque_node);
*inputs = node->inputs->data;
*num_inputs = node->inputs->size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeOutputs(const TfLiteOpaqueNode* opaque_node,
const int** outputs, int* num_outputs) {
const TfLiteNode* node = Convert(opaque_node);
*outputs = node->outputs->data;
*num_outputs = node->outputs->size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeTemporaries(const TfLiteOpaqueNode* opaque_node,
const int** temporaries,
int* num_temporaries) {
const TfLiteNode* node = Convert(opaque_node);
*temporaries = node->temporaries->data;
*num_temporaries = node->temporaries->size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeSetTemporaries(TfLiteOpaqueNode* opaque_node,
const int* temporaries,
int num_temporaries) {
if (num_temporaries < 0) {
return kTfLiteError;
}
TfLiteNode* node = Convert(opaque_node);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(num_temporaries);
for (int i = 0; i < num_temporaries; ++i) {
node->temporaries->data[i] = temporaries[i];
}
return kTfLiteOk;
}
int TfLiteOpaqueNodeGetInputTensorIndex(const TfLiteOpaqueNode* opaque_node,
int index_of_input) {
auto* node = Convert(opaque_node);
if (index_of_input < 0 || index_of_input >= node->inputs->size) {
return -1;
}
return node->inputs->data[index_of_input];
}
int TfLiteOpaqueNodeGetOutputTensorIndex(const TfLiteOpaqueNode* opaque_node,
int index_of_output) {
auto* node = Convert(opaque_node);
if (index_of_output < 0 || index_of_output >= node->outputs->size) {
return -1;
}
return node->outputs->data[index_of_output];
}
TfLiteStatus TfLiteOpaqueContextGetExecutionPlan(
TfLiteOpaqueContext* opaque_context, TfLiteIntArray** execution_plan) {
auto context = reinterpret_cast<TfLiteContext*>(opaque_context);
return context->GetExecutionPlan(context, execution_plan);
}
TfLiteStatus TfLiteOpaqueContextGetNodeAndRegistration(
struct TfLiteOpaqueContext* opaque_context, int node_index,
TfLiteOpaqueNode** node, TfLiteOperator** registration_external) {
TfLiteContext* context = reinterpret_cast<TfLiteContext*>(opaque_context);
TfLiteNode* local_node;
TfLiteRegistration* registration;
TfLiteStatus status = context->GetNodeAndRegistration(
context, node_index, &local_node, ®istration);
if (status != kTfLiteOk) return status;
*node = reinterpret_cast<TfLiteOpaqueNode*>(local_node);
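  // Prefer an operator that was registered through the opaque API; otherwise
  // derive one from the legacy TfLiteRegistration.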
if (registration->registration_external) {
*registration_external = registration->registration_external;
return kTfLiteOk;
}
auto derived_registration =
tflite::internal::CommonOpaqueConversionUtil::ObtainOperator(
context, registration, node_index);
if (derived_registration == nullptr) return kTfLiteError;
*registration_external = derived_registration;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
struct TfLiteOpaqueContext* opaque_context,
TfLiteOperator* registration_external,
const TfLiteIntArray* nodes_to_replace,
TfLiteOpaqueDelegate* opaque_delegate) {
TfLiteContext* context = reinterpret_cast<TfLiteContext*>(opaque_context);
TfLiteDelegate* delegate = reinterpret_cast<TfLiteDelegate*>(opaque_delegate);
TfLiteRegistration registration{};
registration.registration_external = registration_external;
TfLiteStatus status = context->ReplaceNodeSubsetsWithDelegateKernels(
context, registration, nodes_to_replace, delegate);
return status;
}
TfLiteOpaqueTensor* TfLiteOpaqueContextGetOpaqueTensor(
const TfLiteOpaqueContext* opaque_context, int index) {
auto context = reinterpret_cast<const TfLiteContext*>(opaque_context);
return reinterpret_cast<TfLiteOpaqueTensor*>(&context->tensors[index]);
}
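// The accessors below reach through the context's impl_ pointer to the owning
// tflite::Subgraph.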
TfLiteStatus TfLiteOpaqueContextGetInputs(
const struct TfLiteOpaqueContext* opaque_context, const int** inputs,
int* num_inputs) {
auto* subgraph = GetSubgraph(opaque_context);
const std::vector<int>& subgraph_inputs = subgraph->inputs();
*inputs = subgraph_inputs.data();
*num_inputs = subgraph_inputs.size();
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextGetOutputs(
const struct TfLiteOpaqueContext* opaque_context, const int** outputs,
int* num_outputs) {
auto* subgraph = GetSubgraph(opaque_context);
const std::vector<int>& subgraph_outputs = subgraph->outputs();
*outputs = subgraph_outputs.data();
*num_outputs = subgraph_outputs.size();
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextGetVariables(
const struct TfLiteOpaqueContext* opaque_context, const int** variables,
int* num_variables) {
auto* subgraph = GetSubgraph(opaque_context);
const std::vector<int>& subgraph_variables = subgraph->variables();
*variables = subgraph_variables.data();
*num_variables = subgraph_variables.size();
return kTfLiteOk;
}
size_t TfLiteOpaqueContextGetNumNodes(
const struct TfLiteOpaqueContext* opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->nodes_size();
}
size_t TfLiteOpaqueContextGetNumTensors(
const struct TfLiteOpaqueContext* opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->tensors_size();
}
const char* TfLiteOpaqueContextGetName(
const struct TfLiteOpaqueContext* opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->GetName().c_str();
}
TfLiteStatus TfLiteOpaqueContextResizeTensor(TfLiteOpaqueContext* context,
TfLiteOpaqueTensor* tensor,
TfLiteIntArray* new_size) {
TfLiteContext* tflite_context = reinterpret_cast<TfLiteContext*>(context);
return tflite_context->ResizeTensor(
tflite_context, reinterpret_cast<TfLiteTensor*>(tensor), new_size);
}
TfLiteStatus TfLiteOpaqueContextAcquireSubgraphContext(
struct TfLiteOpaqueContext* opaque_context, int subgraph_index,
TfLiteOpaqueContext** acquired_opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
TfLiteContext* acquired_context;
TfLiteStatus status =
subgraph->AcquireSubgraphContext(subgraph_index, &acquired_context);
if (status != kTfLiteOk) {
return status;
}
*acquired_opaque_context = Convert(acquired_context);
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextReleaseSubgraphContext(
struct TfLiteOpaqueContext* opaque_context, int subgraph_index) {
return GetSubgraph(opaque_context)->ReleaseSubgraphContext(subgraph_index);
}
TfLiteStatus TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
TfLiteOpaqueContext* opaque_context, int subgraph_index) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->MarkSubgraphAsDelegationSkippable(subgraph_index);
}
TfLiteStatus TfLiteOpaqueContextGetNodeInitDataMmapInfo(
const TfLiteOpaqueContext* context, const TfLiteOpaqueNode* node, int* fd,
int64_t* custom_initial_data_offset_in_file,
int64_t* custom_initial_data_size) {
auto* subgraph = GetSubgraph(context);
return subgraph->GetNodeInitDataMmapInfo(Convert(node), fd,
custom_initial_data_offset_in_file,
custom_initial_data_size);
}
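// Validates the builder before adding the tensor: kTfLiteDynamic tensors must
// supply a data pointer, while arena-allocated tensors must not, because the
// arena owns their storage.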
TfLiteStatus TfLiteOpaqueContextAddTensor(TfLiteOpaqueContext* context,
TfLiteOpaqueTensorBuilder* builder,
int* new_tensor_index) {
if (builder->allocation_type != kTfLiteDynamic &&
builder->allocation_type != kTfLiteArenaRw &&
builder->allocation_type != kTfLiteArenaRwPersistent) {
TfLiteOpaqueContextReportError(
context,
"Invalid allocation type '%d'. Allocation type for "
"TfLiteOpaqueContextAddTensor must be one of: "
"'kTfLiteDynamic', 'kTfLiteArenaRw' or 'kTfLiteArenaRwPersistent'.",
builder->allocation_type);
return kTfLiteError;
}
if (builder->allocation_type == kTfLiteDynamic && builder->data == nullptr) {
TfLiteOpaqueContextReportError(context,
"For tensors of allocation type "
"'kTfLiteDynamic' 'data' must be provided.");
return kTfLiteError;
}
if ((builder->allocation_type == kTfLiteArenaRw ||
builder->allocation_type == kTfLiteArenaRwPersistent) &&
builder->data != nullptr) {
TfLiteOpaqueContextReportError(
context,
"For tensors of allocation type "
"'kTfLiteArenaRw' or 'kTfLiteArenaRwPersistent' "
"'data' must not be provided.");
return kTfLiteError;
}
auto* tflite_context = Convert(context);
int index = -1;
auto status = tflite_context->AddTensors(tflite_context, 1, &index);
if (status != kTfLiteOk) return status;
tflite_context->tensors[index].type = builder->type;
tflite_context->tensors[index].data.data = builder->data;
tflite_context->tensors[index].allocation_type = builder->allocation_type;
tflite_context->tensors[index].params = builder->quantization_params;
tflite_context->tensors[index].quantization = builder->quantization;
if (new_tensor_index != nullptr) {
*new_tensor_index = index;
}
return status;
}
TfLiteStatus TfLiteOpaqueContextGetSizeOfType(TfLiteOpaqueContext* context,
const TfLiteType type,
size_t* bytes) {
return tflite::GetSizeOfType(Convert(context), type, bytes);
}
void TfLiteOpaqueContextReportError(struct TfLiteOpaqueContext* opaque_context,
const char* format, ...) {
va_list vlist;
va_start(vlist, format);
TfLiteOpaqueContextReportErrorVa(opaque_context, format, vlist);
va_end(vlist);
}
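// Formats the message in two passes: the first vsnprintf() call computes the
// required buffer size, the second writes into a heap buffer that is then
// handed to the context's error reporter.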
void TfLiteOpaqueContextReportErrorVa(
struct TfLiteOpaqueContext* opaque_context, const char* format,
va_list vlist) {
va_list copy;
va_copy(copy, vlist);
  int n = vsnprintf(nullptr, 0, format, copy);
  va_end(copy);
if (n < 0) {
return;
}
  size_t size = static_cast<size_t>(n) + 1;
char* buffer = new char[size];
n = vsnprintf(buffer, size, format, vlist);
if (n < 0) {
delete[] buffer;
return;
}
auto* context = reinterpret_cast<TfLiteContext*>(opaque_context);
TF_LITE_KERNEL_LOG(context, "%s", buffer);
delete[] buffer;
}
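// Opaque delegate creation and destruction allocate on the heap, so they are
// compiled out of TF_LITE_STATIC_MEMORY builds.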
#ifndef TF_LITE_STATIC_MEMORY
TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate(
const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder) {
if (!opaque_delegate_builder) return nullptr;
TfLiteDelegate* result = new TfLiteDelegate{};
result->opaque_delegate_builder = new TfLiteOpaqueDelegateBuilder{};
*(result->opaque_delegate_builder) = *opaque_delegate_builder;
return reinterpret_cast<TfLiteOpaqueDelegate*>(result);
}
void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* opaque_delegate) {
if (!opaque_delegate) return;
const TfLiteDelegate* tflite_delegate =
reinterpret_cast<const TfLiteDelegate*>(opaque_delegate);
delete tflite_delegate->opaque_delegate_builder;
delete tflite_delegate;
}
#endif
void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate) {
if (!delegate) return nullptr;
const auto* tflite_delegate =
reinterpret_cast<const TfLiteDelegate*>(delegate);
if (!tflite_delegate->opaque_delegate_builder) return tflite_delegate->data_;
return tflite_delegate->opaque_delegate_builder->data;
} | #include "tensorflow/lite/core/c/c_api_opaque.h"
#include <stddef.h>
#include <cstring>
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api.h"
namespace tflite {
namespace {
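// The TestTfLiteOpaqueTensorGet* cases below verify, for every allocation
// type, that each opaque accessor agrees with its non-opaque TfLiteTensor
// counterpart.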
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithMemNoneBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithMmapRoBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithArenaRwBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithArenaRwPersistentBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithDynamicBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithPersistentRoBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithCustomBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithVariantObjectBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithMemNoneBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithMmapRoBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithArenaRwBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithArenaRwPersistentBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithDynamicBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithPersistentRoBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithCustomBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithVariantObjectBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorData, ValidInput) {
TfLiteTensor t;
char data[] = "data";
t.data.raw = data;
EXPECT_EQ(TfLiteOpaqueTensorData(reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
data);
}
TEST(TestTfLiteOpaqueTensorData, NullInput) {
EXPECT_EQ(TfLiteOpaqueTensorData(nullptr), nullptr);
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithMemNoneBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithMmapRoBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithArenaRwBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithArenaRwPersistentBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithDynamicBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithPersistentRoBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithCustomBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithVariantObjectBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithMemNoneBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithMmapRoBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithArenaRwBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithArenaRwPersistentBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithDynamicBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithPersistentRoBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithCustomBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithVariantObjectBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithMemNoneBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithMmapRoBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithArenaRwBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithArenaRwPersistentBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithDynamicBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithPersistentRoBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithCustomBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithVariantObjectBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueDelegate, CreateAndDelete) {
  auto opaque_delegate_builder =
      std::make_unique<TfLiteOpaqueDelegateBuilder>();
TfLiteOpaqueDelegate* opaque_delegate =
TfLiteOpaqueDelegateCreate(opaque_delegate_builder.get());
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
TEST(TestTfLiteOpaqueDelegate, Create_WithNull) {
EXPECT_EQ(nullptr, TfLiteOpaqueDelegateCreate(nullptr));
}
TEST(TestTfLiteOpaqueDelegate, Delete_WithNull) {
TfLiteOpaqueDelegateDelete(nullptr);
}
TEST(TestTfLiteOpaqueDelegate, GetData_WellFormedOpaqueDelegate) {
int delegate_data = 42;
TfLiteOpaqueDelegateBuilder builder{};
builder.data = &delegate_data;
TfLiteOpaqueDelegate* opaque_delegate = TfLiteOpaqueDelegateCreate(&builder);
EXPECT_EQ(&delegate_data, TfLiteOpaqueDelegateGetData(opaque_delegate));
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
TEST(TestTfLiteOpaqueDelegate,
GetData_NotConstructedWithTfLiteOpaqueDelegateCreate) {
int delegate_data = 42;
TfLiteDelegate non_opaque_delegate = TfLiteDelegateCreate();
non_opaque_delegate.data_ = &delegate_data;
auto* opaque_delegate =
reinterpret_cast<TfLiteOpaqueDelegate*>(&non_opaque_delegate);
EXPECT_EQ(&delegate_data, TfLiteOpaqueDelegateGetData(opaque_delegate));
}
TEST(TestTfLiteOpaqueDelegate, GetData_NoDataSetViaOpaqueDelegateBuilder) {
TfLiteOpaqueDelegateBuilder builder{};
TfLiteOpaqueDelegate* opaque_delegate = TfLiteOpaqueDelegateCreate(&builder);
EXPECT_EQ(nullptr, TfLiteOpaqueDelegateGetData(opaque_delegate));
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
namespace my_custom_op {
struct MyOpData {
int temp_tensor_index;
};
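// A custom "Sinh" kernel used by the tests below. Despite the name it simply
// copies its input through a temporary tensor to its output, which lets the
// tests exercise TfLiteOpaqueNodeSetTemporaries() and the tensor-builder API
// end to end.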
void* Init(TfLiteOpaqueContext* context, const char* buffer, size_t length) {
auto* op_data = new MyOpData{};
return op_data;
}
void Free(TfLiteOpaqueContext* context, void* buffer) {
delete reinterpret_cast<MyOpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
auto* op_data =
reinterpret_cast<MyOpData*>(TfLiteOpaqueNodeGetUserData(node));
const int num_temporaries = 1;
int temporary_tensor_indices[num_temporaries];
  // A negative count must be rejected; a zero count resets the node to an
  // empty temporaries list.
  TfLiteStatus status = TfLiteOpaqueNodeSetTemporaries(
      node, temporary_tensor_indices, /*num_temporaries=*/-1);
  TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteError);
  status = TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
                                          /*num_temporaries=*/0);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
TfLiteOpaqueTensorBuilder* builder = TfLiteOpaqueTensorBuilderCreate();
TfLiteOpaqueTensorBuilderSetType(builder, kTfLiteFloat32);
TfLiteOpaqueTensorBuilderSetAllocationType(builder, kTfLiteArenaRw);
  status = TfLiteOpaqueContextAddTensor(context, builder,
                                        &temporary_tensor_indices[0]);
  TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
TfLiteOpaqueTensorBuilderDelete(builder);
status = TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
num_temporaries);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
op_data->temp_tensor_index = temporary_tensor_indices[0];
TfLiteOpaqueTensor* temp_tensor =
TfLiteOpaqueContextGetOpaqueTensor(context, op_data->temp_tensor_index);
TfLiteIntArray* temp_size = TfLiteIntArrayCreate(1);
temp_size->data[0] = 1;
return TfLiteOpaqueContextResizeTensor(context, temp_tensor, temp_size);
}
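// Verifies that the temporary registered in Prepare() is visible through
// TfLiteOpaqueNodeTemporaries(), then routes the input through it to the
// output.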
TfLiteStatus Invoke(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
auto* op_data =
reinterpret_cast<MyOpData*>(TfLiteOpaqueNodeGetUserData(node));
const int* temporary_tensor_indices;
int num_temporaries;
TfLiteOpaqueNodeTemporaries(node, &temporary_tensor_indices,
&num_temporaries);
TF_LITE_OPAQUE_ENSURE(context, num_temporaries == 1);
TF_LITE_OPAQUE_ENSURE(
context, temporary_tensor_indices[0] == op_data->temp_tensor_index);
TfLiteOpaqueTensor* temp_tensor =
TfLiteOpaqueContextGetOpaqueTensor(context, op_data->temp_tensor_index);
TF_LITE_OPAQUE_ENSURE(context,
TfLiteOpaqueTensorType(temp_tensor) == kTfLiteFloat32);
TF_LITE_OPAQUE_ENSURE(context, TfLiteOpaqueTensorGetAllocationType(
temp_tensor) == kTfLiteArenaRw);
size_t temp_bytes = TfLiteOpaqueTensorByteSize(temp_tensor);
void* temp_data = TfLiteOpaqueTensorData(temp_tensor);
TF_LITE_OPAQUE_ENSURE(context, temp_bytes != 0);
TF_LITE_OPAQUE_ENSURE(context, temp_data != nullptr);
EXPECT_EQ(1, TfLiteOpaqueNodeNumberOfInputs(node));
const TfLiteOpaqueTensor* input = TfLiteOpaqueNodeGetInput(context, node, 0);
size_t input_bytes = TfLiteOpaqueTensorByteSize(input);
void* input_data = TfLiteOpaqueTensorData(input);
EXPECT_EQ(input_bytes, temp_bytes);
std::memcpy(temp_data, input_data, input_bytes);
EXPECT_EQ(1, TfLiteOpaqueNodeNumberOfOutputs(node));
TfLiteOpaqueTensor* output = TfLiteOpaqueNodeGetOutput(context, node, 0);
size_t output_bytes = TfLiteOpaqueTensorByteSize(output);
void* output_data = TfLiteOpaqueTensorData(output);
EXPECT_EQ(output_bytes, temp_bytes);
std::memcpy(output_data, temp_data, output_bytes);
return kTfLiteOk;
}
}
TEST(TestTfLiteOpaqueNode, CustomOpWithSetAndGetTemporaries) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
  TfLiteOperator* reg = TfLiteOperatorCreate(
      kTfLiteBuiltinCustom, "Sinh", /*version=*/1, /*user_data=*/nullptr);
TfLiteOperatorSetPrepare(reg, my_custom_op::Prepare);
TfLiteOperatorSetInit(reg, my_custom_op::Init);
TfLiteOperatorSetFree(reg, my_custom_op::Free);
TfLiteOperatorSetInvoke(reg, my_custom_op::Invoke);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithLegacyCallbacks) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
  TfLiteOperator* reg = TfLiteOperatorCreate(
      kTfLiteBuiltinCustom, "Sinh", /*version=*/1, /*user_data=*/nullptr);
TfLiteOperatorSetPrepare(reg, [](auto context, auto node) {
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInit(reg, [](auto context, auto buffer, auto length) {
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFree(
reg, [](auto context, auto data) { my_custom_op::Free(context, data); });
TfLiteOperatorSetInvoke(reg, [](auto context, auto node) {
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
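// Uses the *WithData callback variants and checks that the user_data pointer
// is null when the operator was created without one.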
TEST(TestTfLiteOpaqueNode, CustomOpWithNoUserData) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
  TfLiteOperator* reg = TfLiteOperatorCreate(
      kTfLiteBuiltinCustom, "Sinh", /*version=*/1, /*user_data=*/nullptr);
TfLiteOperatorSetPrepareWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInitWithData(
reg, [](auto user_data, auto context, auto buffer, auto length) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFreeWithData(reg,
[](auto user_data, auto context, auto data) {
EXPECT_EQ(nullptr, user_data);
my_custom_op::Free(context, data);
});
TfLiteOperatorSetInvokeWithData(reg,
[](auto user_data, auto context, auto node) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
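// As above, but the operator is created with a non-null user_data pointer
// (345) that every callback must receive.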
TEST(TestTfLiteOpaqueNode, CustomOpWithData) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
  TfLiteOperator* reg = TfLiteOperatorCreate(
      kTfLiteBuiltinCustom, "Sinh", /*version=*/1,
      /*user_data=*/reinterpret_cast<void*>(345));
TfLiteOperatorSetPrepareWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInitWithData(
reg, [](auto user_data, auto context, auto buffer, auto length) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFreeWithData(
reg, [](auto user_data, auto context, auto data) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
my_custom_op::Free(context, data);
});
TfLiteOperatorSetInvokeWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/c/c_api_opaque.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/c/c_api_opaque_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c190acea-833e-4eb8-9ce8-56690a50f14c | cpp | tensorflow/tensorflow | c_api_experimental | tensorflow/c/eager/c_api_experimental.cc | tensorflow/c/eager/c_api_experimental_test.cc | #include "tensorflow/c/eager/c_api_experimental.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/time/time.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "xla/tsl/c/tsl_status_internal.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/framework/cancellation.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/distributed_runtime/coordination/coordination_service_error_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/strcat.h"
using tensorflow::string;
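// Resets `op_to_reset` in place to the given op/function name and device so
// the op object can be reused; reports InvalidArgument for a null op.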
void TFE_OpReset(TFE_Op* op_to_reset, const char* op_or_function_name,
const char* raw_device_name, TF_Status* status) {
if (op_to_reset) {
tensorflow::ImmediateExecutionOperation* op =
tensorflow::unwrap(op_to_reset);
op->Clear();
status->status = op->Reset(op_or_function_name, raw_device_name);
} else {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"op_to_reset should not be nullptr");
}
}
void TFE_ContextEnableGraphCollection(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->SetShouldStoreGraphs(true);
}
void TFE_ContextDisableGraphCollection(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->SetShouldStoreGraphs(false);
}
uint64_t TFE_GetContextId(TFE_Context* ctx) {
tensorflow::EagerContext* context =
tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
return context->GetContextId();
}
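// Monitoring counters: TFE_MonitoringCounter<N> wraps a counter with N string
// labels. GetCellCounter<N> returns the cell for one label combination, and
// cells accumulate int64 increments.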
void TFE_MonitoringCounterCellIncrementBy(TFE_MonitoringCounterCell* cell,
int64_t value) {
cell->cell.IncrementBy(value);
}
int64_t TFE_MonitoringCounterCellValue(TFE_MonitoringCounterCell* cell) {
return cell->cell.value();
}
TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(const char* name,
TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringCounter0({name, description});
tsl::Set_TF_Status_from_Status(status, result->counter->GetStatus());
if (!result->counter->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteCounter0(TFE_MonitoringCounter0* counter) {
delete counter;
}
TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter0(
TFE_MonitoringCounter0* counter) {
return static_cast<TFE_MonitoringCounterCell*>(
static_cast<void*>(counter->counter->GetCell()));
}
TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(const char* name,
TF_Status* status,
const char* description,
const char* label1) {
auto* result = new TFE_MonitoringCounter1({name, description, label1});
tsl::Set_TF_Status_from_Status(status, result->counter->GetStatus());
if (!result->counter->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteCounter1(TFE_MonitoringCounter1* counter) {
delete counter;
}
TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter1(
TFE_MonitoringCounter1* counter, const char* label1) {
return static_cast<TFE_MonitoringCounterCell*>(
static_cast<void*>(counter->counter->GetCell(label1)));
}
TFE_MonitoringCounter2* TFE_MonitoringNewCounter2(const char* name,
TF_Status* status,
const char* description,
const char* label1,
const char* label2) {
auto* result =
new TFE_MonitoringCounter2({name, description, label1, label2});
tsl::Set_TF_Status_from_Status(status, result->counter->GetStatus());
if (!result->counter->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteCounter2(TFE_MonitoringCounter2* counter) {
delete counter;
}
TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter2(
TFE_MonitoringCounter2* counter, const char* label1, const char* label2) {
return static_cast<TFE_MonitoringCounterCell*>(
static_cast<void*>(counter->counter->GetCell(label1, label2)));
}
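// Integer gauges: like counters, but cells hold a settable int64 value.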
void TFE_MonitoringIntGaugeCellSet(TFE_MonitoringIntGaugeCell* cell,
int64_t value) {
cell->cell.Set(value);
}
int64_t TFE_MonitoringIntGaugeCellValue(TFE_MonitoringIntGaugeCell* cell) {
return cell->cell.value();
}
TFE_MonitoringIntGauge0* TFE_MonitoringNewIntGauge0(const char* name,
TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringIntGauge0({name, description});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteIntGauge0(TFE_MonitoringIntGauge0* gauge) {
delete gauge;
}
TFE_MonitoringIntGaugeCell* TFE_MonitoringGetCellIntGauge0(
TFE_MonitoringIntGauge0* gauge) {
return static_cast<TFE_MonitoringIntGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell()));
}
TFE_MonitoringIntGauge1* TFE_MonitoringNewIntGauge1(const char* name,
TF_Status* status,
const char* description,
const char* label1) {
auto* result = new TFE_MonitoringIntGauge1({name, description, label1});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteIntGauge1(TFE_MonitoringIntGauge1* gauge) {
delete gauge;
}
TFE_MonitoringIntGaugeCell* TFE_MonitoringGetCellIntGauge1(
TFE_MonitoringIntGauge1* gauge, const char* label1) {
return static_cast<TFE_MonitoringIntGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1)));
}
TFE_MonitoringIntGauge2* TFE_MonitoringNewIntGauge2(const char* name,
TF_Status* status,
const char* description,
const char* label1,
const char* label2) {
auto* result =
new TFE_MonitoringIntGauge2({name, description, label1, label2});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteIntGauge2(TFE_MonitoringIntGauge2* gauge) {
delete gauge;
}
TFE_MonitoringIntGaugeCell* TFE_MonitoringGetCellIntGauge2(
TFE_MonitoringIntGauge2* gauge, const char* label1, const char* label2) {
return static_cast<TFE_MonitoringIntGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1, label2)));
}
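// String gauges: cell values are copied out through a TF_Buffer whose
// data_deallocator frees the malloc'd copy.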
void TFE_MonitoringStringGaugeCellSet(TFE_MonitoringStringGaugeCell* cell,
const char* value) {
cell->cell.Set({value});
}
const void TFE_MonitoringStringGaugeCellValue(
TFE_MonitoringStringGaugeCell* cell, TF_Buffer* buf) {
tensorflow::string value = cell->cell.value();
void* data = tensorflow::port::Malloc(value.length());
value.copy(static_cast<char*>(data), value.length(), 0);
buf->data = data;
buf->length = value.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
}
TFE_MonitoringStringGauge0* TFE_MonitoringNewStringGauge0(
const char* name, TF_Status* status, const char* description) {
auto* result = new TFE_MonitoringStringGauge0({name, description});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteStringGauge0(TFE_MonitoringStringGauge0* gauge) {
delete gauge;
}
TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge0(
TFE_MonitoringStringGauge0* gauge) {
return static_cast<TFE_MonitoringStringGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell()));
}
TFE_MonitoringStringGauge1* TFE_MonitoringNewStringGauge1(
const char* name, TF_Status* status, const char* description,
const char* label1) {
auto* result = new TFE_MonitoringStringGauge1({name, description, label1});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteStringGauge1(TFE_MonitoringStringGauge1* gauge) {
delete gauge;
}
TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge1(
TFE_MonitoringStringGauge1* gauge, const char* label1) {
return static_cast<TFE_MonitoringStringGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1)));
}
TFE_MonitoringStringGauge2* TFE_MonitoringNewStringGauge2(
const char* name, TF_Status* status, const char* description,
const char* label1, const char* label2) {
auto* result =
new TFE_MonitoringStringGauge2({name, description, label1, label2});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteStringGauge2(TFE_MonitoringStringGauge2* gauge) {
delete gauge;
}
TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge2(
TFE_MonitoringStringGauge2* gauge, const char* label1, const char* label2) {
return static_cast<TFE_MonitoringStringGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1, label2)));
}
TFE_MonitoringStringGauge3* TFE_MonitoringNewStringGauge3(
const char* name, TF_Status* status, const char* description,
const char* label1, const char* label2, const char* label3) {
auto* result = new TFE_MonitoringStringGauge3(
{name, description, label1, label2, label3});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteStringGauge3(TFE_MonitoringStringGauge3* gauge) {
delete gauge;
}
TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge3(
TFE_MonitoringStringGauge3* gauge, const char* label1, const char* label2,
const char* label3) {
return static_cast<TFE_MonitoringStringGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1, label2, label3)));
}
TFE_MonitoringStringGauge4* TFE_MonitoringNewStringGauge4(
const char* name, TF_Status* status, const char* description,
const char* label1, const char* label2, const char* label3,
const char* label4) {
auto* result = new TFE_MonitoringStringGauge4(
{name, description, label1, label2, label3, label4});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteStringGauge4(TFE_MonitoringStringGauge4* gauge) {
delete gauge;
}
TFE_MonitoringStringGaugeCell* TFE_MonitoringGetCellStringGauge4(
TFE_MonitoringStringGauge4* gauge, const char* label1, const char* label2,
const char* label3, const char* label4) {
return static_cast<TFE_MonitoringStringGaugeCell*>(static_cast<void*>(
gauge->gauge->GetCell(label1, label2, label3, label4)));
}
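// Boolean gauges: cells hold a settable bool value.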
void TFE_MonitoringBoolGaugeCellSet(TFE_MonitoringBoolGaugeCell* cell,
bool value) {
cell->cell.Set(value);
}
bool TFE_MonitoringBoolGaugeCellValue(TFE_MonitoringBoolGaugeCell* cell) {
return cell->cell.value();
}
TFE_MonitoringBoolGauge0* TFE_MonitoringNewBoolGauge0(const char* name,
TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringBoolGauge0({name, description});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteBoolGauge0(TFE_MonitoringBoolGauge0* gauge) {
delete gauge;
}
TFE_MonitoringBoolGaugeCell* TFE_MonitoringGetCellBoolGauge0(
TFE_MonitoringBoolGauge0* gauge) {
return static_cast<TFE_MonitoringBoolGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell()));
}
TFE_MonitoringBoolGauge1* TFE_MonitoringNewBoolGauge1(const char* name,
TF_Status* status,
const char* description,
const char* label1) {
auto* result = new TFE_MonitoringBoolGauge1({name, description, label1});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteBoolGauge1(TFE_MonitoringBoolGauge1* gauge) {
delete gauge;
}
TFE_MonitoringBoolGaugeCell* TFE_MonitoringGetCellBoolGauge1(
TFE_MonitoringBoolGauge1* gauge, const char* label1) {
return static_cast<TFE_MonitoringBoolGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1)));
}
TFE_MonitoringBoolGauge2* TFE_MonitoringNewBoolGauge2(const char* name,
TF_Status* status,
const char* description,
const char* label1,
const char* label2) {
auto* result =
new TFE_MonitoringBoolGauge2({name, description, label1, label2});
tsl::Set_TF_Status_from_Status(status, result->gauge->GetStatus());
if (!result->gauge->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteBoolGauge2(TFE_MonitoringBoolGauge2* gauge) {
delete gauge;
}
TFE_MonitoringBoolGaugeCell* TFE_MonitoringGetCellBoolGauge2(
TFE_MonitoringBoolGauge2* gauge, const char* label1, const char* label2) {
return static_cast<TFE_MonitoringBoolGaugeCell*>(
static_cast<void*>(gauge->gauge->GetCell(label1, label2)));
}
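// Samplers: cells record double samples into histogram buckets, and cell
// values are serialized HistogramProto messages written into a TF_Buffer.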
void TFE_MonitoringSamplerCellAdd(TFE_MonitoringSamplerCell* cell,
double value) {
cell->cell.Add(value);
}
void TFE_MonitoringSamplerCellValue(TFE_MonitoringSamplerCell* cell,
TF_Buffer* buf) {
string content;
cell->cell.value().SerializeToString(&content);
void* data = tensorflow::port::Malloc(content.length());
content.copy(static_cast<char*>(data), content.length(), 0);
buf->data = data;
buf->length = content.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
}
TFE_MonitoringBuckets* TFE_MonitoringNewExponentialBuckets(double scale,
double growth_factor,
int bucket_count) {
return new TFE_MonitoringBuckets([scale, growth_factor, bucket_count]() {
return tensorflow::monitoring::Buckets::Exponential(scale, growth_factor,
bucket_count);
});
}
void TFE_MonitoringDeleteBuckets(TFE_MonitoringBuckets* buckets) {
delete buckets;
}
TFE_MonitoringSampler0* TFE_MonitoringNewSampler0(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* status,
const char* description) {
auto* result = new TFE_MonitoringSampler0(
{name, buckets->create_buckets(), description});
tsl::Set_TF_Status_from_Status(status, result->sampler->GetStatus());
if (!result->sampler->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteSampler0(TFE_MonitoringSampler0* sampler) {
delete sampler;
}
TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler0(
TFE_MonitoringSampler0* sampler) {
return static_cast<TFE_MonitoringSamplerCell*>(
static_cast<void*>(sampler->sampler->GetCell()));
}
TFE_MonitoringSampler1* TFE_MonitoringNewSampler1(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* status,
const char* description, const char* label1) {
auto* result = new TFE_MonitoringSampler1(
{name, buckets->create_buckets(), description, label1});
tsl::Set_TF_Status_from_Status(status, result->sampler->GetStatus());
if (!result->sampler->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteSampler1(TFE_MonitoringSampler1* sampler) {
delete sampler;
}
TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler1(
TFE_MonitoringSampler1* sampler, const char* label1) {
return static_cast<TFE_MonitoringSamplerCell*>(
static_cast<void*>(sampler->sampler->GetCell(label1)));
}
TFE_MonitoringSampler2* TFE_MonitoringNewSampler2(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* status,
const char* description, const char* label1, const char* label2) {
auto* result = new TFE_MonitoringSampler2(
{name, buckets->create_buckets(), description, label1, label2});
tsl::Set_TF_Status_from_Status(status, result->sampler->GetStatus());
if (!result->sampler->GetStatus().ok()) {
delete result;
return nullptr;
}
return result;
}
void TFE_MonitoringDeleteSampler2(TFE_MonitoringSampler2* sampler) {
delete sampler;
}
TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler2(
TFE_MonitoringSampler2* sampler, const char* label1, const char* label2) {
return static_cast<TFE_MonitoringSamplerCell*>(
static_cast<void*>(sampler->sampler->GetCell(label1, label2)));
}
void TFE_ContextOptionsSetTfrt(TFE_ContextOptions* options, bool use_tfrt) {
options->use_tfrt = use_tfrt;
}
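// Cancellation manager: thin wrappers over tensorflow::CancellationManager.
// RegisterCallback adapts the C callback/context pair into a std::function.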
TFE_CancellationManager* TFE_NewCancellationManager() {
return tensorflow::wrap(new tensorflow::CancellationManager);
}
void TFE_CancellationManagerStartCancel(
TFE_CancellationManager* cancellation_manager) {
tensorflow::unwrap(cancellation_manager)->StartCancel();
}
bool TFE_CancellationManagerIsCancelled(
TFE_CancellationManager* cancellation_manager) {
return tensorflow::unwrap(cancellation_manager)->IsCancelled();
}
bool TFE_CancellationManagerIsCancelling(
TFE_CancellationManager* cancellation_manager) {
return tensorflow::unwrap(cancellation_manager)->IsCancelling();
}
TFE_CancellationToken TFE_CancellationManagerGetToken(
TFE_CancellationManager* cancellation_manager) {
return tensorflow::unwrap(cancellation_manager)->get_cancellation_token();
}
bool TFE_CancellationManagerRegisterCallback(
TFE_CancellationManager* cancellation_manager, TFE_CancellationToken token,
const TFE_CancelCallback* c_callback, const char* callback_name) {
tensorflow::CancelCallback callback = [callback = c_callback->callback,
context = c_callback->context]() {
callback(context);
};
return tensorflow::unwrap(cancellation_manager)
->RegisterCallbackWithErrorLogging(token, callback, callback_name);
}
bool TFE_CancellationManagerDeregisterCallback(
TFE_CancellationManager* cancellation_manager,
TFE_CancellationToken token) {
return tensorflow::unwrap(cancellation_manager)->DeregisterCallback(token);
}
bool TFE_CancellationManagerTryDeregisterCallback(
TFE_CancellationManager* cancellation_manager,
TFE_CancellationToken token) {
return tensorflow::unwrap(cancellation_manager)->TryDeregisterCallback(token);
}
void TFE_DeleteCancellationManager(
TFE_CancellationManager* cancellation_manager) {
delete tensorflow::unwrap(cancellation_manager);
}
void TFE_OpSetCancellationManager(TFE_Op* op,
TFE_CancellationManager* cancellation_manager,
TF_Status* status) {
tensorflow::unwrap(op)->SetCancellationManager(
tensorflow::unwrap(cancellation_manager));
status->status = absl::OkStatus();
}
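// Executors control whether ops run synchronously or are enqueued
// asynchronously; each thread of a context has its own executor, which these
// functions create, swap, and drain.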
TFE_Executor* TFE_NewExecutor(bool is_async, bool enable_streaming_enqueue,
int in_flight_nodes_limit) {
return new TFE_Executor(is_async, enable_streaming_enqueue,
in_flight_nodes_limit);
}
void TFE_DeleteExecutor(TFE_Executor* executor) { delete executor; }
bool TFE_ExecutorIsAsync(TFE_Executor* executor) {
return executor->executor()->Async();
}
void TFE_ExecutorWaitForAllPendingNodes(TFE_Executor* executor,
TF_Status* status) {
status->status = executor->executor()->WaitForAllPendingNodes();
}
void TFE_ExecutorClearError(TFE_Executor* executor) {
executor->executor()->ClearError();
}
void TFE_ContextSetExecutorForThread(TFE_Context* ctx, TFE_Executor* executor) {
tensorflow::unwrap(ctx)->SetExecutorForThread(executor->executor());
}
TFE_Executor* TFE_ContextGetExecutorForThread(TFE_Context* ctx) {
return new TFE_Executor(&tensorflow::unwrap(ctx)->Executor());
}
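// The functions below return variable-length results by copying a serialized
// string into a caller-provided TF_Buffer; the installed data_deallocator
// frees that copy.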
void TFE_HostAddressSpace(TFE_Context* ctx, TF_Buffer* buf) {
auto address_space = tensorflow::DeviceNameUtils::AddressSpace(
tensorflow::unwrap(ctx)->HostCPUParsedName());
auto str = tensorflow::DeviceNameUtils::ParsedNameToString(address_space);
void* data = tensorflow::port::Malloc(str.length());
str.copy(static_cast<char*>(data), str.length(), 0);
buf->data = data;
buf->length = str.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
}
void TFE_ContextGetFunctionDef(TFE_Context* ctx, const char* function_name,
TF_Buffer* buf, TF_Status* status) {
auto* function_def = tensorflow::unwrap(ctx)->FindFunctionDef(function_name);
if (function_def == nullptr) {
status->status = tensorflow::errors::NotFound(
"Unable to find FunctionDef with name: ", function_name);
return;
}
string str = function_def->SerializeAsString();
void* data = tensorflow::port::Malloc(str.length());
str.copy(static_cast<char*>(data), str.length(), 0);
buf->data = data;
buf->length = str.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
status->status = absl::OkStatus();
}
void TFE_ContextGetGraphDebugInfo(TFE_Context* ctx, const char* function_name,
TF_Buffer* buf, TF_Status* status) {
auto function_record = tensorflow::unwrap(ctx)->FindRecord(function_name);
if (function_record == nullptr) {
status->status = tensorflow::errors::NotFound(
"Unable to find function with name: ", function_name);
return;
}
tensorflow::GraphDebugInfo debug_info =
tensorflow::StackTracesMapToGraphDebugInfo(
function_record->stack_traces());
string str = debug_info.SerializeAsString();
void* data = tensorflow::port::Malloc(str.length());
str.copy(static_cast<char*>(data), str.length(), 0);
buf->data = data;
buf->length = str.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
status->status = absl::OkStatus();
}
TF_Tensor* TFE_AllocateHostTensor(TFE_Context* ctx, TF_DataType dtype,
const int64_t* dims, int num_dims,
TF_Status* status) {
std::vector<int64_t> dimvec(num_dims);
for (int i = 0; i < num_dims; ++i) {
dimvec[i] = static_cast<int64_t>(dims[i]);
}
if (ctx == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid Context");
return nullptr;
}
tensorflow::AbstractTensorInterface* t =
tensorflow::unwrap(ctx)->CreateTensor(
static_cast<tensorflow::DataType>(dtype), dimvec);
if (t == nullptr) {
status->status =
tensorflow::errors::InvalidArgument("Unsupported dtype: ", dtype);
return nullptr;
}
return new TF_Tensor{t};
}
TFE_TensorHandle* TFE_NewTensorHandleFromTensor(TFE_Context* ctx, TF_Tensor* t,
TF_Status* status) {
return tensorflow::wrap(
tensorflow::unwrap(ctx)->CreateLocalHandle(t->tensor));
}
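// Packs per-device handles into one parallel handle. If the first handle
// belongs to a custom device, packing is delegated to that device's Pack;
// otherwise TensorHandle::CreatePackedHandle combines the unwrapped handles.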
TFE_TensorHandle* TFE_CreatePackedTensorHandle(TFE_Context* ctx,
TFE_TensorHandle** handles,
int* num_handles,
TF_Status* status) {
std::vector<tensorflow::TensorHandle*> tensor_handles;
tensor_handles.reserve(*num_handles);
for (int i = 0; i < *num_handles; ++i) {
tensorflow::ImmediateExecutionTensorHandle* unwrapped_handle =
tensorflow::unwrap(handles[i]);
if (tensorflow::CustomDeviceTensorHandle::classof(unwrapped_handle)) {
auto* custom_device_handle =
tensorflow::down_cast<tensorflow::CustomDeviceTensorHandle*>(
unwrapped_handle);
tensorflow::ImmediateExecutionTensorHandle* result;
status->status = custom_device_handle->device()->Pack(
absl::Span<tensorflow::ImmediateExecutionTensorHandle*>(
tensorflow::unwrap(handles), *num_handles),
&result);
return tensorflow::wrap(result);
}
tensor_handles.push_back(
tensorflow::TensorHandleFromInterface(unwrapped_handle));
}
tensorflow::EagerContext* context =
tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
tensorflow::TensorHandle* handle = nullptr;
status->status = tensorflow::TensorHandle::CreatePackedHandle(
std::move(tensor_handles), context, &handle);
return tensorflow::wrap(handle);
}
void TFE_ContextSetSoftDevicePlacement(TFE_Context* ctx, unsigned char enable,
TF_Status* status) {
tensorflow::unwrap(ctx)->SetAllowSoftPlacement(enable);
}
void TFE_ContextSetLogDevicePlacement(TFE_Context* ctx, unsigned char enable,
TF_Status* status) {
tensorflow::unwrap(ctx)->SetLogDevicePlacement(enable);
}
void TFE_ContextSetRunEagerOpAsFunction(TFE_Context* ctx, unsigned char enable,
TF_Status* status) {
tensorflow::unwrap(ctx)->SetRunEagerOpAsFunction(enable);
}
void TFE_ContextSetJitCompileRewrite(TFE_Context* ctx, unsigned char enable,
TF_Status* status) {
tensorflow::unwrap(ctx)->SetJitCompileRewrite(enable);
}
const char* TFE_TensorHandleDeviceType(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
return tensorflow::unwrap(h)->DeviceType(&status->status);
}
int TFE_TensorHandleDeviceID(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
return tensorflow::unwrap(h)->DeviceId(&status->status);
}
TF_CAPI_EXPORT extern void TFE_TensorHandleGetStatus(TFE_TensorHandle* h,
TF_Status* status) {
status->status = tensorflow::unwrap(h)->TensorHandleStatus();
}
void TFE_GetExecutedOpNames(TFE_Context* ctx, TF_Buffer* buf,
TF_Status* status) {
const std::vector<std::string>& op_names =
tensorflow::unwrap(ctx)->GetLoggedOpsTestonly();
std::ostringstream op_names_oss;
for (const auto& op : op_names) {
op_names_oss << op << ", ";
}
const std::string& op_names_str = op_names_oss.str();
void* data = tensorflow::port::Malloc(op_names_str.length());
op_names_str.copy(static_cast<char*>(data), op_names_str.length(), 0);
buf->data = data;
buf->length = op_names_str.length();
buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
status->status = absl::OkStatus();
}
void TFE_SetLogicalCpuDevices(TFE_Context* ctx, int num_cpus,
const char* prefix, TF_Status* status) {
std::vector<std::unique_ptr<tensorflow::Device>> devices;
if (prefix == nullptr || strlen(prefix) == 0)
prefix = "/job:localhost/replica:0/task:0";
tensorflow::SessionOptions sess_options;
(*sess_options.config.mutable_device_count())["CPU"] = num_cpus;
status->status =
tensorflow::DeviceFactory::AddCpuDevices(sess_options, prefix, &devices);
for (auto d = devices.begin(); d != devices.end();) {
if (absl::StrContains(d->get()->name(), "CPU:0")) {
d = devices.erase(d);
} else {
++d;
}
}
status->status = tensorflow::unwrap(ctx)->AddDevices(std::move(devices));
}
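// Coordination-service helpers: each call fails with FailedPrecondition when
// the context's distributed manager has no coordination service agent.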
void TFE_InsertConfigKeyValue(TFE_Context* ctx, const char* key,
const char* value, TF_Status* status) {
tensorflow::ImmediateExecutionDistributedManager* dist_mgr =
tensorflow::unwrap(ctx)->GetDistributedManager();
tsl::CoordinationServiceAgent* coord_agent =
dist_mgr->GetCoordinationServiceAgent();
if (coord_agent == nullptr) {
status->status = tensorflow::errors::FailedPrecondition(
"Coordination service agent is not enabled.");
return;
}
status->status = coord_agent->InsertKeyValue(key, value);
}
void TFE_GetConfigKeyValue(TFE_Context* ctx, const char* key,
int64_t timeout_in_ms, TF_Buffer* value_buf,
TF_Status* status) {
tensorflow::ImmediateExecutionDistributedManager* dist_mgr =
tensorflow::unwrap(ctx)->GetDistributedManager();
tsl::CoordinationServiceAgent* coord_agent =
dist_mgr->GetCoordinationServiceAgent();
if (coord_agent == nullptr) {
status->status = tensorflow::errors::FailedPrecondition(
"Coordination service is not enabled.");
return;
}
absl::Duration timeout;
if (timeout_in_ms > 0) {
timeout = absl::Milliseconds(timeout_in_ms);
} else {
timeout = absl::InfiniteDuration();
}
auto status_or_value = coord_agent->GetKeyValue(key, timeout);
status->status = status_or_value.status();
if (!status_or_value.ok()) return;
const std::string& value_string = status_or_value.value();
void* data = tensorflow::port::Malloc(value_string.length());
value_string.copy(static_cast<char*>(data), value_string.length(), 0);
value_buf->data = data;
value_buf->length = value_string.length();
value_buf->data_deallocator = [](void* data, size_t length) {
tensorflow::port::Free(data);
};
}
void TFE_DeleteConfigKeyValue(TFE_Context* ctx, const char* key,
TF_Status* status) {
tensorflow::ImmediateExecutionDistributedManager* dist_mgr =
tensorflow::unwrap(ctx)->GetDistributedManager();
tsl::CoordinationServiceAgent* coord_agent =
dist_mgr->GetCoordinationServiceAgent();
if (coord_agent == nullptr) {
status->status = tensorflow::errors::FailedPrecondition(
"Coordination service is not enabled.");
return;
}
status->status = coord_agent->DeleteKeyValue(key);
}
void TFE_ReportErrorToCluster(TFE_Context* ctx, int error_code,
const char* error_message, TF_Status* status) {
tensorflow::ImmediateExecutionDistributedManager* dist_mgr =
tensorflow::unwrap(ctx)->GetDistributedManager();
tsl::CoordinationServiceAgent* coord_agent =
dist_mgr->GetCoordinationServiceAgent();
if (coord_agent == nullptr) {
status->status = tensorflow::errors::FailedPrecondition(
"Coordination service is not enabled.");
return;
}
tensorflow::Status s(static_cast<absl::StatusCode>(error_code),
error_message);
status->status = coord_agent->ReportError(s);
}
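// Fetches the state of each task listed in `tasks`; `states` must point to an
// array of TF_Status with one entry per task. Error payloads carry the
// coordination error's source task.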
void TFE_GetTaskStates(TFE_Context* ctx, const TF_Buffer& tasks, void* states,
TF_Status* status) {
tensorflow::ImmediateExecutionDistributedManager* dist_mgr =
tensorflow::unwrap(ctx)->GetDistributedManager();
tsl::CoordinationServiceAgent* coord_agent =
dist_mgr->GetCoordinationServiceAgent();
if (coord_agent == nullptr) {
status->status = tensorflow::errors::FailedPrecondition(
"Coordination service is not enabled.");
return;
}
std::vector<tensorflow::CoordinatedTask> task_vec(tasks.length);
auto* task_iter = static_cast<const tensorflow::CoordinatedTask*>(tasks.data);
for (size_t i = 0; i < tasks.length; ++i) {
task_vec[i].set_job_name(task_iter->job_name());
task_vec[i].set_task_id(task_iter->task_id());
++task_iter;
}
auto results = coord_agent->GetTaskState(task_vec);
if (!results.ok()) {
status->status = results.status();
return;
}
auto* state_iter = static_cast<TF_Status*>(states);
for (size_t i = 0; i < tasks.length; ++i) {
const auto& result = (*results)[i];
TF_Status s;
TF_SetStatus(&s, static_cast<TF_Code>(result.error_code()),
std::string(result.error_message()).data());
if (TF_GetCode(&s) != TF_Code::TF_OK) {
tensorflow::CoordinationServiceError error;
*error.mutable_source_task() = result.error_payload().source_task();
TF_SetPayload(&s, tensorflow::CoordinationErrorPayloadKey().data(),
error.SerializeAsString().c_str());
}
*state_iter = std::move(s);
++state_iter;
}
status->status = absl::OkStatus();
}
void TFE_WaitAtBarrier(TFE_Context* ctx, const char* barrier_id,
int64_t barrier_timeout_in_ms, TF_Status* status) {
tensorflow::ImmediateExecutionDistributedManager* dist_mgr =
tensorflow::unwrap(ctx)->GetDistributedManager();
tsl::CoordinationServiceAgent* coord_agent =
dist_mgr->GetCoordinationServiceAgent();
if (coord_agent == nullptr) {
status->status = tensorflow::errors::FailedPrecondition(
"Coordination service is not enabled.");
return;
}
status->status = coord_agent->WaitAtBarrier(
barrier_id, absl::Milliseconds(barrier_timeout_in_ms), {});
}
void TFE_InitializeLocalOnlyContext(TFE_Context* ctx, int keep_alive_secs,
const void* proto, size_t proto_len,
TF_Status* status) {
tensorflow::ServerDef server_def;
if (!server_def.ParseFromArray(proto, proto_len)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid tensorflow.ServerDef protocol buffer");
return;
}
status->status =
tensorflow::unwrap(ctx)
->GetDistributedManager()
->InitializeLocalOnlyContext(server_def, keep_alive_secs);
} | #include "tensorflow/c/eager/c_api_experimental.h"
#include <string.h>
#include <string>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
using tensorflow::string;
namespace tensorflow {
namespace {
static bool HasSubstr(absl::string_view base, absl::string_view substr) {
bool ok = absl::StrContains(base, substr);
EXPECT_TRUE(ok) << base << ", expected substring " << substr;
return ok;
}
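// Monitoring tests: create a metric through the C API, mutate its cell, and
// check both the cell value and what the CollectionRegistry reports.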
TEST(CAPI, MonitoringCounter0) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0("test/counter", status, "description");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, 1);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 1);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/counter",
metrics->point_set_map.at("test/counter")->metric_name);
EXPECT_EQ(
1, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
TFE_MonitoringCounterCellIncrementBy(cell, 5);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 6);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(
6, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
TFE_MonitoringDeleteCounter0(counter);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(metrics->point_set_map.end(),
metrics->point_set_map.find("test/counter"));
}
TEST(CAPI, MonitoringCounterMultiple) {
TF_Status* status = TF_NewStatus();
auto* counter1 = TFE_MonitoringNewCounter1("test/counter1", status,
"description", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellCounter1(counter1, "test");
TFE_MonitoringCounterCellIncrementBy(cell1, 1);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell1), 1);
auto* counter2 = TFE_MonitoringNewCounter2("test/counter2", status,
"description", "label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
auto* cell2 = TFE_MonitoringGetCellCounter2(counter2, "foo", "bar");
TFE_MonitoringCounterCellIncrementBy(cell2, 2);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell2), 2);
TFE_MonitoringDeleteCounter1(counter1);
TFE_MonitoringDeleteCounter2(counter2);
}
TEST(CAPI, MonitoringGauge0) {
TF_Status* status = TF_NewStatus();
auto* gauge = TFE_MonitoringNewIntGauge0("test/gauge", status, "test");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell = TFE_MonitoringGetCellIntGauge0(gauge);
TFE_MonitoringIntGaugeCellSet(cell, 1);
EXPECT_EQ(TFE_MonitoringIntGaugeCellValue(cell), 1);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/gauge", metrics->point_set_map.at("test/gauge")->metric_name);
EXPECT_EQ(1,
metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
TFE_MonitoringIntGaugeCellSet(cell, 5);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(5,
metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
TFE_MonitoringDeleteIntGauge0(gauge);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringMultipleGauge) {
TF_Status* status = TF_NewStatus();
auto* gauge1 =
TFE_MonitoringNewBoolGauge1("test/gauge1", status, "test", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellBoolGauge1(gauge1, "foo");
TFE_MonitoringBoolGaugeCellSet(cell1, true);
EXPECT_TRUE(TFE_MonitoringBoolGaugeCellValue(cell1));
TFE_MonitoringDeleteBoolGauge1(gauge1);
auto* gauge2 = TFE_MonitoringNewStringGauge2("test/gauge2", status, "test",
"label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell2 = TFE_MonitoringGetCellStringGauge2(gauge2, "foo", "bar");
TFE_MonitoringStringGaugeCellSet(cell2, "str");
auto* buf = new TF_Buffer;
TFE_MonitoringStringGaugeCellValue(cell2, buf);
string data(static_cast<const char*>(buf->data), buf->length);
TF_DeleteBuffer(buf);
EXPECT_EQ(data, "str");
TFE_MonitoringDeleteStringGauge2(gauge2);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringSampler0) {
TF_Status* status = TF_NewStatus();
auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
auto* sampler =
TFE_MonitoringNewSampler0("test/sampler", buckets, status, "test");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell = TFE_MonitoringGetCellSampler0(sampler);
TFE_MonitoringSamplerCellAdd(cell, 1.0);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/sampler",
metrics->point_set_map.at("test/sampler")->metric_name);
EXPECT_EQ(1.0, metrics->point_set_map.at("test/sampler")
->points.at(0)
->histogram_value.sum());
TFE_MonitoringSamplerCellAdd(cell, 5.0);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(6.0, metrics->point_set_map.at("test/sampler")
->points.at(0)
->histogram_value.sum());
TFE_MonitoringDeleteBuckets(buckets);
TFE_MonitoringDeleteSampler0(sampler);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringMultipleSampler) {
TF_Status* status = TF_NewStatus();
auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
auto* sampler1 = TFE_MonitoringNewSampler1("test/sampler1", buckets, status,
"test", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellSampler1(sampler1, "foo");
TFE_MonitoringSamplerCellAdd(cell1, 1.0);
TFE_MonitoringSamplerCellAdd(cell1, 2.0);
TF_Buffer* result1 = TF_NewBuffer();
TFE_MonitoringSamplerCellValue(cell1, result1);
tensorflow::HistogramProto histogram1;
EXPECT_TRUE(histogram1.ParseFromString(
{reinterpret_cast<const char*>(result1->data), result1->length}));
EXPECT_EQ(histogram1.sum(), 3.0);
TF_DeleteBuffer(result1);
TFE_MonitoringDeleteSampler1(sampler1);
auto* sampler2 = TFE_MonitoringNewSampler2("test/sampler2", buckets, status,
"test", "label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell2 = TFE_MonitoringGetCellSampler2(sampler2, "foo", "bar");
TFE_MonitoringSamplerCellAdd(cell2, 2.0);
TFE_MonitoringSamplerCellAdd(cell2, 3.0);
TF_Buffer* result2 = TF_NewBuffer();
TFE_MonitoringSamplerCellValue(cell2, result2);
tensorflow::HistogramProto histogram2;
EXPECT_TRUE(histogram2.ParseFromString(
{reinterpret_cast<const char*>(result2->data), result2->length}));
EXPECT_EQ(histogram2.sum(), 5.0);
TF_DeleteBuffer(result2);
TFE_MonitoringDeleteSampler2(sampler2);
TFE_MonitoringDeleteBuckets(buckets);
TF_DeleteStatus(status);
}
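// Registers three callbacks, deregisters two of them, then cancels: only the
// remaining callback may run.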
TEST(CAPI, CancellationManager) {
TFE_CancellationManager* c_mgr = TFE_NewCancellationManager();
EXPECT_FALSE(TFE_CancellationManagerIsCancelled(c_mgr));
TFE_CancelCallback callback1;
callback1.callback = [](void* context) {
ADD_FAILURE() << "Callback1 should be deregistered.";
};
TFE_CancellationToken token1 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token1, &callback1,
"callback1"));
TFE_CancelCallback callback2;
bool callback2_invoked = false;
callback2.context = &callback2_invoked;
callback2.callback = [](void* context) {
*reinterpret_cast<bool*>(context) = true;
};
TFE_CancellationToken token2 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token2, &callback2,
"callback2"));
TFE_CancellationToken token3 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token3, &callback1,
"callback3"));
EXPECT_TRUE(TFE_CancellationManagerDeregisterCallback(c_mgr, token1));
EXPECT_TRUE(TFE_CancellationManagerTryDeregisterCallback(c_mgr, token3));
TFE_CancellationManagerStartCancel(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerIsCancelled(c_mgr));
EXPECT_TRUE(callback2_invoked);
TFE_DeleteCancellationManager(c_mgr);
}
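// The context and its thread-local executor must tolerate being destroyed in
// either order.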
TEST(CAPI, ExecutorContextDestructionOrder) {
TF_Status* status = TF_NewStatus();
{
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
    TFE_Executor* executor = TFE_NewExecutor(
        /*is_async=*/false, /*enable_streaming_enqueue=*/true,
        /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_DeleteContext(ctx);
TFE_DeleteExecutor(executor);
}
{
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
    TFE_Executor* executor = TFE_NewExecutor(
        /*is_async=*/false, /*enable_streaming_enqueue=*/true,
        /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TF_DeleteStatus(status);
}
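// Builds a one-op Identity function from a graph, registers it on an eager
// context, and runs it under both synchronous and asynchronous executors.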
TEST(CAPI, Function_ident_CPU) {
TF_Graph* function_graph = TF_NewGraph();
TF_OperationDescription* arg_descr =
TF_NewOperation(function_graph, "Placeholder", "arg");
TF_SetAttrType(arg_descr, "dtype", TF_INT32);
TF_Status* status = TF_NewStatus();
TF_Operation* arg = TF_FinishOperation(arg_descr, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_OperationDescription* id_descr =
TF_NewOperation(function_graph, "Identity", "id");
TF_SetAttrType(id_descr, "T", TF_INT32);
TF_AddInput(id_descr, {arg, 0});
TF_Operation* id = TF_FinishOperation(id_descr, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_Output input{arg, 0};
TF_Output output{id, 0};
TF_Function* fn =
TF_GraphToFunction(function_graph, "ident", 0, 1, &id, 1, &input, 1,
&output, nullptr, nullptr, "test", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteGraph(function_graph);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_ContextAddFunction(ctx, fn, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteFunction(fn);
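  // Run the function three times: synchronously, asynchronously, then
  // synchronously again, swapping the thread's executor each time.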
for (bool async : {false, true, false}) {
TFE_Executor* old_executor = TFE_ContextGetExecutorForThread(ctx);
    TFE_Executor* executor = TFE_NewExecutor(
        /*is_async=*/async, /*enable_streaming_enqueue=*/true,
        /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* t =
TF_AllocateTensor(TF_INT32, nullptr, 0, 1 * sizeof(tensorflow::int32));
*reinterpret_cast<tensorflow::int32*>(TF_TensorData(t)) = 42;
TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteTensor(t);
TFE_Op* op = TFE_NewOp(ctx, "ident", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_OpAddInput(op, h, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
std::vector<TFE_TensorHandle*> result;
result.push_back(nullptr);
int num_retvals = 1;
TFE_Execute(op, result.data(), &num_retvals, status);
TFE_DeleteOp(op);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
ASSERT_EQ(num_retvals, 1);
TF_Tensor* r = TFE_TensorHandleResolve(result[0], status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
EXPECT_EQ(*reinterpret_cast<tensorflow::int32*>(TF_TensorData(r)), 42);
TFE_ContextSetExecutorForThread(ctx, old_executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
TFE_DeleteExecutor(old_executor);
TFE_DeleteTensorHandle(h);
TF_DeleteTensor(r);
TFE_DeleteTensorHandle(result[0]);
}
TFE_ContextRemoveFunction(ctx, "ident", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContext(ctx);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteStatus(status);
}
void Executor_MatMul_CPU(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Executor* old_executor = TFE_ContextGetExecutorForThread(ctx);
  TFE_Executor* executor = TFE_NewExecutor(
      /*is_async=*/async, /*enable_streaming_enqueue=*/true,
      /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_TensorHandle* retvals[2] = {nullptr, nullptr};
int num_retvals = 2;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
TFE_ContextSetExecutorForThread(ctx, old_executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
TFE_DeleteExecutor(old_executor);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
TF_DeleteStatus(status);
}
TEST(CAPI, Executor_MatMul_CPU) { Executor_MatMul_CPU(false); }
TEST(CAPI, Executor_MatMul_CPUAsync) { Executor_MatMul_CPU(true); }
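// Deleter passed to TFE_NewTensorHandleFromDeviceMemory: frees the handle
// whose device memory the new handle aliased.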
void Deleter(void* data, size_t unused, void* tensor_handle) {
TFE_DeleteTensorHandle(static_cast<TFE_TensorHandle*>(tensor_handle));
}
TEST(CAPI, TensorHandleOnDeviceMemory) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TF_Tensor* m_data = TFE_TensorHandleResolve(m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float* m_float = static_cast<float*>(TF_TensorData(m_data));
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int num_devices = TF_DeviceListCount(devices);
for (int d = 0; d < num_devices; ++d) {
const char* name = TF_DeviceListName(devices, d, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* copy = TFE_TensorHandleCopyToDevice(m, ctx, name, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
void* data = TFE_TensorHandleDevicePointer(copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
size_t size = TFE_TensorHandleDeviceMemorySize(copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int64_t dims[] = {2, 2};
TFE_TensorHandle* copy_aliased = TFE_NewTensorHandleFromDeviceMemory(
ctx, name, TF_FLOAT, dims, 2, data, size, &Deleter, copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* on_host =
TFE_TensorHandleCopyToDevice(copy_aliased, ctx, "CPU:0", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* resolved = TFE_TensorHandleResolve(on_host, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const float* resolved_data =
static_cast<const float*>(TF_TensorData(resolved));
EXPECT_EQ(0, memcmp(m_float, resolved_data, 4 * sizeof(float)));
TF_DeleteTensor(resolved);
TFE_DeleteTensorHandle(copy_aliased);
TFE_DeleteTensorHandle(on_host);
}
TF_DeleteDeviceList(devices);
TF_DeleteTensor(m_data);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, TensorHandleNullptr) {
TFE_TensorHandle* h = nullptr;
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
const char* device_type = TFE_TensorHandleDeviceType(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_type, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int device_id = TFE_TensorHandleDeviceID(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_id, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
}
TEST(CAPI, TensorHandleDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* device_type = TFE_TensorHandleDeviceType(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = TFE_TensorHandleDeviceID(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
device_type = TFE_TensorHandleDeviceType(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "GPU")) << device_type;
device_id = TFE_TensorHandleDeviceID(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
TFE_DeleteOp(shape_op);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleDefaults) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* h_default = TestMatrixTensorHandle(ctx);
const char* device_type = TFE_TensorHandleDeviceType(h_default, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = TFE_TensorHandleDeviceID(h_default, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
TFE_TensorHandle* h_cpu = TFE_TensorHandleCopyToDevice(
h_default, ctx, "/device:CPU:0", status.get());
const char* device_type_cpu = TFE_TensorHandleDeviceType(h_cpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type_cpu, "CPU")) << device_type_cpu;
int device_id_cpu = TFE_TensorHandleDeviceID(h_cpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id_cpu) << device_id_cpu;
TFE_DeleteTensorHandle(h_default);
TFE_DeleteTensorHandle(h_cpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
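// Brings up a two-task "worker" cluster, attaches a client context to it with
// TFE_ContextSetServerDef, then re-initializes the same context as a
// local-only cluster via TFE_InitializeLocalOnlyContext.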
TEST(CAPI, CreateLocalContextAsReset) {
tensorflow::ServerDef server_def = GetServerDef("worker", 2);
server_def.mutable_default_session_config()->set_isolate_session_state(false);
ServerFactory* factory;
ASSERT_TRUE(ServerFactory::GetFactory(server_def, &factory).ok());
server_def.set_job_name("worker");
server_def.set_task_index(0);
std::unique_ptr<tensorflow::ServerInterface> w0;
ASSERT_TRUE(
factory->NewServer(server_def, ServerFactory::Options(), &w0).ok());
ASSERT_TRUE(w0->Start().ok());
server_def.set_task_index(1);
std::unique_ptr<tensorflow::ServerInterface> w1;
ASSERT_TRUE(
factory->NewServer(server_def, ServerFactory::Options(), &w1).ok());
ASSERT_TRUE(w1->Start().ok());
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
opts->session_options.options.config.set_isolate_session_state(false);
TFE_ContextOptionsSetDevicePlacementPolicy(opts, TFE_DEVICE_PLACEMENT_SILENT);
TFE_Context* ctx = TFE_NewContext(opts, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
server_def.set_task_index(0);
auto cluster = server_def.mutable_cluster();
auto client_job = cluster->add_job();
client_job->set_name("localhost");
int client_port = tensorflow::testing::PickUnusedPortOrDie();
client_job->mutable_tasks()->insert(
{0, strings::StrCat("localhost:", client_port)});
server_def.set_job_name("localhost");
auto serialized = server_def.SerializeAsString();
TFE_ContextSetServerDef(ctx, 0, serialized.data(), serialized.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
server_def.set_job_name("worker");
server_def.set_task_index(0);
tensorflow::ClusterDef* cluster_def = server_def.mutable_cluster();
tensorflow::JobDef* job_def = cluster_def->mutable_job(0);
int worker_port = tensorflow::testing::PickUnusedPortOrDie();
job_def->mutable_tasks()->at(0) =
tensorflow::strings::StrCat("localhost:", worker_port);
serialized = server_def.SerializeAsString();
TFE_InitializeLocalOnlyContext(ctx, 0, serialized.data(), serialized.size(),
status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
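// Release (intentionally leak) the servers; clean server shutdown is not
// supported, so destroying them here could block.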
w0.release();
w1.release();
}
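// Creates a variable on a remote task from one context, restarts that task on
// a new port, updates both contexts' server defs with a timeout, and then
// reads the variable back through the second context.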
TEST(CAPI, ShareVariableAcrossContextsAfterUpdateContextWorksWithTimeout) {
tensorflow::ServerDef server_def_0 = GetServerDef(3);
server_def_0.mutable_default_session_config()->set_isolate_session_state(
false);
tensorflow::ServerDef server_def_1 =
ReplaceTaskInServerDef(server_def_0, 0);
string serialized_server_def_0 = server_def_0.SerializeAsString();
string serialized_server_def_1 = server_def_1.SerializeAsString();
server_def_0.set_task_index(1);
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
server_def_0.set_task_index(2);
std::unique_ptr<tensorflow::GrpcServer> worker_server2;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server2)
.ok());
ASSERT_TRUE(worker_server2->Start().ok());
int32_t init_timeout_in_ms = 300000;
TFE_Context* ctx_0 = CreateContext(serialized_server_def_0,
                                   /*isolate_session_state=*/false,
                                   init_timeout_in_ms);
TFE_Context* ctx_1 = CreateContext(serialized_server_def_1,
                                   /*isolate_session_state=*/false,
                                   init_timeout_in_ms);
const char remote_device[] = "/job:localhost/replica:0/task:2/device:CPU:0";
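// Both contexts should see the remote task's CPU device.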
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_0);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_1);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
TFE_TensorHandle* handle_0 =
CreateVariable(ctx_0, 1.2, remote_device, "var");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(ctx_0, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
int port = tensorflow::testing::PickUnusedPortOrDie();
ReplaceTaskInServerDef(&server_def_0, 1, "localhost", port);
ReplaceTaskInServerDef(&server_def_1, 1, "localhost", port);
server_def_0.set_task_index(1);
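// Simulate a restart of task 1: leak the old server (clean shutdown is not
// supported) and bring up a replacement on the newly picked port.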
worker_server1.release();
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
{
server_def_0.set_task_index(0);
string serialized_update = server_def_0.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDefWithTimeout(ctx_0, 0, serialized_update.data(),
serialized_update.size(),
init_timeout_in_ms, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
server_def_1.set_task_index(0);
string serialized_update = server_def_1.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDefWithTimeout(ctx_1, 0, serialized_update.data(),
serialized_update.size(),
init_timeout_in_ms, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
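// Read back the variable created through ctx_0, this time via ctx_1, to
// confirm the two contexts share state after the server def updates.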
{
TFE_TensorHandle* var_handle =
CreateVarHandle(ctx_1, remote_device, "var");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op = TFE_NewOp(ctx_1, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteTensorHandle(handle_0);
TFE_DeleteContext(ctx_0);
TFE_DeleteContext(ctx_1);
worker_server1.release();
worker_server2.release();
}
}
}
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_experimental.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_experimental_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: 1ecf36f2-7ab5-4fb0-8321-4f385b395019
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: backend_async_kernel_interface
File Path in Repository: tensorflow/lite/async/backend_async_kernel_interface.cc
File Path for Unit Test: tensorflow/lite/async/backend_async_kernel_interface_test.cc
Code:
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <vector>
#include "tensorflow/lite/async/c/async_kernel.h"
#include "tensorflow/lite/async/c/types.h"
namespace tflite {
namespace delegates {
namespace internal {
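// C-to-C++ trampolines: each callback recovers the BackendAsyncKernelInterface
// instance stashed as the kernel's data pointer and forwards the call to the
// corresponding virtual method.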
TfLiteStatus RegisterBuffer(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->RegisterBuffer(context, io_type, buffer, attrs, handle);
}
TfLiteStatus RegisterBufferSlice(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->RegisterBufferSlice(context, buffer, attrs, handle);
}
TfLiteStatus UnregisterBuffer(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context,
const TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->UnregisterBuffer(context, handle);
}
void SupportedBufferTypes(const TfLiteAsyncKernel* async_kernel,
TfLiteIoType io_type, const char* const** types,
size_t* n_types) {
if (types == nullptr || n_types == nullptr) return;
const auto& buf_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SupportedBufferTypes(io_type);
*types = buf_types.data();
*n_types = buf_types.size();
}
void SupportedSynchronizations(const TfLiteAsyncKernel* async_kernel,
TfLiteIoType io_type, const char* const** types,
size_t* n_types) {
if (types == nullptr || n_types == nullptr) return;
const auto& sync_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SupportedSynchronizations(io_type);
*types = sync_types.data();
*n_types = sync_types.size();
}
bool ReconcileRestrictions(const TfLiteAsyncKernel* async_kernel,
const TfLiteOpaqueContext* context,
const TfLiteOpaqueNode* node, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged,
TfLiteAttributeMap* conflict) {
return reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->ReconcileRestrictions(context, node, tensor_index,
user_provided_attributes, merged, conflict);
}
TfLiteStatus SetAttributes(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteOpaqueNode* node,
int tensor_index, const TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SetAttributes(context, node, tensor_index, attrs);
}
TfLiteStatus Prepare(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Prepare(context, node);
}
TfLiteStatus Eval(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node, TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Eval(context, node, task);
}
TfLiteStatus Wait(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Wait(context, task);
}
TfLiteStatus Finish(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Finish(context, task);
}
TfLiteStatus SetBufferAttributes(TfLiteAsyncKernel* async_kernel,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SetBufferAttributes(buffer, attrs);
}
TfLiteStatus GetBufferAttributes(TfLiteAsyncKernel* async_kernel,
const TfLiteBackendBuffer* buffer,
TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->GetBufferAttributes(buffer, attrs);
}
}  // namespace internal
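// Wires the trampolines above into a freshly created TfLiteAsyncKernel so
// that the C async kernel API dispatches into this object's virtual methods.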
BackendAsyncKernelInterface::BackendAsyncKernelInterface() {
kernel_ = TfLiteAsyncKernelCreate(this);
TfLiteAsyncKernelSetRegisterBuffer(kernel_, internal::RegisterBuffer);
TfLiteAsyncKernelSetRegisterBufferSlice(kernel_,
internal::RegisterBufferSlice);
TfLiteAsyncKernelSetUnregisterBuffer(kernel_, internal::UnregisterBuffer);
TfLiteAsyncKernelSetSupportedBufferTypes(kernel_,
internal::SupportedBufferTypes);
TfLiteAsyncKernelSetSupportedSynchronizations(
kernel_, internal::SupportedSynchronizations);
TfLiteAsyncKernelSetReconcileRestrictions(kernel_,
internal::ReconcileRestrictions);
TfLiteAsyncKernelSetSetAttributes(kernel_, internal::SetAttributes);
TfLiteAsyncKernelSetSetBufferAttributes(kernel_,
internal::SetBufferAttributes);
TfLiteAsyncKernelSetGetBufferAttributes(kernel_,
internal::GetBufferAttributes);
TfLiteAsyncKernelSetPrepare(kernel_, internal::Prepare);
TfLiteAsyncKernelSetEval(kernel_, internal::Eval);
TfLiteAsyncKernelSetWait(kernel_, internal::Wait);
TfLiteAsyncKernelSetFinish(kernel_, internal::Finish);
}
}  // namespace delegates
}  // namespace tflite
Unit Test - (Ground Truth):
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/async/c/types.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
using ::testing::_;
namespace tflite::delegates {
namespace {
TEST(BackendAsyncKernelInterfaceTest, BasicTest) {
testing::StrictMock<async::testing::MockAsyncKernel> kernel;
EXPECT_CALL(kernel, RegisterBuffer(_, _, _, _, _));
EXPECT_CALL(kernel, RegisterBufferSlice(_, _, _, _));
EXPECT_CALL(kernel, UnregisterBuffer(_, _));
EXPECT_CALL(kernel, ReconcileRestrictions(_, _, _, _, _, _));
EXPECT_CALL(kernel, SetAttributes(_, _, _, _));
EXPECT_CALL(kernel, SetBufferAttributes(_, _));
EXPECT_CALL(kernel, GetBufferAttributes(_, _));
EXPECT_CALL(kernel, Prepare(_, _));
EXPECT_CALL(kernel, Eval(_, _, _));
EXPECT_CALL(kernel, Wait(_, _));
EXPECT_CALL(kernel, Finish(_, _));
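// Drive every entry point through the C function table; the StrictMock
// verifies that each call is forwarded to the matching C++ method exactly
// once.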
auto* tflite_kernel = kernel.kernel();
tflite_kernel->register_buffer(tflite_kernel, nullptr, kTfLiteIoTypeInput,
nullptr, nullptr, 0);
tflite_kernel->register_buffer_slice(tflite_kernel, nullptr, 0, nullptr, 0);
tflite_kernel->unregister_buffer(tflite_kernel, nullptr, 0);
tflite_kernel->reconcile_restrictions(tflite_kernel, nullptr, nullptr, 0,
nullptr, nullptr, nullptr);
tflite_kernel->set_attributes(tflite_kernel, nullptr, nullptr, 0, nullptr);
tflite_kernel->set_buffer_attributes(tflite_kernel, nullptr, nullptr);
tflite_kernel->get_buffer_attributes(tflite_kernel, nullptr, nullptr);
tflite_kernel->prepare(tflite_kernel, nullptr, nullptr);
tflite_kernel->eval(tflite_kernel, nullptr, nullptr, nullptr);
tflite_kernel->wait(tflite_kernel, nullptr, nullptr);
tflite_kernel->finish(tflite_kernel, nullptr, nullptr);
}
}  // namespace
}  // namespace tflite::delegates
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/async/backend_async_kernel_interface.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/async/backend_async_kernel_interface_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: ee9ec021-ba6f-4611-b011-93d469b89593
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: tokenize
File Path in Repository: tensorflow/lite/testing/tokenize.cc
File Path for Unit Test: tensorflow/lite/testing/tokenize_test.cc
Code:
#include "tensorflow/lite/testing/tokenize.h"
#include <cctype>
#include <istream>
#include <string>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
void Tokenize(std::istream* input, TokenProcessor* processor) {
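// Character-level scanner with three states: kIdle between tokens,
// kBuildToken while accumulating an unquoted token, and kBuildQuotedToken
// while inside double quotes. The delimiters '{', '}' and ':' are emitted as
// single-character tokens.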
enum State { kBuildQuotedToken, kBuildToken, kIdle };
std::string current_token;
State state = kIdle;
auto start_token = [&](char c) {
state = kBuildToken;
current_token.clear();
current_token = c;
};
auto issue_token = [&]() {
state = kIdle;
processor->ConsumeToken(¤t_token);
current_token.clear();
};
auto start_quoted_token = [&]() {
state = kBuildQuotedToken;
current_token.clear();
};
auto issue_quoted_token = [&]() {
state = kIdle;
processor->ConsumeToken(¤t_token);
current_token.clear();
};
auto issue_delim = [&](char d) {
current_token = string(1, d);
processor->ConsumeToken(¤t_token);
current_token.clear();
};
auto is_delim = [](char c) { return c == '{' || c == '}' || c == ':'; };
auto is_quote = [](char c) { return c == '"'; };
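// Walk the stream one character at a time, dispatching on the current state.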
for (auto it = std::istreambuf_iterator<char>(*input);
it != std::istreambuf_iterator<char>(); ++it) {
switch (state) {
case kIdle:
if (is_delim(*it)) {
issue_delim(*it);
} else if (is_quote(*it)) {
start_quoted_token();
} else if (!isspace(*it)) {
start_token(*it);
}
break;
case kBuildToken:
if (is_delim(*it)) {
issue_token();
issue_delim(*it);
} else if (is_quote(*it)) {
issue_token();
start_quoted_token();
} else if (isspace(*it)) {
issue_token();
} else {
current_token += *it;
}
break;
case kBuildQuotedToken:
if (is_quote(*it)) {
issue_quoted_token();
} else {
current_token += *it;
}
break;
}
}
if (state != kIdle) {
issue_token();
}
}
}  // namespace testing
}  // namespace tflite
Unit Test - (Ground Truth):
#include "tensorflow/lite/testing/tokenize.h"
#include <sstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
class TokenCollector : public TokenProcessor {
public:
void ConsumeToken(std::string* token) override { tokens_.push_back(*token); }
const std::vector<std::string>& Tokens() { return tokens_; }
private:
std::vector<std::string> tokens_;
};
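// Convenience helper: tokenizes a string and returns the emitted tokens.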
std::vector<std::string> TokenizeString(const std::string& s) {
std::stringstream ss(s);
TokenCollector collector;
Tokenize(&ss, &collector);
return collector.Tokens();
}
TEST(TokenizeTest, TokenDetection) {
EXPECT_THAT(TokenizeString("x :1"), ElementsAre("x", ":", "1"));
EXPECT_THAT(TokenizeString("x:1"), ElementsAre("x", ":", "1"));
EXPECT_THAT(TokenizeString("x {1"), ElementsAre("x", "{", "1"));
EXPECT_THAT(TokenizeString("x{1"), ElementsAre("x", "{", "1"));
EXPECT_THAT(TokenizeString("x }1"), ElementsAre("x", "}", "1"));
EXPECT_THAT(TokenizeString("x}1"), ElementsAre("x", "}", "1"));
EXPECT_THAT(TokenizeString("x \"1"), ElementsAre("x", "1"));
EXPECT_THAT(TokenizeString("x\"1"), ElementsAre("x", "1"));
}
TEST(TokenizeTest, QuotedTokenDetection) {
EXPECT_THAT(TokenizeString("\"w:x{y}z\"1"), ElementsAre("w:x{y}z", "1"));
EXPECT_THAT(TokenizeString("\"w:x{y}z\"\"1\""), ElementsAre("w:x{y}z", "1"));
}
TEST(TokenizeTest, Delimiters) {
EXPECT_THAT(TokenizeString("}"), ElementsAre("}"));
EXPECT_THAT(TokenizeString("}}"), ElementsAre("}", "}"));
EXPECT_THAT(TokenizeString("{"), ElementsAre("{"));
EXPECT_THAT(TokenizeString("{{"), ElementsAre("{", "{"));
EXPECT_THAT(TokenizeString(":"), ElementsAre(":"));
EXPECT_THAT(TokenizeString("::"), ElementsAre(":", ":"));
}
TEST(TokenizeTest, CornerCases) {
EXPECT_THAT(TokenizeString(" i { b:a } "),
ElementsAre("i", "{", "b", ":", "a", "}"));
EXPECT_THAT(TokenizeString(" }"), ElementsAre("}"));
EXPECT_THAT(TokenizeString(" } "), ElementsAre("}"));
EXPECT_THAT(TokenizeString(" {} "), ElementsAre("{", "}"));
EXPECT_THAT(TokenizeString(" x{} y{} "),
ElementsAre("x", "{", "}", "y", "{", "}"));
EXPECT_THAT(TokenizeString("x:1 y:2 "),
ElementsAre("x", ":", "1", "y", ":", "2"));
EXPECT_THAT(TokenizeString("x:\"1\" y:2 "),
ElementsAre("x", ":", "1", "y", ":", "2"));
EXPECT_THAT(TokenizeString("x:\"1, 2\" y:\"\" "),
ElementsAre("x", ":", "1, 2", "y", ":", ""));
}
TEST(TokenizeTest, NewLines) {
EXPECT_THAT(TokenizeString("x:\n1,\n 2 \n y :\n3 \n"),
ElementsAre("x", ":", "1,", "2", "y", ":", "3"));
}
TEST(TokenizeTest, LongString) {
EXPECT_THAT(
TokenizeString(" i { b:a } input {"
"a: \"1e-1, 2,3\" b:\"1,2,3\"\n c{ "
"id:1 x{d{a:"
"1}}} f:2 "
"\n}\n t:1"),
ElementsAreArray({"i", "{", "b", ":", "a", "}", "input", "{",
"a", ":", "1e-1, 2,3", "b", ":", "1,2,3", "c", "{",
"id", ":", "1", "x", "{", "d", "{", "a",
":", "1", "}", "}", "}", "f", ":", "2",
"}", "t", ":", "1"}));
}
}  // namespace
}  // namespace testing
}  // namespace tflite
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tokenize.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tokenize_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea