ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) |
---|---|---|---|---|---|---|---|
900 | cpp | tensorflow/tensorflow | batch_matmul | tensorflow/lite/kernels/batch_matmul.cc | tensorflow/lite/kernels/batch_matmul_test.cc |

#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#include <algorithm>
#include <cstdint>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
namespace batch_matmul {
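// Resolves one broadcast batch dimension: the two sizes must match, or one of
// them must be 1 (enforced by the DCHECK below).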
inline int broadcast_dim(int lhs_dim, int rhs_dim) {
if (lhs_dim == rhs_dim) return lhs_dim;
if (lhs_dim == 1) return rhs_dim;
TFLITE_DCHECK_EQ(rhs_dim, 1);
return lhs_dim;
}
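// Number of elements to advance when stepping along dimension x of `shape`:
// 0 if that dimension is broadcast (size 1), otherwise the product of all
// trailing dimension sizes.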
inline int extent(const RuntimeShape& shape, int x) {
if (shape.Dims(x) == 1) {
return 0;
}
int prod = 1;
for (int i = x + 1; i < shape.DimensionsCount(); ++i) {
prod *= shape.Dims(i);
}
return prod;
}
}  // namespace batch_matmul
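// Reference batch matmul for non-quantized element types. Both shapes are
// extended to rank 5 (three broadcastable batch dims plus a 2-D matrix). The
// inner loops read lhs_ptr2[accum_depth * i + k] and
// rhs_ptr2[j * accum_depth + k], so both operands are traversed contiguously
// along the accumulation depth, and results are written at
// out_ptr[lhs_rows * j + i].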
template <typename Ta, typename Tb, typename Tout>
inline void BatchMatMul(const RuntimeShape& lhs_shape, const Ta* lhs_data,
const RuntimeShape& rhs_shape, const Tb* rhs_data,
const RuntimeShape& output_shape, Tout* output_data) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const Ta* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const Tb* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const Ta* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const Tb* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const Ta* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const Tb* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
Tout* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
for (int i = 0; i < lhs_rows; ++i) {
Tout total = 0;
for (int k = 0; k < accum_depth; ++k) {
total += static_cast<Tout>(lhs_ptr2[accum_depth * i + k]) *
static_cast<Tout>(rhs_ptr2[j * accum_depth + k]);
}
int idx = lhs_rows * j + i;
out_ptr[idx] = total;
}
}
}
}
}
}
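// Hybrid path: int8 data with per-column float scaling factors and zero-point
// offsets taken from the quantized RHS. Accumulation is in int32; cached row
// sums of the LHS (recomputed only when *compute_row_sums is set) correct for
// the asymmetric offset before the total is rescaled and added to the float
// output.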
inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data,
const RuntimeShape& rhs_shape, const int8_t* rhs_data,
const float* scaling_factors,
const int32_t* input_offset, int32_t* row_sums,
const RuntimeShape& output_shape, float* output_data,
bool* compute_row_sums) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols;
const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols;
const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols;
const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows;
const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows;
const int woff_ext2 = lhs_ext2 == 0 ? 0 : lhs_rows;
if (!compute_row_sums || *compute_row_sums) {
int num_weights_matrices = 1;
for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) {
num_weights_matrices *= extended_lhs_shape.Dims(i);
}
tensor_utils::ReductionSumVector(
lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);
if (compute_row_sums) {
*compute_row_sums = false;
}
}
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0);
const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0);
const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1);
const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1);
const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1);
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2);
const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2);
const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2);
float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
const float batch_scaling_factor = scale_ptr2[j];
const float batch_offset = static_cast<float>(ioff_ptr2[j]);
for (int i = 0; i < lhs_rows; ++i) {
int32_t total = 0;
for (int k = 0; k < accum_depth; ++k) {
total +=
lhs_ptr2[accum_depth * i + k] * rhs_ptr2[j * accum_depth + k];
}
int32_t row_sum = woff_ptr2[i];
total -= row_sum * batch_offset;
int idx = lhs_rows * j + i;
out_ptr[idx] += batch_scaling_factor * total;
}
}
}
}
}
}
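// Fully quantized path (T is a narrow integer type, AccumT a wider
// accumulator): zero-point offsets are applied during accumulation, then the
// total is requantized with output_multiplier/output_shift, shifted by
// output_offset, and clamped to the quantized activation range.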
template <typename T, typename AccumT>
inline void BatchMatMul(const FullyConnectedParams& params,
const RuntimeShape& lhs_shape, const T* lhs_data,
const RuntimeShape& rhs_shape, const T* rhs_data,
const RuntimeShape& output_shape, T* output_data) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const T* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const T* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const T* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const T* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const T* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
T* out_ptr = output_data +
((b0 * batch_dim1 * batch_dim2) + b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
for (int i = 0; i < lhs_rows; ++i) {
AccumT total = 0;
for (int k = 0; k < accum_depth; ++k) {
AccumT lhs_val = lhs_ptr2[accum_depth * i + k];
AccumT rhs_val = rhs_ptr2[accum_depth * j + k];
total += (lhs_val + filter_offset) * (rhs_val + input_offset);
}
int32_t total_scaled = MultiplyByQuantizedMultiplier(
total, output_multiplier, output_shift);
total_scaled += output_offset;
total_scaled = std::max(total_scaled, output_activation_min);
total_scaled = std::min(total_scaled, output_activation_max);
const int idx = lhs_rows * j + i;
out_ptr[idx] = static_cast<T>(total_scaled);
}
}
}
}
}
}
}  // namespace reference_ops
}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#include "tensorflow/lite/kernels/internal/reference/batch_matmul.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/batch_matmul.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace batch_matmul {
static const int kInputLHSTensor = 0;
static const int kInputRHSTensor = 1;
static const int kOutputTensor = 0;
static const int kNumTempTensorsForAdjoints = 2;
static const int kNumTempTensorsForHybrid = 5;
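// Two temporaries hold the (possibly) transposed LHS and RHS; the five extra
// hybrid-path temporaries are the quantized input, scaling factors, int32
// accumulator scratch, input offsets, and cached row sums (see
// InitializeTemporaries below).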
enum KernelType {
kReference,
kGenericOptimized,
};
struct OpData {
int32_t output_multiplier;
int output_shift;
int32_t output_activation_min;
int32_t output_activation_max;
int scratch_tensor_index;
bool rhs_transposed;
bool compute_row_sums = false;
};
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
params = reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data);
lhs = GetInput(context, node, kInputLHSTensor);
rhs = GetInput(context, node, kInputRHSTensor);
output = GetOutput(context, node, 0);
}
TfLiteBatchMatMulParams* params;
const TfLiteTensor* lhs;
const TfLiteTensor* rhs;
TfLiteTensor* output;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->rhs_transposed = false;
context->AddTensors(context,
kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
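// Output shape: broadcast of the leading batch dims, followed by
// [LHS rows, RHS cols]; adj_x/adj_y pick which of each operand's last two
// dims supplies those sizes.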
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const RuntimeShape& extended_lhs_shape,
const RuntimeShape& extended_rhs_shape,
bool adj_x, bool adj_y, int output_rank,
TfLiteTensor* output) {
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
int broadcast_dim = lhs_dim;
if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) {
broadcast_dim = rhs_dim;
}
output_shape->data[i] = broadcast_dim;
}
int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;
int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;
output_shape->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index);
output_shape->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index);
TfLiteStatus stat = context->ResizeTensor(context, output, output_shape);
return stat;
}
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* lhs = op_context->lhs;
const TfLiteTensor* rhs = op_context->rhs;
TfLiteIntArrayFree(node->temporaries);
bool is_hybrid =
(op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8);
if (is_hybrid) {
node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints +
kNumTempTensorsForHybrid);
} else {
node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints);
}
const int lhs_rank = NumDimensions(lhs);
const int rhs_rank = NumDimensions(rhs);
const int batch_size = op_context->params->adj_x
? lhs->dims->data[lhs_rank - 1]
: lhs->dims->data[lhs_rank - 2];
const int num_units = op_context->params->adj_y
? rhs->dims->data[rhs_rank - 2]
: rhs->dims->data[rhs_rank - 1];
{
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 0, &scratch_buffer));
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank);
for (int i = 0; i < lhs_rank - 2; ++i) {
scratch_buffer_size->data[i] = lhs->dims->data[i];
}
scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1];
scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2];
scratch_buffer->type = op_context->lhs->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
{
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &scratch_buffer));
scratch_buffer->name = "BatchMatMul_scratch_buffer";
const TfLiteTensor* rhs = op_context->rhs;
int rhs_rank = NumDimensions(rhs);
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank);
for (int i = 0; i < rhs_rank - 2; ++i) {
scratch_buffer_size->data[i] = rhs->dims->data[i];
}
scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1];
scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2];
if (IsConstantTensor(op_context->rhs)) {
scratch_buffer->allocation_type = kTfLiteArenaRwPersistent;
} else {
scratch_buffer->allocation_type = kTfLiteArenaRw;
}
scratch_buffer->type = op_context->rhs->type;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
}
if (is_hybrid) {
int num_batches = 1;
for (int i = 0; i < lhs_rank - 2; ++i) {
num_batches *= lhs->dims->data[i];
}
int num_weights_matrices = 1;
for (int i = 0; i < rhs_rank - 2; ++i) {
num_weights_matrices *= rhs->dims->data[i];
}
op_data->compute_row_sums = true;
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2,
&input_quantized));
input_quantized->type = op_context->rhs->type;
input_quantized->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* input_quantized_size =
TfLiteIntArrayCopy(op_context->lhs->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
node->temporaries->data[3] = op_data->scratch_tensor_index + 3;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {num_batches * batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = scaling_dims[0];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[4] = op_data->scratch_tensor_index + 4;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = num_units;
accum_size->data[1] = batch_size;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[5] = op_data->scratch_tensor_index + 5;
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 5, &input_offsets));
input_offsets->type = kTfLiteInt32;
input_offsets->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) {
TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1);
input_offsets_size->data[0] = num_batches * batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets,
input_offsets_size));
}
node->temporaries->data[6] = op_data->scratch_tensor_index + 6;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 6, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_weights_matrices * num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1);
row_sums_size->data[0] = row_sums_dims[0];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
return kTfLiteOk;
}
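// Prepare: checks input/output counts, allocates temporaries, derives the
// requantization multiplier and activation range for int8/int16 outputs,
// validates types, ranks (2 to 5) and batch broadcastability, verifies that
// the contracting dimensions match, and resizes the output tensor.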
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
bool adj_x = op_context.params->adj_x;
bool adj_y = op_context.params->adj_y;
const TfLiteTensor* lhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputLHSTensor, &lhs_data));
const TfLiteTensor* rhs_data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputRHSTensor, &rhs_data));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if ((lhs_data->type == kTfLiteInt8 || lhs_data->type == kTfLiteInt16) &&
output->type != kTfLiteInt32) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, lhs_data, rhs_data, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent);
op_data->output_shift = exponent;
if (lhs_data->type == kTfLiteInt8) {
op_data->output_activation_min = std::numeric_limits<int8_t>::min();
op_data->output_activation_max = std::numeric_limits<int8_t>::max();
} else {
op_data->output_activation_min = std::numeric_limits<int16_t>::min();
op_data->output_activation_max = std::numeric_limits<int16_t>::max();
}
}
if (lhs_data->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, lhs_data->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, rhs_data->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 ||
lhs_data->type == kTfLiteInt8 ||
lhs_data->type == kTfLiteInt16);
TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 ||
rhs_data->type == kTfLiteInt8 ||
rhs_data->type == kTfLiteInt16);
TF_LITE_ENSURE(context, (lhs_data->type == kTfLiteFloat32 &&
rhs_data->type == kTfLiteInt8) ||
lhs_data->type == rhs_data->type);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 5);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2);
TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 5);
const int lhs_rank = NumDimensions(lhs_data);
const int rhs_rank = NumDimensions(rhs_data);
const int output_rank = std::max(lhs_rank, rhs_rank);
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data));
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data));
for (int i = 0; i < output_rank - 2; ++i) {
const int lhs_dim = extended_lhs_shape.Dims(i);
const int rhs_dim = extended_rhs_shape.Dims(i);
if (lhs_dim != rhs_dim) {
if (lhs_dim != 1) {
TF_LITE_ENSURE_EQ(context, rhs_dim, 1);
}
}
}
int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2)
: extended_lhs_shape.Dims(output_rank - 1);
int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1)
: extended_rhs_shape.Dims(output_rank - 2);
TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs);
TfLiteStatus status =
ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x,
adj_y, output_rank, output);
return status;
}
template <typename scalar>
void TransposeRowsColumnsImpl(const TfLiteTensor* tensor_in,
const scalar* input, TfLiteTensor* tensor_out,
scalar* output) {
RuntimeShape transposed_shape(GetTensorShape(tensor_in));
RuntimeShape shape(GetTensorShape(tensor_in));
TransposeParams params;
int rank = NumDimensions(tensor_in);
params.perm_count = rank;
for (int i = 0; i < rank - 2; ++i) {
params.perm[i] = i;
}
params.perm[rank - 2] = rank - 1;
params.perm[rank - 1] = rank - 2;
transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2));
transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1));
optimized_ops::Transpose(params, shape, input, transposed_shape, output);
}
TfLiteStatus TransposeRowsColumns(TfLiteContext* context,
const TfLiteTensor* tensor_in,
TfLiteTensor* tensor_out) {
if (tensor_in->type == kTfLiteFloat32) {
TransposeRowsColumnsImpl<float>(tensor_in, GetTensorData<float>(tensor_in),
tensor_out,
GetTensorData<float>(tensor_out));
return kTfLiteOk;
} else if (tensor_in->type == kTfLiteInt8) {
TransposeRowsColumnsImpl<int8_t>(
tensor_in, GetTensorData<int8_t>(tensor_in), tensor_out,
GetTensorData<int8_t>(tensor_out));
return kTfLiteOk;
} else if (tensor_in->type == kTfLiteInt16) {
TransposeRowsColumnsImpl<int16_t>(
tensor_in, GetTensorData<int16_t>(tensor_in), tensor_out,
GetTensorData<int16_t>(tensor_out));
return kTfLiteOk;
} else {
TF_LITE_KERNEL_LOG(
context, "Can only transpose tensors with float, int8 or int16 type.");
return kTfLiteError;
}
}
RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) {
RuntimeShape swapped_shape(shape);
const int32_t dims = shape.DimensionsCount();
swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1));
swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2));
return swapped_shape;
}
template <KernelType kernel_type>
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, OpData* data,
const RuntimeShape& input_shape,
const TfLiteTensor* input,
const RuntimeShape& filter_shape,
const TfLiteTensor* filter,
TfLiteTensor* input_quantized,
TfLiteTensor* scaling_factors,
TfLiteTensor* accum_scratch, TfLiteTensor* row_sums,
TfLiteTensor* input_offsets, TfLiteTensor* output) {
const auto* params =
reinterpret_cast<TfLiteBatchMatMulParams*>(node->builtin_data);
const int32_t num_input_dims = input_shape.DimensionsCount();
const int input_size = input_shape.Dims(num_input_dims - 2);
const int batch_size = input_shape.Dims(num_input_dims - 1);
int num_batches_to_quantize = batch_size;
for (int i = 0; i < input_shape.DimensionsCount() - 2; ++i) {
num_batches_to_quantize *= input_shape.Dims(i);
}
const int scaling_factor_size = GetTensorShape(scaling_factors).FlatSize();
TF_LITE_ENSURE(context, scaling_factor_size >= num_batches_to_quantize);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* input_offset_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
input_offset_ptr = GetTensorData<int32_t>(input_offsets);
row_sums_p |

#include <stddef.h>
#include <stdint.h>
#include <initializer_list>
#include <limits>
#include <map>
#include <numeric>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_BATCH_MATMUL_REF();
TfLiteRegistration* Register_BATCH_MATMUL_GENERIC_OPTIMIZED();
}  // namespace builtin
}  // namespace ops
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
tflite::TensorType GetTFLiteType() {
if (std::is_same<T, int8_t>::value) {
return TensorType_INT8;
}
if (std::is_same<T, int16_t>::value) {
return TensorType_INT16;
}
if (std::is_same<T, int32_t>::value) {
return TensorType_INT32;
}
return TensorType_FLOAT32;
}
template <typename T>
class BatchMatMulOpModel : public SingleOpModel {
public:
BatchMatMulOpModel(const TensorData& lhs, const TensorData& rhs,
bool adj_x = false, bool adj_y = false) {
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput(rhs);
output_id_ = AddOutput(GetTFLiteType<T>());
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
int lhs() const { return lhs_id_; }
int rhs() const { return rhs_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
std::vector<int32_t> GetOutputShape() { return GetTensorShape(output_id_); }
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
};
const auto kKernelMap = new std::map<string, TfLiteRegistration*>({
{"Reference", ops::builtin::Register_BATCH_MATMUL_REF()},
{"GenericOptimized",
ops::builtin::Register_BATCH_MATMUL_GENERIC_OPTIMIZED()},
});
class BatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(BatchMatMulOpTest, Float32Test_Ones) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {3, 2, 1, 4}},
{TensorType_FLOAT32, {3, 1, 4, 1}});
std::vector<float> lhs(24);
std::iota(lhs.begin(), lhs.end(), 1);
std::vector<float> rhs(12);
std::iota(rhs.begin(), rhs.end(), 1);
std::vector<float> res{30, 70, 278, 382, 782, 950};
model.PopulateTensor<float>(model.lhs(), lhs);
model.PopulateTensor<float>(model.rhs(), rhs);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(res));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 2, 1, 1}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Flatten) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {3, 2, 2, 4}},
{TensorType_FLOAT32, {3, 1, 4, 1}});
std::vector<float> lhs(48);
std::iota(lhs.begin(), lhs.end(), 1);
std::vector<float> rhs(12);
std::iota(rhs.begin(), rhs.end(), 1);
std::vector<float> res{30, 70, 110, 150, 486, 590,
694, 798, 1454, 1622, 1790, 1958};
model.PopulateTensor<float>(model.lhs(), lhs);
model.PopulateTensor<float>(model.rhs(), rhs);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray(res));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 2, 2, 1}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Simple) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {1, 3, 4}});
model.PopulateTensor<float>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Int8Test_Simple) {
BatchMatMulOpModel<int32_t> model({TensorType_INT8, {1, 2, 3}},
{TensorType_INT8, {1, 3, 4}});
model.PopulateTensor<int8_t>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int8_t>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({74, 80, 86, 92, 173, 188, 203, 218}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Int8Test_LargeElement) {
BatchMatMulOpModel<int32_t> model({TensorType_INT8, {1, 2, 3}},
{TensorType_INT8, {1, 3, 4}});
model.PopulateTensor<int8_t>(model.lhs(), {121, 122, 123, 124, 125, 126});
model.PopulateTensor<int8_t>(model.rhs(), {117, 118, 119, 110, 111, 112, 113,
114, 115, 116, 117, 118});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray(
{41844, 42210, 42576, 41732, 42873, 43248, 43623, 42758}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_SimpleRHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {1, 4, 3}}, false, true);
model.PopulateTensor<float>(model.lhs(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 15, 8, 12, 16, 9, 13, 17, 10, 14, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_SimpleLHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {1, 3, 2}},
{TensorType_FLOAT32, {1, 3, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(), {1, 4, 2, 5, 3, 6});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({74., 80., 86., 92., 173., 188., 203., 218.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BatchSizeTwo) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 2, 3}},
{TensorType_FLOAT32, {2, 3, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({74., 80., 86., 92., 173., 188., 203., 218., 560., 584.,
608., 632., 767., 800., 833., 866.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 2, 3}},
{TensorType_FLOAT32, {3, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({74., 80., 86., 92., 173., 188., 203., 218., 272., 296.,
320., 344., 371., 404., 437., 470.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BroadcastLHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 3, 2}},
{TensorType_FLOAT32, {3, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(),
{1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({74., 80., 86., 92., 173., 188., 203., 218., 272., 296.,
320., 344., 371., 404., 437., 470.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 3, 2}},
{TensorType_FLOAT32, {3, 2, 4}});
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({29., 32., 35., 38., 65., 72., 79., 86., 101.,
112., 123., 134., 53., 56., 59., 62., 121., 128.,
135., 142., 189., 200., 211., 222., 77., 80., 83.,
86., 177., 184., 191., 198., 277., 288., 299., 310.,
137., 152., 167., 182., 173., 192., 211., 230., 209.,
232., 255., 278., 257., 272., 287., 302., 325., 344.,
363., 382., 393., 416., 439., 462., 377., 392., 407.,
422., 477., 496., 515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2LHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 2, 3}},
{TensorType_FLOAT32, {3, 2, 4}}, true, false);
model.PopulateTensor<float>(model.lhs(),
{1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({29., 32., 35., 38., 65., 72., 79., 86., 101.,
112., 123., 134., 53., 56., 59., 62., 121., 128.,
135., 142., 189., 200., 211., 222., 77., 80., 83.,
86., 177., 184., 191., 198., 277., 288., 299., 310.,
137., 152., 167., 182., 173., 192., 211., 230., 209.,
232., 255., 278., 257., 272., 287., 302., 325., 344.,
363., 382., 393., 416., 439., 462., 377., 392., 407.,
422., 477., 496., 515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2RHSAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 3, 2}},
{TensorType_FLOAT32, {3, 4, 2}}, false, true);
model.PopulateTensor<float>(model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 8, 12, 9, 13, 10, 14, 15, 19, 16, 20,
17, 21, 18, 22, 23, 27, 24, 28, 25, 29, 26, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({29., 32., 35., 38., 65., 72., 79., 86., 101.,
112., 123., 134., 53., 56., 59., 62., 121., 128.,
135., 142., 189., 200., 211., 222., 77., 80., 83.,
86., 177., 184., 191., 198., 277., 288., 299., 310.,
137., 152., 167., 182., 173., 192., 211., 230., 209.,
232., 255., 278., 257., 272., 287., 302., 325., 344.,
363., 382., 393., 416., 439., 462., 377., 392., 407.,
422., 477., 496., 515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_Broadcast2BothAdjoint) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {2, 1, 2, 3}},
{TensorType_FLOAT32, {3, 4, 2}}, true, true);
model.PopulateTensor<float>(model.lhs(),
{1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12});
model.PopulateTensor<float>(model.rhs(),
{7, 11, 8, 12, 9, 13, 10, 14, 15, 19, 16, 20,
17, 21, 18, 22, 23, 27, 24, 28, 25, 29, 26, 30});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({29., 32., 35., 38., 65., 72., 79., 86., 101.,
112., 123., 134., 53., 56., 59., 62., 121., 128.,
135., 142., 189., 200., 211., 222., 77., 80., 83.,
86., 177., 184., 191., 198., 277., 288., 299., 310.,
137., 152., 167., 182., 173., 192., 211., 230., 209.,
232., 255., 278., 257., 272., 287., 302., 325., 344.,
363., 382., 393., 416., 439., 462., 377., 392., 407.,
422., 477., 496., 515., 534., 577., 600., 623., 646.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 3, 4}));
}
TEST_P(BatchMatMulOpTest, Float32Test_BroadcastFromRHS) {
BatchMatMulOpModel<float> model({TensorType_FLOAT32, {4, 5}},
{TensorType_FLOAT32, {3, 1, 5, 2}});
model.PopulateTensor<float>(
model.lhs(),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
model.PopulateTensor<float>(
model.rhs(),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({185., 200., 460., 500., 735., 800., 1010., 1100.,
335., 350., 860., 900., 1385., 1450., 1910., 2000.,
485., 500., 1260., 1300., 2035., 2100., 2810., 2900.}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 1, 4, 2}));
}
INSTANTIATE_TEST_SUITE_P(
BatchMatMulOpTest, BatchMatMulOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
class ConstRHSBatchMatMulOpModel : public MultiOpModel {
public:
ConstRHSBatchMatMulOpModel(const TensorData& lhs,
std::initializer_list<int> rhs_shape,
std::initializer_list<float> rhs_data,
bool adj_x = false, bool adj_y = false) {
lhs_id_ = AddInput(lhs);
rhs_id_ = AddConstInput<float>(TensorType_FLOAT32, rhs_data, rhs_shape);
matmul_output_id_ = AddOutput(lhs.type);
std::vector<int> matmul_inputs{lhs_id_, rhs_id_};
std::vector<int> matmul_outputs{matmul_output_id_};
AddBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y).Union(),
matmul_inputs, matmul_outputs);
neg_output_id_ = AddOutput(lhs.type);
std::vector<int> neg_inputs{matmul_output_id_};
std::vector<int> neg_outputs{neg_output_id_};
AddBuiltinOp(BuiltinOperator_NEG, BuiltinOptions_NegOptions,
CreateNegOptions(builder_).Union(), neg_inputs, neg_outputs);
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)});
}
int lhs() const { return lhs_id_; }
std::vector<float> GetOutput() {
return ExtractVector<float>(neg_output_id_);
}
std::vector<int32_t> GetOutputShape() {
return GetTensorShape(neg_output_id_);
}
protected:
int lhs_id_;
int rhs_id_;
int matmul_output_id_;
int neg_output_id_;
};
TEST(ConstRHSBatchMatMulOpModel, RHSNotAdjoint) {
ConstRHSBatchMatMulOpModel model({TensorType_FLOAT32, {1, 6, 2}}, {2, 3},
{6, 3, 7, 4, 6, 9});
model.PopulateTensor<float>(model.lhs(),
{6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({-48, -36, -69, -58, -45, -85, -72, -72, -123,
-36, -42, -68, -58, -45, -85, -46, -51, -84}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 3}));
model.PopulateTensor<float>(model.lhs(),
{6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({-48, -36, -69, -58, -45, -85, -72, -72, -123,
-36, -42, -68, -58, -45, -85, -46, -51, -84}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 6, 3}));
}
class HybridBatchMatMulOpModel : public SingleOpModel {
public:
HybridBatchMatMulOpModel(int units, int batches, const TensorData& lhs,
const TensorData& rhs,
const TensorData& output = {TensorType_FLOAT32},
bool asymmetric_quantize_inputs = true,
bool adj_x = false, bool adj_y = false)
: units_(units), batches_(batches) {
int total_input_size = 1;
for (size_t i = 0; i < lhs.shape.size(); ++i) {
total_input_size *= lhs.shape[i];
}
input_size_ = total_input_size / batches_;
lhs_id_ = AddInput(lhs);
rhs_id_ = AddInput(rhs);
output_id_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_BATCH_MATMUL,
BuiltinOptions_BatchMatMulOptions,
CreateBatchMatMulOptions(builder_, adj_x, adj_y,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({GetShape(lhs_id_), GetShape(rhs_id_)},
-1,
false,
false);
}
void SetWeights(const std::vector<float>& data) {
SymmetricQuantizeAndPopulate(rhs_id_, data);
AllocateAndDelegate(true);
}
void SetSignedWeights(std::initializer_list<float> f) {
SignedSymmetricQuantizeAndPopulate(rhs_id_, f);
AllocateAndDelegate(true);
}
void SetInput(const std::vector<float>& f) { PopulateTensor(lhs_id_, f); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_id_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_id_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
int lhs() const { return lhs_id_; }
int rhs() const { return rhs_id_; }
protected:
int lhs_id_;
int rhs_id_;
int output_id_;
int units_;
int batches_;
int input_size_;
};
class HybridAsymmetricBatchMatMulOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
TEST_P(HybridAsymmetricBatchMatMulOpTest, SimpleTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
193,
193,
193,
247,
247,
247,
},
3.f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, MultipleNumBatchQuantizedInt8) {
HybridBatchMatMulOpModel m(
10, 4,
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_INT8, {3, 10}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
},
0.64f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 2, 10}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, RegressionTestQuantizedInt8) {
HybridBatchMatMulOpModel m(
10, 2,
{TensorType_FLOAT32, {2, 3}},
{TensorType_INT8, {3, 10}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
});
m.SetInput({
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
},
0.64f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 10}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSize) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {6, 3}},
{TensorType_INT8, {3, 8}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights(
{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjX) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {3, 6}},
{TensorType_INT8, {3, 8}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
true,
true);
m.SetSignedWeights(
{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3});
m.SetInput(
{11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjY) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {6, 3}},
{TensorType_INT8, {8, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
true,
false,
true);
m.SetSignedWeights(
{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3});
m.SetInput({
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
11, 12, 13,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest,
TestQuantizedInt8BatchesAndUnitsGreaterThanAccumDimSizeAdjXAdjY) {
HybridBatchMatMulOpModel m(
8, 6,
{TensorType_FLOAT32, {3, 6}},
{TensorType_INT8, {8, 3}, 0, 0, 10.0 / 127.0, 0},
{TensorType_FLOAT32},
true,
true,
true);
m.SetSignedWeights(
{1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3});
m.SetInput(
{11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74,
74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74},
0.15f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 8}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastWeights) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
23, 23, 23,
57, 57, 57,
193, 193, 193,
247, 247, 247,
},
3.f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 3}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastBigWeights) {
HybridBatchMatMulOpModel m(
9, 2,
{TensorType_FLOAT32, {2, 2, 10}},
{TensorType_INT8, {10, 9}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, 1, 1, 17, 17, 17, 26, 26, 26,
2, 2, 2, 18, 18, 18, 27, 27, 27,
3, 3, 3, 19, 19, 19, 28, 28, 28,
4, 4, 4, 20, 20, 20, 29, 29, 29,
5, 5, 5, 21, 21, 21, 30, 30, 30,
6, 6, 6, 22, 22, 22, 31, 31, 31,
7, 7, 7, 23, 23, 23, 32, 32, 32,
8, 8, 8, 24, 24, 24, 33, 33, 33,
9, 9, 9, 25, 25, 25, 34, 34, 34,
10, 10, 10, 26, 26, 26, 35, 35, 35,
});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
11, 12, 13, 14, 15, 16, 17, 18, -19, -20,
11, 12, 13, 14, 15, 16, 17, -18, 19, -20,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
23, 23, 23, 295, 295, 295, 448, 448, 448,
57, 57, 57, 361, 361, 361, 532, 532, 532,
193, 193, 193, 1425, 1425, 1425, 2118, 2118, 2118,
247, 247, 247, 1511, 1511, 1511, 2222, 2222, 2222
},
10.0f)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 9}));
}
TEST_P(HybridAsymmetricBatchMatMulOpTest, QuantizedInt8BroadcastInputs) {
HybridBatchMatMulOpModel m(
3, 2,
{TensorType_FLOAT32, {2, 10}},
{TensorType_INT8, {2, 10, 3}, 0, 0, 10.0 / 127.0, 0});
m.SetSignedWeights({
1, -3, 1,
2, -2, 2,
3, -1, 3,
4, 0, 4,
5, 1, 5,
6, 2, 6,
7, 3, 7,
8, 4, 8,
9, 5, 9,
10, 6, 10,
1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4, 4,
5, 5, 5, |

901 | cpp | tensorflow/tensorflow | conv3d | tensorflow/lite/kernels/conv3d.cc | tensorflow/lite/kernels/conv3d_test.cc |

#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
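// Reference float Conv3D. The input is laid out as
// [batch, depth, height, width, channels] and the filter as
// [depth, height, width, in_channels, out_channels]; taps that fall outside
// the (implicitly zero-padded) input are skipped.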
inline void Conv3D(const Conv3DParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& filter_shape,
const float* filter_data, const RuntimeShape& bias_shape,
const float* bias_data, const RuntimeShape& output_shape,
float* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 5);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int input_num_channels = MatchingDim(input_shape, 4, filter_shape, 3);
const int output_num_channels = MatchingDim(filter_shape, 4, output_shape, 4);
if (bias_data) {
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_num_channels);
}
const int input_width = input_shape.Dims(3);
const int input_height = input_shape.Dims(2);
const int input_depth = input_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
const int filter_height = filter_shape.Dims(1);
const int filter_depth = filter_shape.Dims(0);
const int output_width = output_shape.Dims(3);
const int output_height = output_shape.Dims(2);
const int output_depth = output_shape.Dims(1);
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int pad_depth = params.padding_values.depth;
for (int batch = 0; batch < batches; ++batch) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
const int in_d_origin = (out_d * params.stride_depth) - pad_depth;
for (int out_y = 0; out_y < output_height; ++out_y) {
const int in_y_origin = (out_y * params.stride_height) - pad_height;
for (int out_x = 0; out_x < output_width; ++out_x) {
const int in_x_origin = (out_x * params.stride_width) - pad_width;
for (int out_channel = 0; out_channel < output_num_channels;
++out_channel) {
float total = 0.f;
for (int filter_d = 0; filter_d < filter_depth; ++filter_d) {
const int in_d = in_d_origin + params.dilation_depth * filter_d;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y =
in_y_origin + params.dilation_height * filter_y;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x =
in_x_origin + params.dilation_width * filter_x;
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height) && (in_d >= 0) &&
(in_d < input_depth);
if (!is_point_inside_image) {
continue;
}
for (int in_channel = 0; in_channel < input_num_channels;
++in_channel) {
float input_value = input_data[Offset(
input_shape, batch, in_d, in_y, in_x, in_channel)];
float filter_value =
filter_data[Offset(filter_shape, filter_d, filter_y,
filter_x, in_channel, out_channel)];
total += (input_value * filter_value);
}
}
}
}
float bias_value = 0.0f;
if (bias_data) {
bias_value = bias_data[out_channel];
}
output_data[Offset(output_shape, batch, out_d, out_y, out_x,
out_channel)] =
ActivationFunctionWithMinMax(total + bias_value,
params.float_activation_min,
params.float_activation_max);
}
}
}
}
}
}
}  // namespace reference_ops
}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_H_
#include "tensorflow/lite/kernels/internal/reference/conv3d.h"
#include <cstddef>
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace conv3d {
enum KernelType {
kReference,
kGenericOptimized,
};
const int kTensorNotAllocated = -1;
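// Im2col buffers of this size (1 GiB) or larger are not materialized on
// mobile platforms; Eval falls back to the reference kernel instead.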
static constexpr size_t kMaxIm2colBufferSizeMobile = 1024 * 1024 * 1024;
struct OpData {
Padding3DValues padding;
int im2col_tensor_id = kTensorNotAllocated;
bool need_im2col = false;
bool im2col_oversized = false;
int32_t im2col_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* opdata = new OpData;
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
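// An im2col temporary is only requested for the optimized kernel, and only
// when the convolution is strided, dilated, or uses a filter larger than
// 1x1x1.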
TfLiteStatus AllocateTemporaryTensorsIfRequired(
KernelType kernel_type, TfLiteContext* context, TfLiteNode* node,
OpData* opdata, TfLiteConv3DParams* params, const TfLiteTensor* filter,
size_t im2col_bytes) {
int temporaries_count = 0;
const bool need_dilated_im2col = params->dilation_width_factor != 1 ||
params->dilation_height_factor != 1 ||
params->dilation_depth_factor != 1;
const bool need_non_dilated_im2col =
params->stride_depth != 1 || params->stride_width != 1 ||
params->stride_height != 1 || filter->dims->data[2] != 1 ||
filter->dims->data[1] != 1 || filter->dims->data[0] != 1;
opdata->need_im2col = (kernel_type == kGenericOptimized) &&
(need_dilated_im2col || need_non_dilated_im2col);
if (IsMobilePlatform() && opdata->need_im2col &&
im2col_bytes >= kMaxIm2colBufferSizeMobile) {
opdata->need_im2col = false;
opdata->im2col_oversized = true;
}
if (opdata->need_im2col) {
if (opdata->im2col_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &opdata->im2col_tensor_id));
}
opdata->im2col_index = temporaries_count++;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(temporaries_count);
return kTfLiteOk;
}
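// Prepare: validates float32 types and 5-D input/filter shapes, computes the
// 3-D padding and output size, and resizes the optional im2col buffer.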
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params = static_cast<TfLiteConv3DParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->inputs->size == 2 || node->inputs->size == 3);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
TF_LITE_ENSURE_EQ(context, input->dims->size, 5);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 5);
TF_LITE_ENSURE_EQ(context, input->dims->data[4], filter->dims->data[3]);
TfLiteType input_type = input->type;
TF_LITE_ENSURE_TYPES_EQ(context, input_type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type);
const TfLiteTensor* bias = GetInput(context, node, 2);
if (bias) {
TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input_type);
TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 4));
}
int batches = input->dims->data[0];
int channels_out = filter->dims->data[4];
int depth = input->dims->data[1];
int height = input->dims->data[2];
int width = input->dims->data[3];
int filter_depth = filter->dims->data[0];
int filter_height = filter->dims->data[1];
int filter_width = filter->dims->data[2];
int input_channel = filter->dims->data[3];
int out_width, out_height, out_depth;
opdata->padding = ComputePadding3DValues(
params->stride_height, params->stride_width, params->stride_depth,
params->dilation_height_factor, params->dilation_width_factor,
params->dilation_depth_factor, height, width, depth, filter_height,
filter_width, filter_depth, params->padding, &out_height, &out_width,
&out_depth);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(5);
output_size->data[0] = batches;
output_size->data[1] = out_depth;
output_size->data[2] = out_height;
output_size->data[3] = out_width;
output_size->data[4] = channels_out;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
size_t input_type_size;
TF_LITE_ENSURE_STATUS(GetSizeOfType(context, input->type, &input_type_size));
const size_t im2col_bytes = batches * out_depth * out_height * out_width *
input_channel * filter_depth * filter_height *
filter_width * input_type_size;
TF_LITE_ENSURE_OK(context, AllocateTemporaryTensorsIfRequired(
kernel_type, context, node, opdata, params,
filter, im2col_bytes));
if (opdata->need_im2col) {
TfLiteIntArray* im2col_size = TfLiteIntArrayCreate(5);
im2col_size->data[0] = output_size->data[0];
im2col_size->data[1] = output_size->data[1];
im2col_size->data[2] = output_size->data[2];
im2col_size->data[3] = output_size->data[3];
im2col_size->data[4] =
input_channel * filter_depth * filter_height * filter_width;
TfLiteTensor* im2col;
node->temporaries->data[opdata->im2col_index] = opdata->im2col_tensor_id;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node,
opdata->im2col_index, &im2col));
im2col->type = input->type;
im2col->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, im2col, im2col_size));
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return Prepare(kernel_type, context, node);
}
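// EvalFloat: packs the activation range and convolution parameters into
// Conv3DParams, then dispatches to the reference or im2col-based optimized kernel.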
TfLiteStatus EvalFloat(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node, TfLiteConv3DParams* params,
OpData* opdata, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias,
TfLiteTensor* im2col, TfLiteTensor* output) {
float output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
&output_activation_max);
Conv3DParams runtime_params;
runtime_params.padding_values = opdata->padding;
runtime_params.stride_depth = params->stride_depth;
runtime_params.stride_height = params->stride_height;
runtime_params.stride_width = params->stride_width;
runtime_params.dilation_depth = params->dilation_depth_factor;
runtime_params.dilation_height = params->dilation_height_factor;
runtime_params.dilation_width = params->dilation_width_factor;
runtime_params.float_activation_min = output_activation_min;
runtime_params.float_activation_max = output_activation_max;
switch (kernel_type) {
case kReference: {
reference_ops::Conv3D(runtime_params, GetTensorShape(input),
GetTensorData<float>(input), GetTensorShape(filter),
GetTensorData<float>(filter), GetTensorShape(bias),
GetTensorData<float>(bias), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
case kGenericOptimized: {
return optimized_ops::Conv3D(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
GetTensorShape(im2col), GetTensorData<float>(im2col),
CpuBackendContext::GetFromContext(context));
}
}
}
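// Eval: falls back to the reference kernel when the im2col buffer was judged too
// large for mobile, and currently supports only float32 inputs.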
TfLiteStatus Eval(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteConv3DParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* bias = GetInput(context, node, 2);
TfLiteTensor* im2col = opdata->need_im2col
? &context->tensors[opdata->im2col_tensor_id]
: nullptr;
if (opdata->im2col_oversized) {
kernel_type = kReference;
}
switch (input->type) {
case kTfLiteFloat32:
return EvalFloat(kernel_type, context, node, params, opdata, input,
filter, bias, im2col, output);
default:
TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return Eval(kernel_type, context, node);
}
}
TfLiteRegistration* Register_CONV_3D_REF() {
static TfLiteRegistration r = {conv3d::Init, conv3d::Free,
conv3d::Prepare<conv3d::kReference>,
conv3d::Eval<conv3d::kReference>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_GENERIC_OPT() {
static TfLiteRegistration r = {conv3d::Init, conv3d::Free,
conv3d::Prepare<conv3d::kGenericOptimized>,
conv3d::Eval<conv3d::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONV_3D() {
return Register_CONV_3D_GENERIC_OPT();
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
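// Test harness wrapping a single CONV_3D op; the two constructors build the model
// with and without a bias input.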
class Conv3dOpModel : public SingleOpModel {
public:
Conv3dOpModel(const TensorData& input, const TensorData& filter,
const TensorData& bias, const TensorData& output,
Padding padding = Padding_VALID, int32_t stride_depth = 1,
int32_t stride_width = 1, int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
input_ = AddInput(input);
filter_ = AddInput(filter);
bias_ = AddInput(bias);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
Conv3dOpModel(const TensorData& input, const TensorData& filter,
const TensorData& output, Padding padding = Padding_VALID,
int32_t stride_depth = 1, int32_t stride_width = 1,
int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
input_ = AddInput(input);
filter_ = AddInput(filter);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(input_), GetShape(filter_)});
}
void SetFilter(std::vector<float> f) { PopulateTensor(filter_, f); }
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetInput(std::vector<float> data) { PopulateTensor(input_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int filter_;
int bias_;
int output_;
};
template <typename T>
std::vector<T> CreateRangeVector(int N) {
std::vector<T> result;
for (int i = 0; i < N; ++i) result.push_back(i);
return result;
}
TEST(Conv3dOpModel, InvalidInputDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}}),
"input->dims->size != 5");
}
TEST(Conv3dOpModel, InvalidFilterDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}}),
"filter->dims->size != 5");
}
TEST(Conv3dOpModel, MismatchChannelSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 2}},
{TensorType_FLOAT32, {}}),
"input->dims->data.4. != filter->dims->data.3.");
}
TEST(Conv3dOpModel, MismatchBiasSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 2}},
{TensorType_FLOAT32, {1, 3, 2, 2, 1}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}}),
"NumElements.bias. != SizeOfDimension.filter, 4.");
}
TEST(Conv3dOpModel, SimpleFloat32Test) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {}});
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter({-1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 1, 1, 3, 2));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({30, 6, 26, 10, 22, 14}));
}
TEST(Conv3dOpModel, PaddingValidTest) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 3, 4, 5, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {}});
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 2, 3, 4, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({-214, 266, -234, 270, -254, 274, -274, 278, -314, 286,
-334, 290, -354, 294, -374, 298, -414, 306, -434, 310,
-454, 314, -474, 318, -614, 346, -634, 350, -654, 354,
-674, 358, -714, 366, -734, 370, -754, 374, -774, 378,
-814, 386, -834, 390, -854, 394, -874, 398}));
}
TEST(Conv3dOpModel, PaddingSameTest) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 3, 4, 5, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {}}, Padding_SAME);
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 4, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-172, 290, -176, 298, -180, 306, -184, 314, 36, 198, -192,
330, -196, 338, -200, 346, -204, 354, 56, 218, -212, 370,
-216, 378, -220, 386, -224, 394, 76, 238, -226, 82, -230,
82, -234, 82, -238, 82, -80, 80, -252, 450, -256, 458,
-260, 466, -264, 474, 116, 278, -272, 490, -276, 498, -280,
506, -284, 514, 136, 298, -292, 530, -296, 538, -300, 546,
-304, 554, 156, 318, -306, 82, -310, 82, -314, 82, -318,
82, -80, 80, 158, -158, 162, -162, 166, -166, 170, -170,
176, -176, 178, -178, 182, -182, 186, -186, 190, -190, 196,
-196, 198, -198, 202, -202, 206, -206, 210, -210, 216, -216,
220, -220, 224, -224, 228, -228, 232, -232, 237, -237}));
}
TEST(Conv3dOpModel, StrideTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
                  {TensorType_FLOAT32, {}}, Padding_VALID, /*stride_depth=*/2,
                  /*stride_width=*/2, /*stride_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 1, 2, 2));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({52, 8, 68, 8, 244, 8, 260, 8}));
}
TEST(Conv3dOpModel, StrideAndPaddingSameTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
                  {TensorType_FLOAT32, {}}, Padding_SAME, /*stride_depth=*/2,
                  /*stride_width=*/2, /*stride_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter({-1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1,
1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 2, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-70, -28, -86, -12, -82, -16, -90, -8, -262,
164, -278, 180, -178, 80, -186, 88}));
}
TEST(Conv3dOpModel, DilationTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
                  {TensorType_FLOAT32, {}}, Padding_VALID,
                  /*stride_depth=*/1, /*stride_width=*/1, /*stride_height=*/1,
                  ActivationFunctionType_NONE,
                  /*dilation_depth=*/1, /*dilation_width=*/1,
                  /*dilation_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter(CreateRangeVector<float>(32));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 1, 3, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({7248, 7592, 7728, 8104, 8208, 8616, 18768,
19880, 19248, 20392, 19728, 20904}));
}
TEST(Conv3dOpModel, BiasTest) {
Conv3dOpModel m({TensorType_FLOAT32, {2, 2, 3, 4, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}},
                  Padding_VALID, /*stride_depth=*/2,
                  /*stride_width=*/2, /*stride_height=*/2);
m.SetInput(CreateRangeVector<float>(96));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 1, 1, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({53, 10, 69, 10, 245, 10, 261, 10}));
}
TEST(Conv3dOpModel, NoIm2ColTensorTest) {
Conv3dOpModel m({TensorType_FLOAT32, {1, 2, 2, 2, 4}},
{TensorType_FLOAT32, {1, 1, 1, 4, 4}},
{TensorType_FLOAT32, {}}, Padding_VALID);
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter(CreateRangeVector<float>(16));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 2, 2, 2, 4));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({56, 62, 68, 74, 152, 174, 196, 218, 248, 286, 324,
362, 344, 398, 452, 506, 440, 510, 580, 650, 536, 622,
708, 794, 632, 734, 836, 938, 728, 846, 964, 1082}));
}
}
} |
902 | cpp | tensorflow/tensorflow | add | tensorflow/lite/kernels/add.cc | tensorflow/lite/kernels/add_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_ADD_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_ADD_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewAddNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/add.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
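// Shader generator for ADD. The branches below handle, in order: a constant HWC
// addend (optionally broadcast over W/H/C), element-wise addition of runtime
// inputs (including a [1, 1, channels] runtime broadcast), a scalar addend, and a
// per-channel linear addend read from a buffer.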
class Add : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
auto adds = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
auto scalar = std::get_if<float>(&attr.param);
const auto* hwc_tensor =
std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.param);
if (hwc_tensor) {
std::string code;
const std::string x_coord = hwc_tensor->shape.w == 1 ? "0" : "gid.x";
const std::string y_coord = hwc_tensor->shape.h == 1 ? "0" : "gid.y";
const std::string s_coord = hwc_tensor->shape.c == 1 ? "0" : "gid.z";
code = absl::StrCat("vec4 second_val = $hwc_buffer[", x_coord, ", ",
y_coord, ", ", s_coord, "]$;\n");
if (hwc_tensor->shape.c == 1) {
code += " second_val.y = second_val.x;\n";
code += " second_val.z = second_val.x;\n";
code += " second_val.w = second_val.x;\n";
}
code += " value_0 += second_val;\n";
*generated_code = {
{},
{{"hwc_buffer",
MakeReadonlyObject(
uint3(hwc_tensor->shape.w, hwc_tensor->shape.h,
DivideRoundUp(hwc_tensor->shape.c, 4)),
ConvertToPHWC4(
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param)))}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (!adds && !scalar) {
if (ctx.input_shapes.size() == 2 &&
ctx.input_shapes[0] != ctx.input_shapes[1] &&
ctx.input_shapes[1][1] == 1 && ctx.input_shapes[1][2] == 1 &&
ctx.input_shapes[0][3] == ctx.input_shapes[1][3]) {
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
"value_0 = $input_data_0[gid.x, gid.y, gid.z]$ + "
" $input_data_1[0, 0, gid.z]$;",
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
std::string code = "value_0 = value_0";
for (int index = 1; index < ctx.input_shapes.size(); ++index) {
if (ctx.input_shapes[index] != ctx.input_shapes[0]) {
return absl::InvalidArgumentError("Shapes are not equal");
}
absl::StrAppend(&code, " + value_", index);
}
absl::StrAppend(&code, ";");
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (scalar) {
*generated_code = {
{{"scalar", *scalar}},
{},
{},
uint3(),
uint3(),
"value_0 += $scalar$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
*generated_code = {
{},
{{"add_buffer", MakeReadonlyObject(adds->data)}},
{},
uint3(ctx.input_shapes[0][2], ctx.input_shapes[0][1],
DivideRoundUp(ctx.input_shapes[0][3], 4)),
uint3(),
"value_0 += $add_buffer[gid.z]$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewAddNodeShader() {
return std::make_unique<Add>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/add.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
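// Each test builds a single ADD op with SingleOpModel, populates the inputs, and
// compares the shader output against expected values.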
TEST(AddTest, TwoInputTensorsOfTheSameShape) {
TensorRef<BHWC> augend, addend, output;
augend.type = DataType::FLOAT32;
augend.ref = 0;
augend.shape = BHWC(1, 2, 2, 1);
addend.type = DataType::FLOAT32;
addend.ref = 1;
addend.shape = BHWC(1, 2, 2, 1);
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 1);
ElementwiseAttributes attr;
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)},
{augend, addend}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8}));
ASSERT_TRUE(model.PopulateTensor(1, {0.1, 0.2, 0.3, 0.5}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.9, 0.4, 1.0, 1.3}));
}
TEST(AddTest, InputTensorAndScalar) {
ElementwiseAttributes attr;
attr.param = 0.1f;
TensorRef<BHWC> input, output;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 1, 2);
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 3, 1, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.9, 0.3, 0.8, 0.9, 1.2, 2.1}));
}
TEST(AddTest, InputTensorWithConstantBroadcast) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> tensor;
tensor.shape.v = 2;
tensor.id = 1;
tensor.data.push_back(10.0);
tensor.data.push_back(20.0);
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}));
}
TEST(AddTest, InputTensorWithRuntimeBroadcast) {
TensorRef<BHWC> input1;
input1.type = DataType::FLOAT32;
input1.ref = 0;
input1.shape = BHWC(1, 2, 2, 2);
TensorRef<BHWC> input2;
input2.type = DataType::FLOAT32;
input2.ref = 1;
input2.shape = BHWC(1, 1, 1, 2);
ElementwiseAttributes attr;
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)},
{input1, input2}, {output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_TRUE(model.PopulateTensor(1, {10.0, 20.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}));
}
TEST(AddTest, InputTensorWithConstantHWC) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor;
tensor.shape = HWC(2, 2, 2);
tensor.id = 1;
tensor.data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0};
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0}));
}
TEST(AddTest, InputTensorWithConstantHWCBroadcastChannels) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor;
tensor.shape = HWC(2, 2, 1);
tensor.id = 1;
tensor.data = {1.0, 2.0, 3.0, 4.0};
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.0, 3.0, 5.0, 6.0, 8.0, 9.0, 11.0, 12.0}));
}
TEST(AddTest, InputTensorWithConstantHWCBroadcastWidth) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor;
tensor.shape = HWC(2, 1, 2);
tensor.id = 1;
tensor.data = {1.0, 2.0, 3.0, 4.0};
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.0, 4.0, 4.0, 6.0, 8.0, 10.0, 10.0, 12.0}));
}
}
}
}
} |
903 | cpp | tensorflow/tensorflow | tile | tensorflow/lite/kernels/tile.cc | tensorflow/lite/kernels/tile_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_TILE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_TILE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewTileNodeShader();
}
}
}
#endif
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
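// TF-TRT converter for Tile. Validate() checks the replication vector (rank match,
// positive values, implicit-batch restrictions); Convert() lowers the op to an
// ISliceLayer in kWRAP mode, wiring a dynamically computed output shape when the
// multipliers or the input shape are not static.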
class ConvertTile : public OpConverterBase<ConvertTile> {
public:
explicit ConvertTile(const OpConverterParams *params)
: OpConverterBase<ConvertTile>(
params,
{DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}) {}
static constexpr std::array<InputArgSpec, 2> InputSpec() {
return std::array<InputArgSpec, 2>{
InputArgSpec::Create("input_tensor", TrtInputArg::kBoth),
InputArgSpec::Create("weight", TrtInputArg::kBoth)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &repl = inputs.at(1);
if (params.use_implicit_batch && repl.is_tensor()) {
return errors::InvalidArgument(
"Conversion for Tile is not implemented for multipliers "
"passed as a tensor in implicit batch mode.");
}
nvinfer1::DataType dtype;
const int *multiplies;
if (repl.is_weights()) {
TFTRT_CHECK_SHAPE_TENSOR(repl.weights().GetTensor());
dtype = repl.weights().TrtDType();
multiplies = repl.weights().GetPointer<int>();
} else {
dtype = repl.tensor()->getType();
multiplies = nullptr;
}
const auto &node = params.node_def;
TF_RETURN_IF_ERROR(check_type(dtype, nvinfer1::DataType::kINT32, node, 1));
const auto dims = inputs.at(0).GetTrtDims();
const auto nb_dims =
dims.nbDims +
(params.use_implicit_batch && inputs.at(0).is_tensor() ? 1 : 0);
if (multiplies) {
const int mult_numb = repl.weights().count();
if (mult_numb != nb_dims) {
return errors::InvalidArgument(
"The length of the replication vector (", mult_numb,
") of the Tile operation in '", node.name(),
"' is expected to be equal to the rank of the input vector (",
nb_dims, ").");
}
if (std::any_of(multiplies, multiplies + nb_dims,
[](int i) { return i <= 0; })) {
const auto &mul = absl::StrJoin(multiplies, multiplies + nb_dims, ", ");
return errors::InvalidArgument(
"All replications of the Tile operation in '", node.name(),
"' should be positive, got (", mul, ").");
}
if (params.use_implicit_batch && multiplies[0] > 1) {
return errors::Unimplemented(
"The Tile operation along the batch dimension in '", node.name(),
"' is not implemented.");
}
} else {
const auto &repl_dims = repl.GetTrtDims();
if (repl_dims.nbDims != 1) {
return errors::InvalidArgument(
"When replications are defined as a tensor, that tensor must be "
"1-dimensional. Got ",
repl_dims.nbDims, "-dimensional tensor.");
}
if (repl_dims.d[0] >= 0 && repl_dims.d[0] != nb_dims) {
return errors::InvalidArgument(
"When replications are defined as a tensor, "
"the number of its elements (",
repl_dims.d[0], ") must be equal to the rank of the input tensor (",
nb_dims, ").");
}
}
return OkStatus();
}
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
auto *converter = params.converter;
auto *network = converter->network();
const auto &tensor = inputs.at(0);
const auto &replics = inputs.at(1);
const auto dims = tensor.GetTrtDims();
const auto nb_dims = dims.nbDims;
nvinfer1::Dims output_size{nb_dims, {1}};
bool dynamic_flag = replics.is_tensor() || !HasStaticShape(dims);
if (!dynamic_flag) {
const auto dim_offset =
params.use_implicit_batch && tensor.is_tensor() ? 1 : 0;
const auto *input_size = dims.d;
const int *pReplics = replics.weights().GetPointer<int>() + dim_offset;
for (int i = 0; i < nb_dims; i++)
output_size.d[i] = pReplics[i] * input_size[i];
}
StatusOr<TRTNetworkBuilder> builder;
if (tensor.is_weights() || (dynamic_flag && replics.is_weights())) {
builder =
TRTNetworkBuilder::Create(converter->network(), params.weight_store);
TRT_ENSURE_OK(builder);
}
ITensorProxyPtr input_tensor;
if (tensor.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(tensor.weights().GetTrtWeights(), dims);
TRT_ENSURE_PTR_OK(weights_const);
input_tensor = (*weights_const)->getOutput(0);
} else {
input_tensor = tensor.tensor();
}
auto &input_trt_tensor = *input_tensor->trt_tensor();
nvinfer1::ITensor *target_shape = nullptr;
if (dynamic_flag) {
nvinfer1::ITensor *mult;
if (replics.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(replics.weights().GetTrtWeights(),
replics.GetTrtDims());
TRT_ENSURE_PTR_OK(weights_const);
mult = (*weights_const)->getOutput(0);
} else {
const ITensorProxyPtr multiplies = replics.tensor()->trt_tensor();
mult = multiplies->trt_tensor();
}
nvinfer1::ITensor *shape =
network->addShape(input_trt_tensor)->getOutput(0);
target_shape = network
->addElementWise(*shape, *mult,
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
}
nvinfer1::Dims start{nb_dims, {}};
DimsAdapter stride(std::vector<int>(nb_dims, 1));
auto layer = network->addSlice(input_trt_tensor, start, output_size,
stride.AsTrtDims());
layer->setMode(nvinfer1::SliceMode::kWRAP);
if (target_shape) layer->setInput(2, *target_shape);
converter->SetLayerName(layer, params.node_def.name(), "to_tile");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (tensor.is_weights() && params.use_implicit_batch) {
DimsAdapter adap(output_tensor->getDimensions());
TF_RETURN_IF_ERROR(adap.RemoveBatchDimension());
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params.converter, TRT_TensorOrWeights(output_tensor),
adap.AsTrtDims(), false, &output_tensor, params.node_def));
}
AddOutput(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertTile>(), "Tile");
}
}
}
#endif | #include "tensorflow/lite/delegates/gpu/gl/kernels/tile.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
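// Exercises NewTileNodeShader over channel, width, height, and combined HWC tiling.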
TEST(TileTest, ChannelsTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 1, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 1, 6), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f,
4.0f, 5.0f, 6.0f, 4.0f, 5.0f, 6.0f}));
}
TEST(TileTest, WidthTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 2, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 4, 3), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
}
TEST(TileTest, HeightTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 1, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 4, 1, 3), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
}
TEST(TileTest, HWCTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 2, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 4, 4, 6), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
8.0f, 9.0f, 10.0f, 11.0f, 12.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(
FloatNear(1e-6),
{1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 4.0f,
5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f, 10.0f, 11.0f, 12.0f, 7.0f, 8.0f, 9.0f, 7.0f,
8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 10.0f, 11.0f, 12.0f, 1.0f, 2.0f,
3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 4.0f, 5.0f, 6.0f,
1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 4.0f,
5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f,
12.0f, 10.0f, 11.0f, 12.0f, 7.0f, 8.0f, 9.0f, 7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f, 10.0f, 11.0f, 12.0f}));
}
}
}
}
} |
904 | cpp | tensorflow/tensorflow | mfcc | tensorflow/lite/kernels/internal/mfcc.cc | tensorflow/lite/kernels/mfcc_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MFCC_H_
#define TENSORFLOW_CORE_KERNELS_MFCC_H_
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/mfcc_dct.h"
#include "tensorflow/core/kernels/mfcc_mel_filterbank.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
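// Computes MFCC features for one spectrogram frame: mel filterbank, log with a
// small floor, then a DCT to the configured number of coefficients.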
class Mfcc {
public:
Mfcc();
bool Initialize(int input_length, double input_sample_rate);
void Compute(const std::vector<double>& spectrogram_frame,
std::vector<double>* output) const;
void set_upper_frequency_limit(double upper_frequency_limit) {
CHECK(!initialized_) << "Set frequency limits before calling Initialize.";
upper_frequency_limit_ = upper_frequency_limit;
}
void set_lower_frequency_limit(double lower_frequency_limit) {
CHECK(!initialized_) << "Set frequency limits before calling Initialize.";
lower_frequency_limit_ = lower_frequency_limit;
}
void set_filterbank_channel_count(int filterbank_channel_count) {
CHECK(!initialized_) << "Set channel count before calling Initialize.";
filterbank_channel_count_ = filterbank_channel_count;
}
void set_dct_coefficient_count(int dct_coefficient_count) {
CHECK(!initialized_) << "Set coefficient count before calling Initialize.";
dct_coefficient_count_ = dct_coefficient_count;
}
private:
MfccMelFilterbank mel_filterbank_;
MfccDct dct_;
bool initialized_;
double lower_frequency_limit_;
double upper_frequency_limit_;
int filterbank_channel_count_;
int dct_coefficient_count_;
Mfcc(const Mfcc&) = delete;
void operator=(const Mfcc&) = delete;
};
}
#endif
#include <math.h>
#include "tensorflow/core/kernels/mfcc.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
const double kDefaultUpperFrequencyLimit = 4000;
const double kDefaultLowerFrequencyLimit = 20;
const double kFilterbankFloor = 1e-12;
const int kDefaultFilterbankChannelCount = 40;
const int kDefaultDCTCoefficientCount = 13;
Mfcc::Mfcc()
: initialized_(false),
lower_frequency_limit_(kDefaultLowerFrequencyLimit),
upper_frequency_limit_(kDefaultUpperFrequencyLimit),
filterbank_channel_count_(kDefaultFilterbankChannelCount),
dct_coefficient_count_(kDefaultDCTCoefficientCount) {}
bool Mfcc::Initialize(int input_length, double input_sample_rate) {
bool initialized = mel_filterbank_.Initialize(
input_length, input_sample_rate, filterbank_channel_count_,
lower_frequency_limit_, upper_frequency_limit_);
if (initialized) {
initialized =
dct_.Initialize(filterbank_channel_count_, dct_coefficient_count_);
}
initialized_ = initialized;
return initialized;
}
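// Compute: filterbank -> clamp to kFilterbankFloor -> log -> DCT.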
void Mfcc::Compute(const std::vector<double>& spectrogram_frame,
std::vector<double>* output) const {
if (!initialized_) {
LOG(ERROR) << "Mfcc not initialized.";
return;
}
std::vector<double> working;
mel_filterbank_.Compute(spectrogram_frame, &working);
for (int i = 0; i < working.size(); ++i) {
double val = working[i];
if (val < kFilterbankFloor) {
val = kFilterbankFloor;
}
working[i] = log(val);
}
dct_.Compute(working, output);
}
} | #include "tensorflow/core/kernels/mfcc.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
TEST(MfccTest, AgreesWithPythonGoldenValues) {
Mfcc mfcc;
std::vector<double> input;
const int kSampleCount = 513;
input.reserve(kSampleCount);
for (int i = 0; i < kSampleCount; ++i) {
input.push_back(i + 1);
}
  ASSERT_TRUE(mfcc.Initialize(input.size(), /*input_sample_rate=*/22050));
std::vector<double> output;
mfcc.Compute(input, &output);
std::vector<double> expected = {
29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878,
-0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029,
-0.0769791, -0.10806114, -0.06047613};
ASSERT_EQ(expected.size(), output.size());
for (int i = 0; i < output.size(); ++i) {
EXPECT_NEAR(output[i], expected[i], 1e-04);
}
}
TEST(MfccTest, AvoidsNansWithZeroInput) {
Mfcc mfcc;
std::vector<double> input;
const int kSampleCount = 513;
input.reserve(kSampleCount);
for (int i = 0; i < kSampleCount; ++i) {
input.push_back(0.0);
}
  ASSERT_TRUE(mfcc.Initialize(input.size(), /*input_sample_rate=*/22050));
std::vector<double> output;
mfcc.Compute(input, &output);
int expected_size = 13;
ASSERT_EQ(expected_size, output.size());
for (const double value : output) {
EXPECT_FALSE(std::isnan(value));
}
}
TEST(MfccTest, SimpleInputSaneResult) {
Mfcc mfcc;
mfcc.set_lower_frequency_limit(125.0);
mfcc.set_upper_frequency_limit(3800.0);
mfcc.set_filterbank_channel_count(40);
mfcc.set_dct_coefficient_count(40);
const int kSpectrogramSize = 129;
std::vector<double> input(kSpectrogramSize, 0.0);
const int kHotBin = 10;
input[kHotBin] = 1.0;
ASSERT_TRUE(mfcc.Initialize(input.size(), 8000));
std::vector<double> output;
mfcc.Compute(input, &output);
EXPECT_EQ(output.begin() + 1, std::max_element(output.begin(), output.end()));
}
} |
905 | cpp | tensorflow/tensorflow | broadcast_to | tensorflow/lite/kernels/broadcast_to.cc | tensorflow/lite/kernels/broadcast_to_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace reference_ops {
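// BroadcastImpl copies the input span across the innermost broadcasting dimension,
// then, while unwinding the recursion, replicates the partially filled output
// across any outer dimension whose extent differs from the input's.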
template <int N>
void BroadcastImpl(const NdArrayDesc<N>& input_desc, const char* input_data,
const NdArrayDesc<N>& output_desc, char* output_data,
int indexes[N], int dim, const int last_broadcasting_dim,
const int type_size) {
if (dim == last_broadcasting_dim) {
int copy_size = output_desc.strides[dim] * type_size;
const char* data_src =
input_data + SubscriptToIndex(input_desc, indexes) * type_size;
char* data_dst =
output_data + SubscriptToIndex(output_desc, indexes) * type_size;
for (int i = 0; i < output_desc.extents[dim]; ++i, data_dst += copy_size) {
memcpy(data_dst, data_src, copy_size);
}
return;
}
for (indexes[dim] = 0; indexes[dim] < input_desc.extents[dim];
++indexes[dim]) {
BroadcastImpl<N>(input_desc, input_data, output_desc, output_data, indexes,
dim + 1, last_broadcasting_dim, type_size);
}
indexes[dim] = 0;
if (input_desc.extents[dim] != output_desc.extents[dim]) {
int copy_size = output_desc.strides[dim] * type_size;
char* data_src =
output_data + SubscriptToIndex(output_desc, indexes) * type_size;
char* data_dst = data_src + copy_size;
for (int i = 1; i < output_desc.extents[dim]; ++i, data_dst += copy_size) {
memcpy(data_dst, data_src, copy_size);
}
}
}
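// BroadcastTo: extends both shapes to N dimensions, locates the innermost
// dimension that actually broadcasts, and degenerates to a single memcpy when no
// broadcasting is needed.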
template <int N>
inline void BroadcastTo(const RuntimeShape& unextended_input_shape,
const char* input_data,
const RuntimeShape& unextended_output_shape,
char* output_data, TfLiteType data_type) {
NdArrayDesc<N> input_desc;
NdArrayDesc<N> output_desc;
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_input_shape),
&input_desc);
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
&output_desc);
int last_broadcast_dim = -1;
for (int i = N - 1; i >= 0; --i) {
if (input_desc.extents[i] != output_desc.extents[i]) {
last_broadcast_dim = i;
break;
}
}
if (last_broadcast_dim == -1) {
memcpy(output_data, input_data,
unextended_input_shape.FlatSize() * TfLiteTypeGetSize(data_type));
return;
}
int indexes[N] = {0};
BroadcastImpl<N>(input_desc, input_data, output_desc, output_data, indexes, 0,
last_broadcast_dim, TfLiteTypeGetSize(data_type));
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/broadcast_to.h"
#include <string.h>
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace broadcastto {
constexpr int kInputTensor = 0;
constexpr int kShapeTensor = 1;
constexpr int kOutputTensor = 0;
constexpr int kMaxDims = 8;
struct BroadcastToContext {
BroadcastToContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, kInputTensor);
shape = GetInput(context, node, kShapeTensor);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* input;
const TfLiteTensor* shape;
TfLiteTensor* output;
};
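// Resizes the output from the 1-D shape tensor (int32 or int64), checking that the
// requested shape is broadcast-compatible with the input and has at most kMaxDims
// dimensions.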
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
BroadcastToContext* op_context) {
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->shape), 1);
int input_num_dims = NumDimensions(op_context->input);
int output_num_dims = SizeOfDimension(op_context->shape, 0);
TF_LITE_ENSURE_MSG(context, input_num_dims <= output_num_dims,
"Output shape must be broadcastable from input shape.");
TF_LITE_ENSURE_MSG(context, output_num_dims <= kMaxDims,
"BroadcastTo only supports 1-8D tensor.");
auto get_shape_data = [op_context](int i) -> int32_t {
if (op_context->shape->type == kTfLiteInt32) {
return GetTensorData<int32_t>(op_context->shape)[i];
} else {
return GetTensorData<int64_t>(op_context->shape)[i];
}
};
int extending_dims = output_num_dims - input_num_dims;
for (int idx = 0; idx < input_num_dims; ++idx) {
TF_LITE_ENSURE_MSG(context,
(SizeOfDimension(op_context->input, idx) == 1 ||
SizeOfDimension(op_context->input, idx) ==
get_shape_data(extending_dims + idx)),
"Output shape must be broadcastable from input shape.");
}
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_num_dims);
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
scoped_output_shape(output_shape, TfLiteIntArrayFree);
for (int idx = 0; idx < output_num_dims; ++idx) {
output_shape->data[idx] = get_shape_data(idx);
}
return context->ResizeTensor(context, op_context->output,
scoped_output_shape.release());
}
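// Prepare: resizes the output immediately when the shape tensor is constant;
// otherwise the output is marked dynamic and resized at Eval time.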
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_MSG(context,
(NumDimensions(GetInput(context, node, 0)) <= kMaxDims),
"BroadcastTo only supports 1-8D tensor.");
BroadcastToContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.shape->type == kTfLiteInt32 ||
op_context.shape->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
TF_LITE_ENSURE(context, op_context.input->type != kTfLiteString);
if (IsConstantOrPersistentTensor(op_context.shape)) {
return ResizeOutputTensor(context, &op_context);
}
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BroadcastToContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
reference_ops::BroadcastTo<kMaxDims>(
GetTensorShape(op_context.input), op_context.input->data.raw,
GetTensorShape(op_context.output), op_context.output->data.raw,
op_context.input->type);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_TO() {
static TfLiteRegistration r = {nullptr, nullptr, broadcastto::Prepare,
broadcastto::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <class InputType, class ShapeType = int32_t>
class BroadcastToOpModel : public SingleOpModel {
public:
BroadcastToOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> shape_shape) {
input_ = AddInput({GetTensorType<InputType>(), input_shape});
shape_ = AddInput({GetTensorType<ShapeType>(), shape_shape});
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_TO,
BuiltinOptions_BroadcastToOptions,
CreateBroadcastToOptions(builder_).Union());
BuildInterpreter({input_shape, shape_shape});
}
BroadcastToOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> shape_shape,
std::initializer_list<ShapeType> shape_values) {
input_ = AddInput({GetTensorType<InputType>(), input_shape});
shape_ =
AddConstInput(GetTensorType<ShapeType>(), shape_values, shape_shape);
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_TO,
BuiltinOptions_BroadcastToOptions,
CreateBroadcastToOptions(builder_).Union());
BuildInterpreter({input_shape, shape_shape});
}
void SetInput(std::initializer_list<InputType> data) {
PopulateTensor(input_, data);
}
void SetShape(std::initializer_list<ShapeType> data) {
PopulateTensor(shape_, data);
}
std::vector<InputType> GetOutput() {
return ExtractVector<InputType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int shape_;
int output_;
};
template <typename T>
class BroadcastToOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, uint8_t, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(BroadcastToOpTest, DataTypes);
#if GTEST_HAS_DEATH_TEST
TYPED_TEST(BroadcastToOpTest, ShapeMustBe1D) {
EXPECT_DEATH(
BroadcastToOpModel<TypeParam>({2, 3, 4, 4}, {2, 2}, {2, 3, 4, 4}), "");
BroadcastToOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2});
m.SetShape({2, 3, 4, 4});
EXPECT_THAT(m.Invoke(), kTfLiteError);
}
TYPED_TEST(BroadcastToOpTest, TooManyDimensions) {
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9},
{2, 2, 3, 4, 5, 6, 7, 8, 9}),
"BroadcastTo only supports 1-8D tensor.");
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9}),
"BroadcastTo only supports 1-8D tensor.");
}
TYPED_TEST(BroadcastToOpTest, MismatchDimension) {
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({2, 4, 1, 2}, {4}, {2, 4, 1, 3}),
"Output shape must be broadcastable from input shape.");
EXPECT_DEATH(
BroadcastToOpModel<TypeParam>({2, 4, 1, 2, 3}, {4}, {2, 4, 1, 2}),
"Output shape must be broadcastable from input shape.");
BroadcastToOpModel<TypeParam> m1({2, 4, 1, 2}, {4});
m1.SetShape({2, 3, 4, 4});
EXPECT_THAT(m1.Invoke(), kTfLiteError);
BroadcastToOpModel<TypeParam> m2({2, 4, 1, 2}, {5});
m2.SetShape({1, 2, 3, 4, 4});
EXPECT_THAT(m2.Invoke(), kTfLiteError);
}
#endif
TYPED_TEST(BroadcastToOpTest, BroadcastTo1DConstTest) {
BroadcastToOpModel<TypeParam> m({1}, {1}, {4});
m.SetInput({3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 3, 3}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo4DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 2}, {4}, {1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 4, 3, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo8DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 1, 1, 1, 2, 1}, {8},
{1, 1, 1, 1, 1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo1DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1}, {1});
m.SetInput({3});
m.SetShape({4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 3, 3}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo4DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 2}, {4});
m.SetInput({3, 4});
m.SetShape({1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 4, 3, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo8DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 1, 1, 1, 2, 1}, {8});
m.SetInput({3, 4});
m.SetShape({1, 1, 1, 1, 1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast4DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2}, {4}, {3, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast4DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2}, {4});
m.SetInput({1, 2, 3, 4, 5, 6});
m.SetShape({3, 3, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast6DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 2, 1, 3, 1, 2}, {6}, {2, 2, 1, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1, 3, 2, 2}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12,
1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast6DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 2, 1, 3, 1, 2}, {6});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
m.SetShape({2, 2, 1, 3, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1, 3, 2, 2}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12,
1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast8DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2, 1, 4, 1, 1}, {8},
{2, 3, 1, 2, 2, 4, 1, 1});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 1, 2, 2, 4, 1, 1}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6,
7, 8, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16,
13, 14, 15, 16, 17, 18, 19, 20, 17, 18, 19, 20, 21, 22,
23, 24, 21, 22, 23, 24, 1, 2, 3, 4, 1, 2, 3, 4,
5, 6, 7, 8, 5, 6, 7, 8, 9, 10, 11, 12, 9, 10,
11, 12, 13, 14, 15, 16, 13, 14, 15, 16, 17, 18, 19, 20,
17, 18, 19, 20, 21, 22, 23, 24, 21, 22, 23, 24}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast8DDynamicTest) {
BroadcastToOpModel<TypeParam> m({2, 1, 1, 2, 1, 4, 1, 1}, {8});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetShape({2, 3, 2, 2, 2, 4, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 2, 2, 2, 4, 1, 1}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16}));
}
TYPED_TEST(BroadcastToOpTest, ExtendingShape4DConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {4}, {3, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, NoBroadcastingConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {3}, {3, 1, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, NoBroadcasting8DConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 1, 1, 1, 1, 1, 2}, {8},
{3, 1, 1, 1, 1, 1, 1, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 1, 1, 1, 1, 1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, Int64ShapeConstTest) {
BroadcastToOpModel<TypeParam, int64_t> m({1, 1, 1, 1, 1, 1, 2, 1}, {8},
{1, 1, 1, 1, 1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, Int64ShapeDDynamicTest) {
BroadcastToOpModel<TypeParam, int64_t> m({1, 1, 1, 1, 1, 1, 2, 1}, {8});
m.SetInput({3, 4});
m.SetShape({1, 1, 1, 1, 1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastToEmtpyShapeTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {3}, {3, 0, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 0, 2}));
}
}
} |
906 | cpp | tensorflow/tensorflow | maximum_minimum | tensorflow/lite/kernels/maximum_minimum.cc | tensorflow/lite/kernels/maximum_minimum_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
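// Element-wise max/min with slow N-D broadcasting: identical shapes take the flat
// loop, otherwise an NdArrayDesc-based index walk is used.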
template <typename T, typename Op, int N = 5>
void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const T* input2_data,
const RuntimeShape& unextended_output_shape,
T* output_data, Op op) {
if (unextended_input1_shape == unextended_input2_shape) {
const int flat_size =
MatchingElementsSize(unextended_input1_shape, unextended_input2_shape,
unextended_output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = op(input1_data[i], input2_data[i]);
}
} else {
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
NdArrayDesc<N> desc1;
NdArrayDesc<N> desc2;
NdArrayDesc<N> output_desc;
NdArrayDescsForElementwiseBroadcast(
unextended_input1_shape, unextended_input2_shape, &desc1, &desc2);
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
&output_desc);
auto maxmin_func = [&](int indexes[N]) {
output_data[SubscriptToIndex(output_desc, indexes)] =
op(input1_data[SubscriptToIndex(desc1, indexes)],
input2_data[SubscriptToIndex(desc2, indexes)]);
};
NDOpsHelper<N>(output_desc, maxmin_func);
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/maximum_minimum.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#ifdef TFLITE_KERNEL_USE_XNNPACK
#include <algorithm>
#include <array>
#include <limits>
#include "xnnpack.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/minimal_logging.h"
#endif
namespace tflite {
namespace ops {
namespace builtin {
namespace maximum_minimum {
enum KernelType {
kReference,
kGenericOptimized,
};
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input1 = GetInput(context, node, kInputTensor1);
input2 = GetInput(context, node, kInputTensor2);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* input1;
const TfLiteTensor* input2;
TfLiteTensor* output;
};
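// Prepare: both inputs must share a type; the output takes that type and the
// broadcast of the two input shapes.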
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE_TYPES_EQ(context, op_context.input1->type,
op_context.input2->type);
op_context.output->type = op_context.input1->type;
bool requires_broadcast =
!HaveSameShapes(op_context.input1, op_context.input2);
TfLiteIntArray* output_size = nullptr;
if (requires_broadcast) {
TF_LITE_ENSURE_OK(
context, CalculateShapeForBroadcast(context, op_context.input1,
op_context.input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(op_context.input1->dims);
}
return context->ResizeTensor(context, op_context.output, output_size);
}
struct MaximumOp {
template <typename data_type>
static data_type op(data_type el1, data_type el2) {
return el1 > el2 ? el1 : el2;
}
};
struct MinimumOp {
template <typename data_type>
static data_type op(data_type el1, data_type el2) {
return el1 < el2 ? el1 : el2;
}
};
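// Generic path: dispatch to the reference broadcast-aware kernel. The explicit
// int8 specializations below use the optimized broadcast dispatch when the
// shapes require broadcasting and otherwise fall back to the reference kernel.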
template <KernelType kernel_type, typename data_type, typename op_type>
void TFLiteOperation(TfLiteContext* context, TfLiteNode* node,
const OpContext& op_context) {
reference_ops::MaximumMinimumBroadcastSlow(
GetTensorShape(op_context.input1),
GetTensorData<data_type>(op_context.input1),
GetTensorShape(op_context.input2),
GetTensorData<data_type>(op_context.input2),
GetTensorShape(op_context.output),
GetTensorData<data_type>(op_context.output),
op_type::template op<data_type>);
}
template <>
void TFLiteOperation<maximum_minimum::kGenericOptimized, int8, MaximumOp>(
TfLiteContext* context, TfLiteNode* node, const OpContext& op_context) {
tflite::ArithmeticParams op_params;
const bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(op_context.input1), GetTensorShape(op_context.input2),
&op_params);
if (need_broadcast) {
optimized_ops::BroadcastMaximumDispatch(
op_params, GetTensorShape(op_context.input1),
GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2),
GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output),
GetTensorData<int8>(op_context.output), MaximumOp::template op<int8>);
return;
}
reference_ops::MaximumMinimumBroadcastSlow(
GetTensorShape(op_context.input1), GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2), GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output), GetTensorData<int8>(op_context.output),
MaximumOp::template op<int8>);
}
template <>
void TFLiteOperation<maximum_minimum::kGenericOptimized, int8, MinimumOp>(
TfLiteContext* context, TfLiteNode* node, const OpContext& op_context) {
tflite::ArithmeticParams op_params;
const bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(op_context.input1), GetTensorShape(op_context.input2),
&op_params);
if (need_broadcast) {
optimized_ops::BroadcastMinimumDispatch(
op_params, GetTensorShape(op_context.input1),
GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2),
GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output),
GetTensorData<int8>(op_context.output), MinimumOp::template op<int8>);
return;
}
reference_ops::MaximumMinimumBroadcastSlow(
GetTensorShape(op_context.input1), GetTensorData<int8>(op_context.input1),
GetTensorShape(op_context.input2), GetTensorData<int8>(op_context.input2),
GetTensorShape(op_context.output), GetTensorData<int8>(op_context.output),
MinimumOp::template op<int8>);
}
template <KernelType kernel_type, typename OpType>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
if (NumElements(op_context.input1) == 0 ||
NumElements(op_context.input2) == 0) {
return kTfLiteOk;
}
switch (op_context.output->type) {
case kTfLiteFloat32: {
#ifdef TFLITE_KERNEL_USE_XNNPACK
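      // Fast path: use the XNNPACK elementwise maximum/minimum kernels for
      // float32. If the tensors have too many dimensions or the XNNPACK call
      // fails, fall back to the portable implementation below.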
size_t num_input1_dims = static_cast<size_t>(
GetTensorShape(op_context.input1).DimensionsCount());
size_t num_input2_dims = static_cast<size_t>(
GetTensorShape(op_context.input2).DimensionsCount());
if (std::max(num_input1_dims, num_input2_dims) < XNN_MAX_TENSOR_DIMS) {
std::array<size_t, XNN_MAX_TENSOR_DIMS> input1_shape;
std::array<size_t, XNN_MAX_TENSOR_DIMS> input2_shape;
for (size_t i = 0; i < num_input1_dims; ++i) {
input1_shape[i] = GetTensorShape(op_context.input1).Dims(i);
}
for (size_t i = 0; i < num_input2_dims; ++i) {
input2_shape[i] = GetTensorShape(op_context.input2).Dims(i);
}
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
pthreadpool_t threadpool =
cpu_backend_context->get_xnnpack_threadpool();
enum xnn_status status = xnn_status_invalid_parameter;
if (std::is_same<OpType, MaximumOp>::value) {
status = xnn_run_maximum_nd_f32(
num_input1_dims, input1_shape.data(), num_input2_dims,
input2_shape.data(), GetTensorData<float>(op_context.input1),
GetTensorData<float>(op_context.input2),
GetTensorData<float>(op_context.output),
XNN_FLAG_YIELD_WORKERS, threadpool);
if (status != xnn_status_success) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Failed to run xnn_run_maximum_nd_f32. Error code: %d",
status);
TFLiteOperation<kernel_type, float, OpType>(context, node,
op_context);
}
} else if (std::is_same<OpType, MinimumOp>::value) {
status = xnn_run_minimum_nd_f32(
num_input1_dims, input1_shape.data(), num_input2_dims,
input2_shape.data(), GetTensorData<float>(op_context.input1),
GetTensorData<float>(op_context.input2),
GetTensorData<float>(op_context.output),
XNN_FLAG_YIELD_WORKERS, threadpool);
if (status != xnn_status_success) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Failed to run xnn_run_minimum_nd_f32. Error code: %d",
status);
TFLiteOperation<kernel_type, float, OpType>(context, node,
op_context);
}
}
break;
}
#endif
TFLiteOperation<kernel_type, float, OpType>(context, node, op_context);
break;
}
case kTfLiteUInt8:
TFLiteOperation<kernel_type, uint8_t, OpType>(context, node, op_context);
break;
case kTfLiteInt8:
TFLiteOperation<kernel_type, int8_t, OpType>(context, node, op_context);
break;
case kTfLiteInt32:
TFLiteOperation<kernel_type, int32_t, OpType>(context, node, op_context);
break;
case kTfLiteInt64:
TFLiteOperation<kernel_type, int64_t, OpType>(context, node, op_context);
break;
case kTfLiteInt16:
TFLiteOperation<kernel_type, int16_t, OpType>(context, node, op_context);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by Maximum.",
op_context.output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MAXIMUM_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kReference,
maximum_minimum::MaximumOp>};
return &r;
}
TfLiteRegistration* Register_MAXIMUM_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kGenericOptimized,
maximum_minimum::MaximumOp>};
return &r;
}
TfLiteRegistration* Register_MINIMUM_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kReference,
maximum_minimum::MinimumOp>};
return &r;
}
TfLiteRegistration* Register_MINIMUM_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, maximum_minimum::Prepare,
maximum_minimum::Eval<maximum_minimum::kGenericOptimized,
maximum_minimum::MinimumOp>};
return &r;
}
TfLiteRegistration* Register_MAXIMUM() {
return Register_MAXIMUM_GENERIC_OPT();
}
TfLiteRegistration* Register_MINIMUM() {
return Register_MINIMUM_GENERIC_OPT();
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <class T>
class MaxMinOpModel : public SingleOpModel {
public:
MaxMinOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorType& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
MaxMinOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2,
std::initializer_list<T> input2_values,
const TensorType& output) {
input1_ = AddInput(input1);
input2_ = AddConstInput<T>(input2, input2_values);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(std::initializer_list<T> data) {
PopulateTensor(input1_, data);
}
void SetInput2(std::initializer_list<T> data) {
PopulateTensor(input2_, data);
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
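// Builds and runs a model for the given op, then checks the output shape and
// values. When is_constant is true, the second input is added to the graph as
// a constant tensor instead of being populated at runtime.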
template <typename data_type>
void TestModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values,
std::initializer_list<data_type> output_values,
               bool is_constant = false) {
std::unique_ptr<MaxMinOpModel<data_type>> m;
if (is_constant) {
m = std::make_unique<MaxMinOpModel<data_type>>(op, input1, input2,
input2_values, output.type);
} else {
m = std::make_unique<MaxMinOpModel<data_type>>(op, input1, input2,
output.type);
m->SetInput2(input2_values);
}
m->SetInput1(input1_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(output.shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(output_values));
}
TEST(MaximumOpTest, FloatTest) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, 11.0, -2.0, -1.44};
std::initializer_list<float> data2 = {-1.0, 0.0, 1.0, 12.0, -3.0, -1.43};
TestModel<float>(BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}}, data1, data2,
{1.0, 0.0, 1.0, 12.0, -2.0, -1.43});
TestModel<float>(BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {3, 1, 2}}, data1, data2,
{-1.0, 0.0, -1.0, 11.0, -3.0, -1.44});
}
TEST(MaxMinOpTest, Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MAXIMUM, {TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}}, data1, data2,
{1, 0, 2, 12, 255, 23});
TestModel<uint8_t>(BuiltinOperator_MINIMUM, {TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}},
{TensorType_UINT8, {3, 1, 2}}, data1, data2,
{0, 0, 1, 11, 2, 1});
}
TEST(MaxMinOpTest, Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {3, 1, 2}}, {TensorType_INT8, {3, 1, 2}},
data1, data2, {1, 0, 2, 12, 123, 23});
TestModel<int8_t>(BuiltinOperator_MINIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {3, 1, 2}}, {TensorType_INT8, {3, 1, 2}},
data1, data2, {0, 0, 1, 11, 2, 1});
}
TEST(MaxMinOpTest, Int16Test) {
std::initializer_list<int16_t> data1 = {-32768, 0, 2, 11, 2, 23};
std::initializer_list<int16_t> data2 = {0, 0, 1, 32767, 123, 1};
TestModel<int16_t>(BuiltinOperator_MAXIMUM, {TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}}, data1, data2,
{0, 0, 2, 32767, 123, 23});
TestModel<int16_t>(BuiltinOperator_MINIMUM, {TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}},
{TensorType_INT16, {3, 1, 2}}, data1, data2,
{-32768, 0, 1, 11, 2, 1});
}
TEST(MaximumOpTest, FloatWithBroadcastTest) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, -2.0, -1.44, 11.0};
std::initializer_list<float> data2 = {0.5, 2.0};
TestModel<float>(BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {1.0, 2.0, 0.5, 2.0, 0.5, 11.0});
TestModel<float>(BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {0.5, 0.0, -1.0, -2.0, -1.44, 2.0});
}
TEST(MaximumOpTest, FloatWithBroadcastTest_ScalarY) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, -2.0, -1.44, 11.0};
std::initializer_list<float> data2 = {0.5};
TestModel<float>(BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {1.0, 0.5, 0.5, 0.5, 0.5, 11.0},
true);
TestModel<float>(BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 2}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {3, 1, 2}},
data1, data2, {0.5, 0.0, -1.0, -2.0, -1.44, 0.5},
true);
}
TEST(MaximumOpTest, Int32WithBroadcastTest) {
std::initializer_list<int32_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int32_t> data2 = {2};
TestModel<int32_t>(BuiltinOperator_MAXIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {2, 2, 2, 2, 3, 11});
TestModel<int32_t>(BuiltinOperator_MINIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {1, 0, -1, -2, 2, 2});
}
TEST(MaximumOpTest, Int32WithBroadcastTest_ScalarY) {
std::initializer_list<int32_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int32_t> data2 = {2};
TestModel<int32_t>(BuiltinOperator_MAXIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {2, 2, 2, 2, 3, 11}, true);
TestModel<int32_t>(BuiltinOperator_MINIMUM, {TensorType_INT32, {3, 1, 2}},
{TensorType_INT32, {}}, {TensorType_INT32, {3, 1, 2}},
data1, data2, {1, 0, -1, -2, 2, 2}, true);
}
TEST(MaximumOpTest, Int8WithBroadcastTest_ScalarY) {
std::initializer_list<int8_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int8_t> data2 = {2};
TestModel<int8_t>(BuiltinOperator_MAXIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {}}, {TensorType_INT8, {3, 1, 2}}, data1,
data2, {2, 2, 2, 2, 3, 11}, true);
TestModel<int8_t>(BuiltinOperator_MINIMUM, {TensorType_INT8, {3, 1, 2}},
{TensorType_INT8, {}}, {TensorType_INT8, {3, 1, 2}}, data1,
data2, {1, 0, -1, -2, 2, 2}, true);
}
TEST(MaxMinOpTest, Int8Test8D) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM,
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}}, data1, data2,
{1, 0, 2, 12, 123, 23});
TestModel<int8_t>(BuiltinOperator_MINIMUM,
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}},
{TensorType_INT8, {3, 1, 2, 1, 1, 1, 1, 1}}, data1, data2,
{0, 0, 1, 11, 2, 1});
}
TEST(MaximumOpTest, FloatWithBroadcastTest5D) {
std::initializer_list<float> data1 = {1.0, 0.0, -1.0, -2.0, -1.44, 11.0};
std::initializer_list<float> data2 = {0.5, 2.0};
TestModel<float>(
BuiltinOperator_MAXIMUM, {TensorType_FLOAT32, {3, 1, 1, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 1, 1, 2}}, data1,
data2, {1.0, 2.0, 0.5, 2.0, 0.5, 11.0});
TestModel<float>(
BuiltinOperator_MINIMUM, {TensorType_FLOAT32, {3, 1, 1, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {3, 1, 1, 1, 2}}, data1,
data2, {0.5, 0.0, -1.0, -2.0, -1.44, 2.0});
}
TEST(MaximumOpTest, Int32WithBroadcastTest5D) {
std::initializer_list<int32_t> data1 = {1, 0, -1, -2, 3, 11};
std::initializer_list<int32_t> data2 = {2};
TestModel<int32_t>(
BuiltinOperator_MAXIMUM, {TensorType_INT32, {3, 1, 2, 1, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2, 1, 1}}, data1,
data2, {2, 2, 2, 2, 3, 11});
TestModel<int32_t>(
BuiltinOperator_MINIMUM, {TensorType_INT32, {3, 1, 2, 1, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {3, 1, 2, 1, 1}}, data1,
data2, {1, 0, -1, -2, 2, 2});
}
}
} |
907 | cpp | tensorflow/tensorflow | conv3d_transpose | tensorflow/lite/kernels/conv3d_transpose.cc | tensorflow/lite/kernels/conv3d_transpose_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_TRANSPOSE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_TRANSPOSE_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void Conv3DTranspose(
const Conv3DTransposeParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& filter_shape,
const float* filter_data, const RuntimeShape& bias_shape,
const float* bias_data, const RuntimeShape& output_shape,
float* output_data) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int stride_depth = params.stride_depth;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int pad_depth = params.padding_values.depth;
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 5);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int input_num_channels = MatchingDim(input_shape, 4, filter_shape, 4);
const int output_num_channels = output_shape.Dims(4);
const int input_depth = input_shape.Dims(1);
const int input_height = input_shape.Dims(2);
const int input_width = input_shape.Dims(3);
const int filter_depth = filter_shape.Dims(0);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
const int output_depth = output_shape.Dims(1);
const int output_height = output_shape.Dims(2);
const int output_width = output_shape.Dims(3);
if (bias_data) {
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_num_channels);
}
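  // The transposed convolution is computed by zeroing the output and then
  // scattering each input element, weighted by the filter, into the strided
  // output positions it contributes to. Bias and activation are applied in a
  // second pass below.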
const int num_elements = output_shape.FlatSize();
for (int i = 0; i < num_elements; i++) {
output_data[i] = 0.0f;
}
for (int batch = 0; batch < batches; ++batch) {
for (int in_d = 0; in_d < input_depth; ++in_d) {
for (int in_y = 0; in_y < input_height; ++in_y) {
for (int in_x = 0; in_x < input_width; ++in_x) {
for (int in_channel = 0; in_channel < input_num_channels;
++in_channel) {
const int out_x_origin = (in_x * stride_width) - pad_width;
const int out_y_origin = (in_y * stride_height) - pad_height;
const int out_d_origin = (in_d * stride_depth) - pad_depth;
for (int filter_d = 0; filter_d < filter_depth; ++filter_d) {
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
for (int out_channel = 0; out_channel < output_num_channels;
++out_channel) {
const int out_x =
out_x_origin + params.dilation_width * filter_x;
const int out_y =
out_y_origin + params.dilation_height * filter_y;
const int out_d =
out_d_origin + params.dilation_depth * filter_d;
if ((out_x >= 0) && (out_x < output_width) &&
(out_y >= 0) && (out_y < output_height) &&
(out_d >= 0) && (out_d < output_depth)) {
float input_value = input_data[Offset(
input_shape, batch, in_d, in_y, in_x, in_channel)];
float filter_value = filter_data[Offset(
filter_shape, filter_d, filter_y, filter_x,
out_channel, in_channel)];
output_data[Offset(output_shape, batch, out_d, out_y,
out_x, out_channel)] +=
input_value * filter_value;
}
}
}
}
}
}
}
}
}
}
const float float_activation_min = params.float_activation_min;
const float float_activation_max = params.float_activation_max;
float* data_ptr = output_data;
if (bias_data) {
const int outer_size =
batches * output_depth * output_height * output_width;
for (int n = 0; n < outer_size; ++n) {
for (int c = 0; c < output_num_channels; ++c) {
data_ptr[c] = ActivationFunctionWithMinMax(data_ptr[c] + bias_data[c],
float_activation_min,
float_activation_max);
}
data_ptr += output_num_channels;
}
} else {
const int flat_size = output_shape.FlatSize();
for (int i = 0; i < flat_size; ++i) {
data_ptr[i] = ActivationFunctionWithMinMax(
data_ptr[i], float_activation_min, float_activation_max);
}
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/conv3d_transpose.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace conv3d_transpose {
enum KernelType {
kReference,
kGenericOptimized,
};
const int kTensorNotAllocated = -1;
struct OpData {
Padding3DValues padding;
int col2im_id = kTensorNotAllocated;
int col2im_index;
bool need_col2im = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* opdata = new OpData;
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
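// Only the optimized kernel needs a temporary col2im buffer, so the temporary
// is allocated for kGenericOptimized and skipped for the reference kernel.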
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context,
TfLiteNode* node,
KernelType kernel_type) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
int temporaries_count = 0;
if (kernel_type == kGenericOptimized) {
if (data->col2im_id == kTensorNotAllocated) {
context->AddTensors(context, 1, &data->col2im_id);
}
data->col2im_index = temporaries_count++;
data->need_col2im = true;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(temporaries_count);
return kTfLiteOk;
}
TfLiteStatus ResizeOutputAndTemporaryTensors(
TfLiteContext* context, OpData* opdata, TfLiteConv3DTransposeParams* params,
const TfLiteTensor* shape_tensor, const TfLiteTensor* filter,
const TfLiteTensor* input, TfLiteTensor* col2im, TfLiteTensor* output) {
auto shape_data = GetTensorData<int32_t>(shape_tensor);
TF_LITE_ENSURE_EQ(context, shape_data[0], SizeOfDimension(input, 0));
TF_LITE_ENSURE_EQ(context, shape_data[4] % SizeOfDimension(filter, 3), 0);
const RuntimeShape& filter_shape = GetTensorShape(filter);
const int depth = shape_data[1];
const int height = shape_data[2];
const int width = shape_data[3];
const int filter_depth = filter_shape.Dims(0);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
int unused_out_width, unused_out_height, unused_out_depth;
opdata->padding = ComputePadding3DValues(
params->stride_height, params->stride_width, params->stride_depth,
params->dilation_height_factor, params->dilation_width_factor,
params->dilation_depth_factor, height, width, depth, filter_height,
filter_width, filter_depth, params->padding, &unused_out_height,
&unused_out_width, &unused_out_depth);
TF_LITE_ENSURE_EQ(context, unused_out_depth, SizeOfDimension(input, 1));
TF_LITE_ENSURE_EQ(context, unused_out_height, SizeOfDimension(input, 2));
TF_LITE_ENSURE_EQ(context, unused_out_width, SizeOfDimension(input, 3));
TfLiteIntArray* output_shape =
TfLiteIntArrayCreate(NumElements(shape_tensor));
for (int i = 0; i < output_shape->size; ++i) {
output_shape->data[i] = GetTensorData<int32_t>(shape_tensor)[i];
}
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
if (opdata->need_col2im) {
TfLiteIntArray* col2im_shape_array = TfLiteIntArrayCreate(2);
const RuntimeShape& input_shape = GetTensorShape(input);
col2im_shape_array->data[0] =
input_shape.Dims(1) * input_shape.Dims(2) * input_shape.Dims(3);
col2im_shape_array->data[1] =
filter_depth * filter_height * filter_width * filter_shape.Dims(3);
col2im->type = kTfLiteFloat32;
col2im->allocation_type = kTfLiteDynamic;
return context->ResizeTensor(context, col2im, col2im_shape_array);
}
return kTfLiteOk;
}
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConv3DTransposeParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->inputs->size == 3 || node->inputs->size == 4);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &output_shape));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input));
TF_LITE_ENSURE_EQ(context, output_shape->dims->size, 1);
TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 5);
TF_LITE_ENSURE_EQ(context, input->dims->size, 5);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 5);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input, 4),
SizeOfDimension(filter, 4));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
TF_LITE_ENSURE_TYPES_EQ(context, output_shape->type, kTfLiteInt32);
const TfLiteTensor* bias = GetInput(context, node, 3);
if (bias) {
TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input->type);
TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 3));
}
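  // The optimized kernel does not handle dilation, so fall back to the
  // reference kernel whenever any dilation factor is greater than one.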
if (params->dilation_depth_factor > 1 || params->dilation_height_factor > 1 ||
params->dilation_width_factor > 1) {
kernel_type = kReference;
}
TF_LITE_ENSURE_STATUS(
AllocateTemporaryTensorsIfRequired(context, node, kernel_type));
TfLiteTensor* col2im = nullptr;
if (opdata->need_col2im) {
node->temporaries->data[opdata->col2im_index] = opdata->col2im_id;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node,
opdata->col2im_index, &col2im));
}
if (!IsConstantOrPersistentTensor(output_shape)) {
SetTensorToDynamic(output);
if (opdata->need_col2im) {
SetTensorToDynamic(col2im);
}
} else {
TF_LITE_ENSURE_STATUS(ResizeOutputAndTemporaryTensors(
context, opdata, params, output_shape, filter, input, col2im, output));
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return Prepare(kernel_type, context, node);
}
void EvalFloat(KernelType kernel_type, TfLiteContext* context, TfLiteNode* node,
TfLiteConv3DTransposeParams* params, OpData* opdata,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* col2im,
TfLiteTensor* output) {
float output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
&output_activation_max);
Conv3DTransposeParams runtime_params;
runtime_params.padding_values = opdata->padding;
runtime_params.stride_depth = params->stride_depth;
runtime_params.stride_height = params->stride_height;
runtime_params.stride_width = params->stride_width;
runtime_params.dilation_depth = params->dilation_depth_factor;
runtime_params.dilation_height = params->dilation_height_factor;
runtime_params.dilation_width = params->dilation_width_factor;
runtime_params.float_activation_min = output_activation_min;
runtime_params.float_activation_max = output_activation_max;
switch (kernel_type) {
case kReference: {
reference_ops::Conv3DTranspose(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output));
break;
}
case kGenericOptimized: {
optimized_ops::Conv3DTranspose(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
GetTensorShape(col2im), GetTensorData<float>(col2im),
CpuBackendContext::GetFromContext(context));
} break;
}
}
TfLiteStatus Eval(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConv3DTransposeParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &output_shape));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input));
const TfLiteTensor* bias = GetInput(context, node, 3);
TfLiteTensor* col2im = opdata->need_col2im
? GetTemporary(context, node, opdata->col2im_index)
: nullptr;
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputAndTemporaryTensors(
context, opdata, params, output_shape,
filter, input, col2im, output));
}
if (params->dilation_depth_factor > 1 || params->dilation_height_factor > 1 ||
params->dilation_width_factor > 1) {
kernel_type = kReference;
}
switch (input->type) {
case kTfLiteFloat32:
EvalFloat(kernel_type, context, node, params, opdata, input, filter, bias,
col2im, output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return Eval(kernel_type, context, node);
}
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE_REF() {
static TfLiteRegistration r = {
conv3d_transpose::Init, conv3d_transpose::Free,
conv3d_transpose::Prepare<conv3d_transpose::kReference>,
conv3d_transpose::Eval<conv3d_transpose::kReference>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE_GENERIC_OPT() {
static TfLiteRegistration r = {
conv3d_transpose::Init, conv3d_transpose::Free,
conv3d_transpose::Prepare<conv3d_transpose::kGenericOptimized>,
conv3d_transpose::Eval<conv3d_transpose::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE() {
return Register_CONV_3D_TRANSPOSE_GENERIC_OPT();
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
class Conv3dTransposeOpModel : public SingleOpModel {
public:
Conv3dTransposeOpModel(
std::initializer_list<int> output_shape_data, const TensorData& filter,
const TensorData& input, const TensorData& bias, const TensorData& output,
TestType test_type, Padding padding = Padding_VALID,
int32_t stride_depth = 1, int32_t stride_width = 1,
int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
if (test_type == TestType::kDynamic) {
output_shape_ = AddInput({TensorType_INT32, {5}});
} else {
output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {5});
}
filter_ = AddInput(filter);
input_ = AddInput(input);
bias_ = AddInput(bias);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D_TRANSPOSE, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(output_shape_), GetShape(filter_),
GetShape(input_), GetShape(bias_)});
if (test_type == TestType::kDynamic) {
PopulateTensor(output_shape_, output_shape_data);
}
}
Conv3dTransposeOpModel(
std::initializer_list<int> output_shape_data, const TensorData& filter,
const TensorData& input, const TensorData& output, TestType test_type,
Padding padding = Padding_VALID, int32_t stride_depth = 1,
int32_t stride_width = 1, int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
if (test_type == TestType::kDynamic) {
output_shape_ = AddInput({TensorType_INT32, {5}});
} else {
output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {5});
}
filter_ = AddInput(filter);
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D_TRANSPOSE, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter(
{GetShape(output_shape_), GetShape(filter_), GetShape(input_)});
if (test_type == TestType::kDynamic) {
PopulateTensor(output_shape_, output_shape_data);
}
}
void SetFilter(std::vector<float> f) { PopulateTensor(filter_, f); }
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetInput(std::vector<float> data) { PopulateTensor(input_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int output_shape_;
int input_;
int filter_;
int bias_;
int output_;
};
template <typename T>
std::vector<T> CreateRangeVector(int N) {
std::vector<T> result;
for (int i = 0; i < N; ++i) result.push_back(i);
return result;
}
class Conv3dTransposeOpTest : public ::testing::TestWithParam<TestType> {};
TEST_P(Conv3dTransposeOpTest, InvalidInputDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"input->dims->size != 5");
}
TEST_P(Conv3dTransposeOpTest, InvalidFilterDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"filter->dims->size != 5");
}
TEST_P(Conv3dTransposeOpTest, MismatchChannelSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"SizeOfDimension.input, 4. != SizeOfDimension.filter, 4.");
}
TEST_P(Conv3dTransposeOpTest, MismatchBiasSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {1, 3, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4, 2}}, {TensorType_FLOAT32, {3}},
{TensorType_FLOAT32, {}}, Conv3dTransposeOpTest::GetParam()),
"NumElements.bias. != SizeOfDimension.filter, 3.");
}
TEST_P(Conv3dTransposeOpTest, SimpleFloat32Test) {
Conv3dTransposeOpModel m(
{1, 3, 3, 5, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam());
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter({-1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 3, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -4, -4, -8, -8, -12, -12, 1, 1, -16, -16, -18,
-16, -18, -20, -18, -24, 14, -12, 1, 17, 18, 4, 22, 4,
26, 4, 29, -29, -34, -32, -36, -30, -36, -30, -36, -30, 14,
2, -50, 2, -8, -26, -8, -26, -8, -26, 74, -44, -16, 50,
28, 4, 28, 4, 28, 4, 60, -62, -1, 33, 32, 38, 36,
42, 40, 46, 45, 1, -34, 50, 10, 54, 10, 58, 10, 62,
60, 0, -49, 1, -54, 0, -58, 0, -62, 0, -1, -1}));
}
TEST_P(Conv3dTransposeOpTest, PaddingValidTest) {
Conv3dTransposeOpModel m(
{1, 4, 5, 6, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 3, 4, 5, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam());
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 4, 5, 6, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -6, -6, -14, -14, -22, -22, -30, -30, -17,
-17, -22, -20, -50, -46, -58, -58, -66, -70, -74, -82,
-20, -54, -62, -40, -90, -106, -98, -118, -106, -130, -114,
-142, -20, -94, -102, -60, -130, -166, -138, -178, -146, -190,
-154, -202, -20, -134, -61, 1, -4, -60, -4, -64, -4,
-68, -4, -72, 77, -77, -80, -80, -160, -164, -164, -172,
-168, -180, -172, -188, -96, -96, -162, -98, -188, -282, -196,
-290, -204, -298, -212, -306, -18, -196, -202, -118, -228, -322,
-236, -330, -244, -338, -252, -346, -18, -216, -242, -138, -268,
-362, -276, -370, -284, -378, -292, -386, -18, -236, -202, 2,
-68, -78, -72, -78, -76, -78, -80, -78, 158, -80, -80,
-160, -240, -324, -244, -332, -248, -340, -252, -348, -176, -176,
-322, -178, -348, -442, -356, -450, -364, -458, -372, -466, -18,
-276, -362, -198, -388, -482, -396, -490, -404, -498, -412, -506,
-18, -296, -402, -218, -428, -522, -436, -530, -444, -538, -452,
-546, -18, -316, -362, 2, -148, -78, -152, -78, -156, -78,
-160, -78, 238, -80, 161, 1, 166, 2, 170, 2, 174,
2, 178, 2, 1, 1, 20, 2, 22, 164, 22, 168,
22, 172, 22, 176, 2, 178, 20, 2, 22, 184, 22,
188, 22, 192, 22, 196, 2, 198, 20, 2, 22, 204,
22, 208, 22, 212, 22, 216, 2, 218, -221, 1, -224,
222, -228, 226, -232, 230, -236, 234, 1, 237}));
}
TEST_P(Conv3dTransposeOpTest, PaddingSameTest) {
Conv3dTransposeOpModel m(
{1, 3, 4, 5, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 3, 4, 5, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_SAME);
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 4, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -2, 0, -2, 0, -2, 0, -2, 0, -2, 0, -4, 2,
-4, 2, -4, 2, -4, 2, -2, 0, -4, 2, -4, 2, -4, 2,
-4, 2, -2, 0, -4, 2, -4, 2, -4, 2, -4, 2, 0, 0,
-2, 2, -6, 2, -10, 2, -14, 2, 0, 2, -18, 10, -18, 14,
-18, 18, -18, 22, 20, 22, -18, 30, -18, 34, -18, 38, -18, 42,
40, 42, -18, 50, -18, 54, -18, 58, -18, 62, 0, 0, -82, 2,
-86, 2, -90, 2, -94, 2, 80, 82, -18, 90, -18, 94, -18, 98,
-18, 102, 100, 102, -18, 110, -18, 114, -18, 118, -18, 122, 120, 122,
-18, 130, -18, 134, -18, 138, -18, 142}));
}
TEST_P(Conv3dTransposeOpTest, PaddingValidComplexTest) {
Conv3dTransposeOpModel m(
{2, 4, 3, 2, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 3, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID);
m.SetInput(CreateRangeVector<float>(24));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, 1, 1, -1, -2, 4, 2, 0, -1, -5, 1, 5, -2, 10, 2, -2,
-4, 8, 4, 8, -2, -18, 2, 18, -2, 26, 2, -2, -4, 8, 4, 24,
-2, -34, 2, 34, -1, 17, 1, -1, -2, 4, 2, 16, -1, -21, 1, 21,
-1, 25, 1, -1, -2, 4, 2, 24, -1, -29, 1, 29, -2, 58, 2, -2,
-4, 8, 4, 56, -2, -66, 2, 66, -2, 74, 2, -2, -4, 8, 4, 72,
-2, -82, 2, 82, -1, 41, 1, -1, -2, 4, 2, 40, -1, -45, 1, 45}));
}
TEST_P(Conv3dTransposeOpTest, StrideTest) {
Conv3dTransposeOpModel m(
{2, 4, 3, 2, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID,
      /*stride_depth=*/2, /*stride_width=*/1, /*stride_height=*/1);
m.SetInput(CreateRangeVector<float>(16));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, 1, 1, -1, -2, 4, 2, 0, -1, -5, 1, 5, -1, 1, 1, -1,
-2, 4, 2, 0, -1, -5, 1, 5, -1, 9, 1, -1, -2, 4, 2, 8,
-1, -13, 1, 13, -1, 9, 1, -1, -2, 4, 2, 8, -1, -13, 1, 13,
-1, 17, 1, -1, -2, 4, 2, 16, -1, -21, 1, 21, -1, 17, 1, -1,
-2, 4, 2, 16, -1, -21, 1, 21, -1, 25, 1, -1, -2, 4, 2, 24,
-1, -29, 1, 29, -1, 25, 1, -1, -2, 4, 2, 24, -1, -29, 1, 29}));
}
TEST_P(Conv3dTransposeOpTest, StrideAndPaddingSameTest) {
Conv3dTransposeOpModel m(
{2, 4, 2, 1, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_SAME,
      /*stride_depth=*/2, /*stride_width=*/1, /*stride_height=*/1);
m.SetInput(CreateRangeVector<float>(16));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 2, 1, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-1, 1, -2, 4, -1, 1, -2, 4, -1, 9, -2,
4, -1, 9, -2, 4, -1, 17, -2, 4, -1, 17,
-2, 4, -1, 25, -2, 4, -1, 25, -2, 4}));
}
TEST_P(Conv3dTransposeOpTest, DilationTest) {
Conv3dTransposeOpModel m(
{1, 3, 3, 2, 2}, {TensorType_FLOAT32, {1, 2, 2, 2, 1}},
{TensorType_FLOAT32, {1, 3, 1, 1, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID,
      /*stride_depth=*/1, /*stride_width=*/1, /*stride_height=*/1,
      ActivationFunctionType_NONE,
      /*dilation_depth=*/1, /*dilation_width=*/1, /*dilation_height=*/2);
m.SetInput(CreateRangeVector<float>(3));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 3, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, -1, 1, 1, 0, 0, 0, 0, -1, 1, 1, -1,
2, -2, 2, 2, 0, 0, 0, 0, -2, 2, 2, -2}));
}
TEST_P(Conv3dTransposeOpTest, BiasTest) {
Conv3dTransposeOpModel m({2, 4, 3, 2, 2},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 3, 2, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID);
m.SetInput(CreateRangeVector<float>(24));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{0, 3, 2, 1, -1, 6, 3, 2, 0, -3, 2, 7, -1, 12, 3, 0,
-3, 10, 5, 10, -1, -16, 3, 20, -1, 28, 3, 0, -3, 10, 5, 26,
-1, -32, 3, 36, 0, 19, 2, 1, -1, 6, 3, 18, 0, -19, 2, 23,
0, 27, 2, 1, -1, 6, 3, 26, 0, -27, 2, 31, -1, 60, 3, 0,
-3, 10, 5, 58, -1, -64, 3, 68, -1, 76, 3, 0, -3, 10, 5, 74,
-1, -80, 3, 84, 0, 43, 2, 1, -1, 6, 3, 42, 0, -43, 2, 47}));
}
INSTANTIATE_TEST_SUITE_P(Conv3dTransposeOpTest, Conv3dTransposeOpTest,
::testing::Values(TestType::kConst,
TestType::kDynamic));
}
} |
908 | cpp | tensorflow/tensorflow | resize_nearest_neighbor | tensorflow/lite/kernels/resize_nearest_neighbor.cc | tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_
#include <algorithm>
#include <cmath>
#include <cstring>
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
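// Maps an output coordinate to the nearest input coordinate. With
// align_corners the scale is (input_size - 1) / (output_size - 1) and the
// scaled value is rounded; otherwise the scale is input_size / output_size and
// the scaled value is floored. half_pixel_centers adds a 0.5 offset before
// scaling and clamps the result to be non-negative.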
inline int32_t GetNearestNeighbor(const int input_value,
const int32_t input_size,
const int32_t output_size,
const bool align_corners,
const bool half_pixel_centers) {
const float scale =
(align_corners && output_size > 1)
? (input_size - 1) / static_cast<float>(output_size - 1)
: input_size / static_cast<float>(output_size);
const float offset = half_pixel_centers ? 0.5f : 0.0f;
int32_t output_value = std::min(
align_corners
? static_cast<int32_t>(TfLiteRound((input_value + offset) * scale))
: static_cast<int32_t>(std::floor((input_value + offset) * scale)),
input_size - 1);
if (half_pixel_centers) {
output_value = std::max(static_cast<int32_t>(0), output_value);
}
return output_value;
}
template <typename T>
inline void ResizeNearestNeighbor(
const tflite::ResizeNearestNeighborParams& op_params,
const RuntimeShape& unextended_input_shape, const T* input_data,
const RuntimeShape& output_size_shape, const int32_t* output_size_data,
const RuntimeShape& unextended_output_shape, T* output_data) {
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
int32_t input_height = input_shape.Dims(1);
int32_t input_width = input_shape.Dims(2);
int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
TFLITE_DCHECK_EQ(output_size_shape.FlatSize(), 2);
int32_t output_height = output_size_data[0];
int32_t output_width = output_size_data[1];
const int col_offset = input_shape.Dims(3);
const int row_offset = input_shape.Dims(2) * col_offset;
const int batch_offset = input_shape.Dims(1) * row_offset;
const T* input_ptr = input_data;
T* output_ptr = output_data;
for (int b = 0; b < batches; ++b) {
for (int y = 0; y < output_height; ++y) {
int32_t in_y = GetNearestNeighbor(y, input_height, output_height,
op_params.align_corners,
op_params.half_pixel_centers);
const T* y_input_ptr = input_ptr + in_y * row_offset;
for (int x = 0; x < output_width; ++x) {
int32_t in_x = GetNearestNeighbor(x, input_width, output_width,
op_params.align_corners,
op_params.half_pixel_centers);
const T* x_input_ptr = y_input_ptr + in_x * col_offset;
memcpy(output_ptr, x_input_ptr, depth * sizeof(T));
output_ptr += depth;
}
}
input_ptr += batch_offset;
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace resize_nearest_neighbor {
enum KernelType {
kReference,
kGenericOptimized,
kNeonOptimized,
};
constexpr int kInputTensor = 0;
constexpr int kSizeTensor = 1;
constexpr int kOutputTensor = 0;
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* size,
TfLiteTensor* output) {
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = input->dims->data[0];
const int32* size_data = GetTensorData<int32>(size);
output_size->data[1] = size_data[0];
output_size->data[2] = size_data[1];
output_size->data[3] = input->dims->data[3];
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
TF_LITE_ENSURE_TYPES_EQ(context, size->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2);
output->type = input->type;
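  // If the size tensor is not constant, the output shape is unknown at prepare
  // time; mark the output dynamic and defer resizing to Eval.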
if (!IsConstantOrPersistentTensor(size)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, input, size, output);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteResizeNearestNeighborParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor(context, input, size, output));
}
tflite::ResizeNearestNeighborParams op_params;
op_params.align_corners = params->align_corners;
op_params.half_pixel_centers = params->half_pixel_centers;
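  // Note: the float32 path reuses the int32 instantiation of the kernel. The
  // op only copies elements, so any element type of the same width works, and
  // sizeof(float) == sizeof(int32_t).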
if (output->type == kTfLiteFloat32) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int32>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int32>(output));
} else if (output->type == kTfLiteUInt8) {
if (kernel_type == kReference) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
optimized_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<uint8_t>(output));
}
} else if (output->type == kTfLiteInt8) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else if (output->type == kTfLiteInt16) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int16_t>(output));
} else {
TF_LITE_KERNEL_LOG(
context, "Output type is %s, requires float, uint8, int8 or int16.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_nearest_neighbor::Prepare,
resize_nearest_neighbor::Eval<resize_nearest_neighbor::kReference>};
return &r;
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_nearest_neighbor::Prepare,
resize_nearest_neighbor::Eval<
resize_nearest_neighbor::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR_NEON_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, resize_nearest_neighbor::Prepare,
resize_nearest_neighbor::Eval<resize_nearest_neighbor::kNeonOptimized>};
return &r;
}
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR() {
#ifdef USE_NEON
return Register_RESIZE_NEAREST_NEIGHBOR_NEON_OPT();
#else
return Register_RESIZE_NEAREST_NEIGHBOR_GENERIC_OPT();
#endif
}
}
}
} | #include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace {
template <typename T>
void TestReferenceResizeNearestNeighbor(
const RuntimeShape& input_shape, const std::vector<T>& input_data,
const std::vector<int32>& output_size_data,
const RuntimeShape& output_shape,
const std::vector<T>& expected_output_data, bool align_corners = false,
bool half_pixel_centers = false) {
ResizeNearestNeighborParams op_params{align_corners, half_pixel_centers};
RuntimeShape output_size_shape({1, 1, 1, 2});
std::vector<T> output_data(expected_output_data.size());
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(expected_output_data, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To1x1) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {1, 1};
RuntimeShape output_shape = {1, 1, 1, 1};
std::vector<float> output_data = {1};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To1x1_AlignCorners) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {1, 1};
RuntimeShape output_shape = {1, 1, 1, 1};
std::vector<float> output_data = {1};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test2x2To1x1_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {1, 1};
RuntimeShape output_shape = {1, 1, 1, 1};
std::vector<float> output_data = {4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8> output_data = {1, 1, 2, 1, 1, 2, 3, 3, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3Int16) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<int16_t> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<int16_t> output_data = {1, 1, 2, 1, 1, 2, 3, 3, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3_AlignCorners) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8> output_data = {1, 2, 2, 3, 4, 4, 3, 4, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test2x2To3x3_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8> output_data = {1, 2, 2, 3, 4, 4, 3, 4, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test3x3To2x2) {
RuntimeShape input_shape = {1, 3, 3, 1};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int32> output_size_data = {2, 2};
RuntimeShape output_shape = {1, 2, 2, 1};
std::vector<float> output_data = {1, 2, 4, 5};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test3x3To2x2_AlignCorners) {
RuntimeShape input_shape = {1, 3, 3, 1};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int32> output_size_data = {2, 2};
RuntimeShape output_shape = {1, 2, 2, 1};
std::vector<float> output_data = {1, 3, 7, 9};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test3x3To2x2_HalfPixelCenters) {
RuntimeShape input_shape = {1, 3, 3, 1};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int32> output_size_data = {2, 2};
RuntimeShape output_shape = {1, 2, 2, 1};
std::vector<float> output_data = {1, 3, 7, 9};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To2x5) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {2, 5};
RuntimeShape output_shape = {1, 2, 5, 1};
std::vector<uint8> output_data = {1, 1, 1, 2, 2, 3, 3, 3, 4, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To2x5_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {2, 5};
RuntimeShape output_shape = {1, 2, 5, 1};
std::vector<uint8> output_data = {1, 1, 2, 2, 2, 3, 3, 4, 4, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test4x4To3x3) {
RuntimeShape input_shape = {1, 4, 4, 1};
std::vector<uint8> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8> output_data = {1, 2, 3, 5, 6, 7, 9, 10, 11};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test4x4To3x3_AlignCorners) {
RuntimeShape input_shape = {1, 4, 4, 1};
std::vector<uint8> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8> output_data = {1, 3, 4, 9, 11, 12, 13, 15, 16};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data,
true);
}
TEST(ResizeNearestNeighborReference, Test4x4To3x3_HalfPixelCenters) {
RuntimeShape input_shape = {1, 4, 4, 1};
std::vector<uint8> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {1, 3, 3, 1};
std::vector<uint8> output_data = {1, 3, 4, 9, 11, 12, 13, 15, 16};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To5x2) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {5, 2};
RuntimeShape output_shape = {1, 5, 2, 1};
std::vector<float> output_data = {1, 2, 1, 2, 1, 2, 3, 4, 3, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2To5x2_HalfPixelCenters) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {5, 2};
RuntimeShape output_shape = {1, 5, 2, 1};
std::vector<float> output_data = {1, 2, 1, 2, 3, 4, 3, 4, 3, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference,
Test2x2To5x2_HalfPixelCenters_AlignCorners) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<float> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {5, 2};
RuntimeShape output_shape = {1, 5, 2, 1};
std::vector<float> output_data = {2, 2, 2, 2, 4, 4, 4, 4, 4, 4};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
true, true);
}
TEST(ResizeNearestNeighborReference, Test2x2To4x4) {
RuntimeShape input_shape = {1, 2, 2, 1};
std::vector<uint8> input_data = {1, 2, 3, 4};
std::vector<int32> output_size_data = {4, 4};
RuntimeShape output_shape = {1, 4, 4, 1};
std::vector<uint8> output_data = {1, 1, 2, 2, 1, 1, 2, 2,
3, 3, 4, 4, 3, 3, 4, 4};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2x2x2To2x3x3x2) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 1, 2, 2, 3, 3, 4, 4,
5, 5, 6, 6, 7, 7, 8, 8};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2,
3, 3, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6,
5, 5, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8};
TestReferenceResizeNearestNeighbor(input_shape, input_data, output_size_data,
output_shape, output_data);
}
TEST(ResizeNearestNeighborReference, Test2x2x2x2To2x3x3x2_AlignCorners) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {
1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 5, 6, 7, 8, 7, 8,
1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 5, 6, 7, 8, 7, 8,
};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
true, false);
}
TEST(ResizeNearestNeighborReference, Test2x2x2x2To2x3x3x2_HalfPixelCenters) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 1, 2, 2, 3, 3, 4, 4,
5, 5, 6, 6, 7, 7, 8, 8};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4,
3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6,
7, 7, 8, 8, 8, 8, 7, 7, 8, 8, 8, 8};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
false, true);
}
TEST(ResizeNearestNeighborReference,
Test2x2x2x2To2x3x3x2_HalfPixelCenters_AlignCorners) {
RuntimeShape input_shape = {2, 2, 2, 2};
std::vector<float> input_data = {1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32> output_size_data = {3, 3};
RuntimeShape output_shape = {2, 3, 3, 2};
std::vector<float> output_data = {1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8,
5, 6, 7, 8, 7, 8, 1, 2, 3, 4, 3, 4,
5, 6, 7, 8, 7, 8, 5, 6, 7, 8, 7, 8};
TestReferenceResizeNearestNeighbor(
input_shape, input_data, output_size_data, output_shape, output_data,
true, true);
}
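// Runs the optimized ResizeNearestNeighbor kernel against the reference
// implementation on random uint8 data, covering all four combinations of
// align_corners and half_pixel_centers.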
void TestOptimizedResizeNearestNeighbor(int batch, int depth, int input_width,
int input_height, int output_width,
int output_height) {
RuntimeShape output_size_shape({1, 1, 1, 2});
RuntimeShape input_shape({batch, input_height, input_width, depth});
RuntimeShape output_shape({batch, output_height, output_width, depth});
std::vector<uint8> input_data(input_shape.FlatSize(), 0);
FillRandom(&input_data, static_cast<uint8>(0), static_cast<uint8>(255));
std::vector<uint8> reference_output_data(output_shape.FlatSize(), 0);
std::vector<uint8> output_data(output_shape.FlatSize(), 3);
std::vector<int32> output_size_data = {output_height, output_width};
  ResizeNearestNeighborParams op_params{/*align_corners=*/false,
                                        /*half_pixel_centers=*/false};
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
op_params.align_corners = true;
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
op_params.align_corners = false;
op_params.half_pixel_centers = true;
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
op_params.align_corners = true;
op_params.half_pixel_centers = true;
reference_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, reference_output_data.data());
optimized_ops::ResizeNearestNeighbor(
op_params, input_shape, input_data.data(), output_size_shape,
output_size_data.data(), output_shape, output_data.data());
ASSERT_EQ(reference_output_data, output_data);
}
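// Returns true when the 16.16 fixed-point scale used by the optimized kernel
// selects the same source pixel as the float scale for every output
// coordinate, so the parity test below only runs shapes where both paths
// agree by construction.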
bool is_valid_scale(int input_width, int input_height, int output_width,
int output_height) {
const float height_scale_float =
static_cast<float>(input_height) / output_height;
const float width_scale_float =
static_cast<float>(input_width) / output_width;
int32 height_scale_int = (input_height << 16) / output_height + 1;
int32 width_scale_int = (input_width << 16) / output_width + 1;
for (int y = 0; y < output_height; ++y) {
int32 in_y_float =
std::min(static_cast<int32>(std::floor(y * height_scale_float)),
input_height - 1);
int32 in_y_int = std::min((y * height_scale_int) >> 16, input_height - 1);
if (in_y_int != in_y_float) {
return false;
}
for (int x = 0; x < output_width; ++x) {
int32 in_x_float =
std::min(static_cast<int32>(std::floor(x * width_scale_float)),
input_width - 1);
int32 in_x_int = std::min((x * width_scale_int) >> 16, input_width - 1);
if (in_x_int != in_x_float) {
return false;
}
}
}
return true;
}
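// Fuzzes random shapes and checks optimized/reference parity; shapes whose
// fixed-point scale would diverge from the float scale are skipped and must
// stay below 0.1% of the runs.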
TEST(ResizeNearestNeighborOptimized, TestReferenceParity) {
int invalid_count = 0;
const int kTestsToRun = 10000;
for (int i = 0; i < kTestsToRun; i++) {
const int batch = ExponentialRandomPositiveInt(0.9f, 3, 20);
const int depth = ExponentialRandomPositiveInt(0.9f, 6, 50);
const int input_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int input_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int output_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int output_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
if (is_valid_scale(input_width, input_height, output_width,
output_height)) {
TestOptimizedResizeNearestNeighbor(
batch, depth, input_width, input_height, output_width, output_height);
} else {
invalid_count++;
}
}
ASSERT_LT(static_cast<float>(invalid_count) / kTestsToRun, 0.001f);
}
}
} |
909 | cpp | tensorflow/tensorflow | gather | tensorflow/lite/kernels/gather.cc | tensorflow/lite/kernels/gather_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_GATHER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_GATHER_H_
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
GPUOperation CreateGather(const GpuInfo& gpu_info, const OperationDef& op_def,
const GatherAttributes& attr);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/gather.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
namespace tflite {
namespace gpu {
namespace {
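// Emits the GPU kernel source for Gather: the indices tensor is read along
// the axis given by attr.axis and used to index into src_tensor.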
std::string GetGatherCode(const OperationDef& op_def, GatherAttributes attr) {
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.IsBatchSupported()) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
c += " args.src_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int S = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " int idx;\n";
c += " args.src_tensor::type result;\n";
switch (attr.axis) {
case Axis::BATCH:
c += " idx = args.indices.Read<int>(0, 0, 0, B).x;\n";
c += " result = args.src_tensor.Read(X, Y, "
"S, idx);\n";
break;
case Axis::HEIGHT:
c += " idx = args.indices.Read<int>(0, 0, 0, Y).x;\n";
c += " result = args.src_tensor.Read(X, idx, "
"S, B);\n";
break;
case Axis::WIDTH:
c += " idx = args.indices.Read<int>(0, 0, 0, X).x;\n";
c += " result = args.src_tensor.Read(idx, Y, "
", S, B);\n";
break;
case Axis::CHANNELS:
c += " idx = args.indices.Read<int>(0, 0, 0, S * 4).x;\n";
c += " args.src_tensor.ReadPerChannel(result.x, X, Y, idx, B);\n";
c += " idx = args.indices.Read<int>(0, 0, 0, S * 4 + 1).x;\n";
c += " args.src_tensor.ReadPerChannel(result.y, X, Y, idx, B);\n";
c += " idx = args.indices.Read<int>(0, 0, 0, S * 4 + 2).x;\n";
c += " args.src_tensor.ReadPerChannel(result.z, X, Y, idx, B);\n";
c += " idx = args.indices.Read<int>(0, 0, 0, S * 4 + 3).x;\n";
c += " args.src_tensor.ReadPerChannel(result.w, X, Y, idx, B);\n";
break;
default:
c += " return;\n";
}
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
}
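// With a single source tensor the indices are taken from the constant
// attr.indices and uploaded as a linear int32 tensor object; otherwise they
// are bound as a second runtime input.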
GPUOperation CreateGather(const GpuInfo& gpu_info, const OperationDef& op_def,
const GatherAttributes& attr) {
GPUOperation op(op_def);
op.AddSrcTensor("src_tensor", op_def.src_tensors[0]);
op.AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
if (op_def.src_tensors.size() == 1) {
BHWC shape = BHWC(attr.indices.shape.v, 1, 1, 1);
TensorStorageType storage_type = GetStorageTypeForLinearTensor(
gpu_info, DataType::INT32, attr.indices.shape);
TensorDescriptor indices =
CreateBhwcTensorDescriptor(DataType::INT32, storage_type, shape);
indices.UploadData(attr.indices);
op.args_.AddObject("indices",
std::make_unique<TensorDescriptor>(std::move(indices)));
} else {
op.AddSrcTensor("indices", op_def.src_tensors[1]);
}
op.code_ = GetGatherCode(op_def, attr);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
return op;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/gather_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, GatherBatch) {
auto status = GatherBatchTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherBatchConst) {
auto status = GatherBatchTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherHeight) {
auto status = GatherHeightTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherHeightConst) {
auto status = GatherHeightTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherWidth) {
auto status = GatherWidthTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherWidthConst) {
auto status = GatherWidthTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherChannels) {
auto status = GatherChannelsTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherChannelsConst) {
auto status = GatherChannelsTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} |
910 | cpp | tensorflow/tensorflow | comparisons | tensorflow/lite/kernels/internal/reference/comparisons.cc | tensorflow/lite/kernels/comparisons_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline bool EqualFn(T lhs, T rhs) {
return lhs == rhs;
}
template <typename T>
inline bool NotEqualFn(T lhs, T rhs) {
return lhs != rhs;
}
template <typename T>
inline bool GreaterFn(T lhs, T rhs) {
return lhs > rhs;
}
template <typename T>
inline bool GreaterEqualFn(T lhs, T rhs) {
return lhs >= rhs;
}
template <typename T>
inline bool LessFn(T lhs, T rhs) {
return lhs < rhs;
}
template <typename T>
inline bool LessEqualFn(T lhs, T rhs) {
return lhs <= rhs;
}
template <typename T>
using ComparisonFn = bool (*)(T, T);
template <typename T, ComparisonFn<T> F>
inline void ComparisonImpl(
const ComparisonParams& op_params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, bool* output_data) {
const int64_t flatsize =
MatchingFlatSize(input1_shape, input2_shape, output_shape);
for (int64_t i = 0; i < flatsize; ++i) {
output_data[i] = F(input1_data[i], input2_data[i]);
}
}
template <ComparisonFn<float> F>
inline void Comparison(const ComparisonParams& op_params,
const RuntimeShape& input1_shape,
const float* input1_data,
const RuntimeShape& input2_shape,
const float* input2_data,
const RuntimeShape& output_shape, bool* output_data) {
ComparisonImpl<float, F>(op_params, input1_shape, input1_data, input2_shape,
input2_data, output_shape, output_data);
}
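// Quantized elementwise comparison: both inputs are offset, shifted left, and
// rescaled with their quantized multipliers before being compared as int32.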
template <typename T, ComparisonFn<int32_t> F>
inline void ComparisonWithScaling(
const ComparisonParams& op_params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, bool* output_data) {
int left_shift = op_params.left_shift;
int32_t input1_offset = op_params.input1_offset;
int32_t input1_multiplier = op_params.input1_multiplier;
int input1_shift = op_params.input1_shift;
int32_t input2_offset = op_params.input2_offset;
int32_t input2_multiplier = op_params.input2_multiplier;
int input2_shift = op_params.input2_shift;
const int64_t flatsize =
MatchingFlatSize(input1_shape, input2_shape, output_shape);
for (int64_t i = 0; i < flatsize; ++i) {
const int32_t input1_val = input1_offset + input1_data[i];
const int32_t input2_val = input2_offset + input2_data[i];
const int32_t shifted_input1_val = input1_val * (1 << left_shift);
const int32_t shifted_input2_val = input2_val * (1 << left_shift);
const int32_t scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input1_val, input1_multiplier, input1_shift);
const int32_t scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input2_val, input2_multiplier, input2_shift);
output_data[i] = F(scaled_input1_val, scaled_input2_val);
}
}
struct BroadcastComparison4DSlowCommon {
const RuntimeShape output_shape;
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
};
TFLITE_NOINLINE
BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess(
const RuntimeShape& unextended_input1_shape,
const RuntimeShape& unextended_input2_shape,
const RuntimeShape& unextended_output_shape);
template <typename T, ComparisonFn<T> F>
inline void BroadcastComparison4DSlowImpl(
const ComparisonParams& op_params,
const RuntimeShape& unextended_input1_shape, const T* input1_data,
const RuntimeShape& unextended_input2_shape, const T* input2_data,
const RuntimeShape& unextended_output_shape, bool* output_data) {
const BroadcastComparison4DSlowCommon dims =
BroadcastComparison4DSlowPreprocess(unextended_input1_shape,
unextended_input2_shape,
unextended_output_shape);
for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
output_data[Offset(dims.output_shape, b, y, x, c)] =
F(input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)],
input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)]);
}
}
}
}
}
template <ComparisonFn<float> F>
inline void BroadcastComparison4DSlow(const ComparisonParams& op_params,
const RuntimeShape& input1_shape,
const float* input1_data,
const RuntimeShape& input2_shape,
const float* input2_data,
const RuntimeShape& output_shape,
bool* output_data) {
BroadcastComparison4DSlowImpl<float, F>(op_params, input1_shape, input1_data,
input2_shape, input2_data,
output_shape, output_data);
}
template <typename T, ComparisonFn<int32_t> F>
inline void BroadcastComparison4DSlowWithScaling(
const ComparisonParams& op_params,
const RuntimeShape& unextended_input1_shape, const T* input1_data,
const RuntimeShape& unextended_input2_shape, const T* input2_data,
const RuntimeShape& unextended_output_shape, bool* output_data) {
const BroadcastComparison4DSlowCommon dims =
BroadcastComparison4DSlowPreprocess(unextended_input1_shape,
unextended_input2_shape,
unextended_output_shape);
int left_shift = op_params.left_shift;
int32_t input1_offset = op_params.input1_offset;
int32_t input1_multiplier = op_params.input1_multiplier;
int input1_shift = op_params.input1_shift;
int32_t input2_offset = op_params.input2_offset;
int32_t input2_multiplier = op_params.input2_multiplier;
int input2_shift = op_params.input2_shift;
for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
const int32_t input1_val =
input1_offset +
input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)];
const int32_t input2_val =
input2_offset +
input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)];
const int32_t shifted_input1_val = input1_val * (1 << left_shift);
const int32_t shifted_input2_val = input2_val * (1 << left_shift);
const int32_t scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input1_val, input1_multiplier, input1_shift);
const int32_t scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input2_val, input2_multiplier, input2_shift);
output_data[Offset(dims.output_shape, b, y, x, c)] =
F(scaled_input1_val, scaled_input2_val);
}
}
}
}
}
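// Expands to the full set of entry points for one comparison functor: the
// float version, NoScaling, WithScaling, and the Broadcast4DSlow variants.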
#define TFLITE_COMPARISON_OP(name) \
inline void name(const ComparisonParams& op_params, \
const RuntimeShape& input1_shape, const float* input1_data, \
const RuntimeShape& input2_shape, const float* input2_data, \
const RuntimeShape& output_shape, bool* output_data) { \
Comparison<name##Fn>(op_params, input1_shape, input1_data, input2_shape, \
input2_data, output_shape, output_data); \
} \
template <typename T> \
inline void name##NoScaling( \
const ComparisonParams& op_params, const RuntimeShape& input1_shape, \
const T* input1_data, const RuntimeShape& input2_shape, \
const T* input2_data, const RuntimeShape& output_shape, \
bool* output_data) { \
ComparisonImpl<T, name##Fn>(op_params, input1_shape, input1_data, \
input2_shape, input2_data, output_shape, \
output_data); \
} \
template <typename T> \
inline void name##WithScaling( \
const ComparisonParams& op_params, const RuntimeShape& input1_shape, \
const T* input1_data, const RuntimeShape& input2_shape, \
const T* input2_data, const RuntimeShape& output_shape, \
bool* output_data) { \
ComparisonWithScaling<T, name##Fn>(op_params, input1_shape, input1_data, \
input2_shape, input2_data, \
output_shape, output_data); \
} \
template <typename T> \
inline void Broadcast4DSlow##name##NoScaling( \
const ComparisonParams& op_params, const RuntimeShape& input1_shape, \
const T* input1_data, const RuntimeShape& input2_shape, \
const T* input2_data, const RuntimeShape& output_shape, \
bool* output_data) { \
BroadcastComparison4DSlowImpl<T, name##Fn>( \
op_params, input1_shape, input1_data, input2_shape, input2_data, \
output_shape, output_data); \
} \
inline void Broadcast4DSlow##name( \
const ComparisonParams& op_params, const RuntimeShape& input1_shape, \
const float* input1_data, const RuntimeShape& input2_shape, \
const float* input2_data, const RuntimeShape& output_shape, \
bool* output_data) { \
BroadcastComparison4DSlow<name##Fn>(op_params, input1_shape, input1_data, \
input2_shape, input2_data, \
output_shape, output_data); \
} \
template <typename T> \
inline void Broadcast4DSlow##name##WithScaling( \
const ComparisonParams& op_params, const RuntimeShape& input1_shape, \
const T* input1_data, const RuntimeShape& input2_shape, \
const T* input2_data, const RuntimeShape& output_shape, \
bool* output_data) { \
BroadcastComparison4DSlowWithScaling<T, name##Fn>( \
op_params, input1_shape, input1_data, input2_shape, input2_data, \
output_shape, output_data); \
}
TFLITE_COMPARISON_OP(Equal)
TFLITE_COMPARISON_OP(NotEqual)
TFLITE_COMPARISON_OP(Greater)
TFLITE_COMPARISON_OP(GreaterEqual)
TFLITE_COMPARISON_OP(Less)
TFLITE_COMPARISON_OP(LessEqual)
#undef TFLITE_COMPARISON_OP
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/comparisons.h"
namespace tflite {
namespace reference_ops {
BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess(
const RuntimeShape& unextended_input1_shape,
const RuntimeShape& unextended_input2_shape,
const RuntimeShape& unextended_output_shape) {
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
return {RuntimeShape::ExtendedShape(4, unextended_output_shape), desc1,
desc2};
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
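// SingleOpModel wrapper that builds a single comparison op with two inputs of
// the given type and a bool output tensor.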
class ComparisonOpModel : public SingleOpModel {
public:
ComparisonOpModel(std::initializer_list<int> input1_shape,
std::initializer_list<int> input2_shape,
TensorType input_type, BuiltinOperator op) {
input1_ = AddInput(input_type);
input2_ = AddInput(input_type);
output_ = AddOutput(TensorType_BOOL);
ConfigureBuiltinOp(op);
BuildInterpreter({input1_shape, input2_shape});
}
ComparisonOpModel(const TensorData& input1, const TensorData& input2,
TensorType input_type, BuiltinOperator op) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(TensorType_BOOL);
ConfigureBuiltinOp(op);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<bool> GetOutput() { return ExtractVector<bool>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input1_;
int input2_;
int output_;
void ConfigureBuiltinOp(BuiltinOperator op) {
switch (op) {
case BuiltinOperator_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_EqualOptions,
CreateEqualOptions(builder_).Union());
break;
}
case BuiltinOperator_NOT_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_NotEqualOptions,
CreateNotEqualOptions(builder_).Union());
break;
}
case BuiltinOperator_GREATER: {
SetBuiltinOp(op, BuiltinOptions_GreaterOptions,
CreateGreaterOptions(builder_).Union());
break;
}
case BuiltinOperator_GREATER_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_GreaterEqualOptions,
CreateGreaterEqualOptions(builder_).Union());
break;
}
case BuiltinOperator_LESS: {
SetBuiltinOp(op, BuiltinOptions_LessOptions,
CreateLessOptions(builder_).Union());
break;
}
case BuiltinOperator_LESS_EQUAL: {
SetBuiltinOp(op, BuiltinOptions_LessEqualOptions,
CreateLessEqualOptions(builder_).Union());
break;
}
default: {
FAIL() << "We shouldn't get here.";
}
}
}
};
TEST(ComparisonsTest, EqualBool) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_BOOL,
BuiltinOperator_EQUAL);
model.PopulateTensor<bool>(model.input1(), {true, false, true, false});
model.PopulateTensor<bool>(model.input2(), {true, true, false, false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualInt16) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16,
BuiltinOperator_EQUAL);
model.PopulateTensor<int16_t>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int16_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 4, 1}, {1, 1, 1, 4, 1}, TensorType_STRING,
BuiltinOperator_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "C", "D"});
model.PopulateTensor<std::string>(model.input2(), {"A", "C", "B", "D"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4, 1));
}
TEST(ComparisonsTest, EqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, EqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, false, false, false,
false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, EqualBroadcastString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_STRING,
BuiltinOperator_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "A", "B"});
model.PopulateTensor<std::string>(model.input2(), {"A"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualBool) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_BOOL,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<bool>(model.input1(), {true, false, true, false});
model.PopulateTensor<bool>(model.input2(), {true, true, false, false});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 1, 4}, {1, 1, 1, 1, 4}, TensorType_STRING,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "C", "D"});
model.PopulateTensor<std::string>(model.input2(), {"A", "C", "B", "D"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, NotEqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, true, true, true, true, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, NotEqualBroadcastString) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_STRING,
BuiltinOperator_NOT_EQUAL);
model.PopulateTensor<std::string>(model.input1(), {"A", "B", "A", "B"});
model.PopulateTensor<std::string>(model.input2(), {"A"});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_GREATER);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_GREATER);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, true, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, GreaterEqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualInt16) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int16_t>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int16_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, GreaterEqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_GREATER_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, true, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, LessFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_LESS);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessInt16) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16,
BuiltinOperator_LESS);
model.PopulateTensor<int16_t>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int16_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_LESS);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 6, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, false, false, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(ComparisonsTest, LessEqualFloat) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_FLOAT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<float>(model.input1(), {0.1, 0.9, 0.7, 0.3});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.6, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessEqualInt) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessEqualBroadcast) {
ComparisonOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3});
model.PopulateTensor<int>(model.input2(), {7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, true));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
TEST(ComparisonsTest, LessEqualBroadcastTwoD) {
ComparisonOpModel model({1, 1, 2, 4}, {1, 1, 1, 4}, TensorType_INT32,
BuiltinOperator_LESS_EQUAL);
model.PopulateTensor<int>(model.input1(), {-1, 9, 7, 3, 2, 4, 2, 8});
model.PopulateTensor<int>(model.input2(), {7, 1, 2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, false, true, false));
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 2, 4));
}
TEST(QuantizedComparisonsTest, EqualUInt8Quantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, false));
}
TEST(QuantizedComparisonsTest, EqualInt8Quantized) {
const float kMin = -127.f;
const float kMax = 127.f;
ComparisonOpModel model({TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_INT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {1, -9, 7, 3});
model.QuantizeAndPopulate<int8_t>(model.input2(), {-1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, true, false));
}
TEST(QuantizedComparisonsTest, NotEqualUInt8Quantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 7, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true));
}
TEST(QuantizedComparisonsTest, NotEqualInt8Quantized) {
const float kMin = -127.f;
const float kMax = 127.f;
ComparisonOpModel model({TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_INT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_INT8, BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {1, -9, 7, 3});
model.QuantizeAndPopulate<int8_t>(model.input2(), {1, 2, 7, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, false, true));
}
TEST(ComparisonsTest, GreaterQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
}
TEST(ComparisonsTest, GreaterQuantizedSmallRange) {
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, 0.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, 0.0, 2.0},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1.0, 0.5, 0.35, 0.1});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1.01, 0.25, 0.3, 0.4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, true, true, false));
}
TEST(ComparisonsTest, GreaterEqualQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, true, true, false));
}
TEST(ComparisonsTest, LessQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(false, false, false, true));
}
TEST(ComparisonsTest, LessEqualQuantized) {
const float kMin = -1.f;
const float kMax = 128.f;
ComparisonOpModel model({TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
{TensorType_UINT8, {1, 2, 2, 1}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {1, 9, 7, 3});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {1, 2, 6, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
}
TEST(ComparisonsTest, QuantizedEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, false, false, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8NotEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, true, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8NotEqualWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_NOT_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {-20, 2, 7, -8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, true, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8GreaterWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, false, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8GreaterWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_GREATER);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, false, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest,
QuantizedInt8GreaterWithBroadcastMultiplierGreaterThanOne) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_GREATER);
model.QuantizeAndPopulate<int8_t>(model.input1(),
{572, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, false, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8GreaterEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_GREATER_EQUAL);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8GreaterEqualWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_GREATER_EQUAL);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(true, false, false, true, true, true))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8LessWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
{TensorType_UINT8, {}, kMin, kMax},
TensorType_UINT8, BuiltinOperator_LESS);
model.QuantizeAndPopulate<uint8_t>(model.input1(), {20, 2, 7, 8, 11, 20});
model.QuantizeAndPopulate<uint8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedInt8LessWithBroadcast) {
const float kMin = -127.f;
const float kMax = 127.f;
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_INT8, test_shapes[i], kMin, kMax},
{TensorType_INT8, {}, kMin, kMax}, TensorType_INT8,
BuiltinOperator_LESS);
model.QuantizeAndPopulate<int8_t>(model.input1(), {20, -2, -71, 8, 11, 20});
model.QuantizeAndPopulate<int8_t>(model.input2(), {8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAre(false, true, true, false, false, false))
<< "With shape number " << i;
}
}
TEST(ComparisonsTest, QuantizedUInt8L |
911 | cpp | tensorflow/tensorflow | test_delegate_providers | tensorflow/lite/kernels/test_delegate_providers.cc | tensorflow/lite/kernels/test_delegate_providers_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_TEST_DELEGATE_PROVIDERS_H_
#define TENSORFLOW_LITE_KERNELS_TEST_DELEGATE_PROVIDERS_H_
#include <vector>
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
class KernelTestDelegateProviders {
public:
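  // Returns the process-wide singleton used by kernel tests.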
static KernelTestDelegateProviders* Get();
KernelTestDelegateProviders();
bool InitFromCmdlineArgs(int* argc, const char** argv);
tools::ToolParams* MutableParams() { return ¶ms_; }
const tools::ToolParams& ConstParams() const { return params_; }
std::vector<tools::ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates(
const tools::ToolParams& params) const {
tools::ProvidedDelegateList util;
return util.CreateAllRankedDelegates(params);
}
std::vector<tools::ProvidedDelegateList::ProvidedDelegate>
CreateAllDelegates() const {
return delegate_list_util_.CreateAllRankedDelegates();
}
static constexpr char kUseSimpleAllocator[] = "use_simple_allocator";
static constexpr char kAccelerationTestConfigPath[] =
"acceleration_test_config_path";
private:
tools::ToolParams params_;
tools::ProvidedDelegateList delegate_list_util_;
};
}
#endif
#include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <string>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
constexpr char KernelTestDelegateProviders::kAccelerationTestConfigPath[];
constexpr char KernelTestDelegateProviders::kUseSimpleAllocator[];
KernelTestDelegateProviders* KernelTestDelegateProviders::Get() {
static KernelTestDelegateProviders* const providers =
new KernelTestDelegateProviders();
return providers;
}
KernelTestDelegateProviders::KernelTestDelegateProviders()
: delegate_list_util_(¶ms_) {
delegate_list_util_.AddAllDelegateParams();
params_.AddParam(kAccelerationTestConfigPath,
tools::ToolParam::Create<std::string>(""));
params_.AddParam(kUseSimpleAllocator, tools::ToolParam::Create<bool>(false));
}
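// Parses delegate-related flags plus the acceleration-test-config and
// simple-allocator options; consumed flags are removed from argc/argv, and a
// usage message is logged when parsing fails or --help is passed.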
bool KernelTestDelegateProviders::InitFromCmdlineArgs(int* argc,
const char** argv) {
std::vector<tflite::Flag> flags = {
Flag(
kAccelerationTestConfigPath,
[this](const std::string& val, int argv_position) {
this->params_.Set<std::string>(kAccelerationTestConfigPath, val,
argv_position);
},
"", "Acceleration test config file for SingleOpModel",
Flag::kOptional),
Flag(
kUseSimpleAllocator,
[this](const bool& val, int argv_position) {
this->params_.Set<bool>(kUseSimpleAllocator, val, argv_position);
},
false, "Use Simple Memory Allocator for SingleOpModel",
Flag::kOptional)};
delegate_list_util_.AppendCmdlineFlags(flags);
bool parse_result = tflite::Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
} | #include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(KernelTestDelegateProvidersTest, DelegateProvidersParams) {
KernelTestDelegateProviders providers;
const auto& params = providers.ConstParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
EXPECT_TRUE(params.HasParam("use_nnapi"));
int argc = 3;
const char* argv[] = {"program_name", "--use_nnapi=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_nnapi"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(KernelTestDelegateProvidersTest, CreateTfLiteDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
KernelTestDelegateProviders providers;
providers.MutableParams()->Set<bool>("use_xnnpack", true);
EXPECT_GE(providers.CreateAllDelegates().size(), 1);
tools::ToolParams local_params;
local_params.Merge(providers.ConstParams());
local_params.Set<bool>("use_xnnpack", false);
EXPECT_TRUE(providers.CreateAllDelegates(local_params).empty());
#endif
}
}
} |
912 | cpp | tensorflow/tensorflow | broadcast_args | tensorflow/lite/kernels/broadcast_args.cc | tensorflow/lite/kernels/broadcast_args_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
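// Computes the broadcast shape of two 1-D shape tensors: dimensions are
// matched from the trailing end, missing dimensions are treated as 1, and
// mismatched non-unit dimensions trigger a TFLITE_CHECK failure.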
template <typename T>
void BroadcastArgs(const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
auto get_shape_data = [](const RuntimeShape& shape, const T* data,
int backward_idx) -> T {
int forward_idx = shape.FlatSize() - 1 - backward_idx;
if (forward_idx < 0) return 1;
return data[forward_idx];
};
int output_num_elements = output_shape.FlatSize();
for (int i = 0; i < output_num_elements; ++i) {
int backward_i = output_num_elements - 1 - i;
int shape1_i = get_shape_data(input1_shape, input1_data, i);
int shape2_i = get_shape_data(input2_shape, input2_data, i);
if (shape1_i == 1) {
output_data[backward_i] = shape2_i;
} else if (shape2_i == 1) {
output_data[backward_i] = shape1_i;
} else {
TFLITE_CHECK_EQ(shape1_i, shape2_i);
output_data[backward_i] = shape1_i;
}
}
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/reference/broadcast_args.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace broadcast_args {
constexpr int kShape1Tensor = 0;
constexpr int kShape2Tensor = 1;
constexpr int kOutputTensor = 0;
struct BroadcastArgsContext {
BroadcastArgsContext(TfLiteContext* context, TfLiteNode* node) {
shape1 = GetInput(context, node, kShape1Tensor);
shape2 = GetInput(context, node, kShape2Tensor);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* shape1;
const TfLiteTensor* shape2;
TfLiteTensor* output;
};
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node);
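// Validates that both shape inputs are 1-D tensors of the same integer type
// and sizes the output to the longer of the two; when both inputs are
// constant, the result is computed here and stored as a persistent tensor.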
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BroadcastArgsContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.shape1->type == kTfLiteInt32 ||
op_context.shape1->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, op_context.shape1->type, op_context.shape2->type);
TF_LITE_ENSURE_EQ(context, op_context.shape1->type, op_context.output->type);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.shape1), 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.shape2), 1);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(1);
output_shape->data[0] = std::max(SizeOfDimension(op_context.shape1, 0),
SizeOfDimension(op_context.shape2, 0));
if (IsConstantOrPersistentTensor(op_context.shape1) &&
IsConstantOrPersistentTensor(op_context.shape2)) {
SetTensorToPersistentRo(op_context.output);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, op_context.output,
output_shape));
return EvalImpl(context, node);
}
return context->ResizeTensor(context, op_context.output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BroadcastArgsContext op_context(context, node);
if (IsConstantOrPersistentTensor(op_context.output)) {
return kTfLiteOk;
} else {
return EvalImpl(context, node);
}
}
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) {
BroadcastArgsContext op_context(context, node);
#define TF_LITE_BROADCAST_ARG(data_type) \
reference_ops::BroadcastArgs(GetTensorShape(op_context.shape1), \
GetTensorData<data_type>(op_context.shape1), \
GetTensorShape(op_context.shape2), \
GetTensorData<data_type>(op_context.shape2), \
GetTensorShape(op_context.output), \
GetTensorData<data_type>(op_context.output))
if (op_context.output->type == kTfLiteInt32) {
TF_LITE_BROADCAST_ARG(int32_t);
} else {
TF_LITE_BROADCAST_ARG(int64_t);
}
#undef TF_LITE_BROADCAST_ARG
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_ARGS() {
static TfLiteRegistration r = {nullptr, nullptr, broadcast_args::Prepare,
broadcast_args::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
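// Single-op model wrapping BROADCAST_ARGS; the shape operands can be fed
// either as constant tensors (exercising the Prepare-time path) or as regular
// inputs populated before Invoke().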
template <class ShapeType = int32_t>
class BroadcastArgsOpModel : public SingleOpModel {
public:
BroadcastArgsOpModel(std::initializer_list<ShapeType> input1,
std::initializer_list<ShapeType> input2,
bool constant_tensor) {
int input1_length = input1.size();
int input2_length = input2.size();
if (constant_tensor) {
shape1_ =
AddConstInput({GetTensorType<ShapeType>(), {input1_length}}, input1);
shape2_ =
AddConstInput({GetTensorType<ShapeType>(), {input2_length}}, input2);
} else {
shape1_ = AddInput({GetTensorType<ShapeType>(), {input1_length}});
shape2_ = AddInput({GetTensorType<ShapeType>(), {input2_length}});
}
output_ = AddOutput(GetTensorType<ShapeType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_ARGS, BuiltinOptions_NONE, 0);
BuildInterpreter({{input1_length}, {input2_length}});
if (!constant_tensor) {
if (input1.size() > 0) SetInput1(input1);
if (input2.size() > 0) SetInput2(input2);
}
}
void SetInput1(std::initializer_list<ShapeType> data) {
PopulateTensor(shape1_, data);
}
void SetInput2(std::initializer_list<ShapeType> data) {
PopulateTensor(shape2_, data);
}
std::vector<ShapeType> GetOutput() {
return ExtractVector<ShapeType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int shape1_;
int shape2_;
int output_;
};
template <typename T>
class BroadcastArgsOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<int64_t, int32_t>;
TYPED_TEST_SUITE(BroadcastArgsOpTest, DataTypes);
#if GTEST_HAS_DEATH_TEST
TYPED_TEST(BroadcastArgsOpTest, ShapeNotBroadcastableConstant) {
EXPECT_DEATH(BroadcastArgsOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2},
true),
"");
}
TYPED_TEST(BroadcastArgsOpTest, ShapeNotBroadcastable) {
BroadcastArgsOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2},
false);
EXPECT_DEATH(ASSERT_EQ(m.Invoke(), kTfLiteOk), "");
}
#endif
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsWithScalar) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({}, {2, 4}, constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 4}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsDifferentDims) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({1}, {2, 4}, constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 4}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsSameDims) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({1, 4, 6, 3, 1, 5}, {4, 4, 1, 3, 4, 1},
constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 4, 6, 3, 4, 5}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsComplex) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({6, 3, 1, 5}, {4, 4, 1, 3, 4, 1},
constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 4, 6, 3, 4, 5}));
}
}
}
} |
913 | cpp | tensorflow/tensorflow | cast | tensorflow/lite/kernels/cast.cc | tensorflow/lite/kernels/cast_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CAST_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CAST_H_
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
namespace tflite {
namespace gpu {
GPUOperation CreateCast(const OperationDef& definition,
const GpuInfo& gpu_info);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/cast.h"
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
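// Builds an elementwise GPU operation whose generated kernel simply converts
// in_value to the destination tensor's data type.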
GPUOperation CreateCast(const OperationDef& definition,
const GpuInfo& gpu_info) {
ElementwiseDescriptor op_desc;
const std::string conversion =
GetTypeConversion(gpu_info, definition.src_tensors[0].GetDataType(),
definition.dst_tensors[0].GetDataType(), 4);
op_desc.code =
"out_value = " + absl::Substitute(conversion, "in_value") + ";\n";
return CreateGpuOperation(definition, std::move(op_desc));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/cast_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Cast) {
auto status = CastTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CastToBool) {
auto status = CastToBoolTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CastFromBool) {
auto status = CastFromBoolTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
914 | cpp | tensorflow/tensorflow | div | tensorflow/lite/kernels/div.cc | tensorflow/lite/kernels/div_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
#include <algorithm>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void DivCheckArithmeticParams(const ArithmeticParams& params) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
constexpr int32_t max_value =
static_cast<int32_t>(std::numeric_limits<T>::max());
TFLITE_DCHECK_GE(params.input1_offset, -max_value);
TFLITE_DCHECK_LE(params.input1_offset, max_value);
TFLITE_DCHECK_GE(params.input2_offset, -max_value);
TFLITE_DCHECK_LE(params.input2_offset, max_value);
TFLITE_DCHECK_GE(params.output_offset, -max_value);
TFLITE_DCHECK_LE(params.output_offset, max_value);
}
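// Quantized elementwise division: each quotient is computed with an integer
// reciprocal of the (offset-adjusted) divisor followed by fixed-point
// rescaling, then clamped to the activation range.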
template <typename T>
inline void DivElementwise(int size, const ArithmeticParams& params,
const T* input1_data, const T* input2_data,
T* output_data) {
DivCheckArithmeticParams<T>(params);
for (int i = 0; i < size; ++i) {
int32_t input1_val = params.input1_offset + input1_data[i];
int32_t input2_val = params.input2_offset + input2_data[i];
TFLITE_DCHECK_NE(input2_val, 0);
if (input2_val < 0) {
input1_val = -input1_val;
input2_val = -input2_val;
}
int recip_shift;
const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift);
const int headroom = CountLeadingSignBits(input1_val);
const int32_t unscaled_quotient =
MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
headroom);
const int total_shift = params.output_shift - recip_shift - headroom;
const int32_t unclamped_result =
params.output_offset +
MultiplyByQuantizedMultiplierSmallerThanOneExp(
unscaled_quotient, params.output_multiplier, total_shift);
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, unclamped_result));
output_data[i] = static_cast<T>(clamped_output);
}
}
inline void Div(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const uint8_t* input1_data,
const RuntimeShape& input2_shape, const uint8_t* input2_data,
const RuntimeShape& output_shape, uint8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
DivElementwise(flat_size, params, input1_data, input2_data, output_data);
}
inline void Div(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int8_t* input1_data,
const RuntimeShape& input2_shape, const int8_t* input2_data,
const RuntimeShape& output_shape, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
DivElementwise(flat_size, params, input1_data, input2_data, output_data);
}
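// Broadcasting variant of the quantized division above; iterates the output
// index space with NdArrayDesc-based broadcasting of both operands.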
template <typename T, int N = 5>
inline void BroadcastDivSlowQuantized(
const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape,
const T* input1_data, const RuntimeShape& unextended_input2_shape,
const T* input2_data, const RuntimeShape& unextended_output_shape,
T* output_data) {
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
NdArrayDesc<N> desc1;
NdArrayDesc<N> desc2;
NdArrayDesc<N> output_desc;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
&output_desc);
DivCheckArithmeticParams<T>(params);
auto div_func = [&](int indexes[N]) {
int32_t input1_val =
params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
int32_t input2_val =
params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
TFLITE_DCHECK_NE(input2_val, 0);
if (input2_val < 0) {
input1_val = -input1_val;
input2_val = -input2_val;
}
int recip_shift;
const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift);
const int headroom = CountLeadingSignBits(input1_val);
const int32_t unscaled_quotient =
MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
headroom);
const int total_shift = params.output_shift - recip_shift - headroom;
const int32_t unclamped_result =
params.output_offset +
MultiplyByQuantizedMultiplierSmallerThanOneExp(
unscaled_quotient, params.output_multiplier, total_shift);
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, unclamped_result));
output_data[SubscriptToIndex(output_desc, indexes)] =
static_cast<T>(clamped_output);
};
NDOpsHelper<N>(output_desc, div_func);
}
template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const uint8_t* input1_data,
const RuntimeShape& unextended_input2_shape,
const uint8_t* input2_data,
const RuntimeShape& unextended_output_shape,
uint8_t* output_data) {
BroadcastDivSlowQuantized<uint8_t, N>(
params, unextended_input1_shape, input1_data, unextended_input2_shape,
input2_data, unextended_output_shape, output_data);
}
template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const int8_t* input1_data,
const RuntimeShape& unextended_input2_shape,
const int8_t* input2_data,
const RuntimeShape& unextended_output_shape,
int8_t* output_data) {
BroadcastDivSlowQuantized<int8_t, N>(
params, unextended_input1_shape, input1_data, unextended_input2_shape,
input2_data, unextended_output_shape, output_data);
}
template <typename T, int N = 5>
void BroadcastDivSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const T* input2_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
T output_activation_min;
T output_activation_max;
GetActivationParams(params, &output_activation_min, &output_activation_max);
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
NdArrayDesc<N> desc1;
NdArrayDesc<N> desc2;
NdArrayDesc<N> output_desc;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
&output_desc);
auto div_func = [&](int indexes[N]) {
output_data[SubscriptToIndex(output_desc, indexes)] =
ActivationFunctionWithMinMax(
input1_data[SubscriptToIndex(desc1, indexes)] /
input2_data[SubscriptToIndex(desc2, indexes)],
output_activation_min, output_activation_max);
};
NDOpsHelper<N>(output_desc, div_func);
}
template <typename T>
inline void Div(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
T output_activation_min;
T output_activation_max;
GetActivationParams(params, &output_activation_min, &output_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
input1_data[i] / input2_data[i], output_activation_min,
output_activation_max);
}
}
}
}
#endif
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#ifdef TFLITE_KERNEL_USE_XNNPACK
#include <algorithm>
#include <array>
#include <limits>
#include "xnnpack.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/minimal_logging.h"
#endif
namespace tflite {
namespace ops {
namespace builtin {
namespace div {
enum KernelType {
kReference,
kGenericOptimized,
kNeonOptimized,
};
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
  int32_t output_activation_min;
  int32_t output_activation_max;
int32_t output_multiplier;
int output_shift;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
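// Checks that both inputs share a type, resolves the (possibly broadcast)
// output shape, and precomputes the activation range and requantization
// multiplier for the uint8 path.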
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
output->type = input2->type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
if (output->type == kTfLiteUInt8) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, params->activation, output, &data->output_activation_min,
&data->output_activation_max));
const double real_multiplier =
input1->params.scale / (input2->params.scale * output->params.scale);
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
&data->output_shift);
}
return context->ResizeTensor(context, output, output_size);
}
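// Float/int32 evaluation: dispatches to reference or optimized kernels, with
// an optional XNNPACK fast path for float when the build enables it.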
template <KernelType kernel_type>
void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params,
const OpData* data, const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) {
#define TF_LITE_DIV(type, opname, data_type) \
tflite::ArithmeticParams op_params; \
data_type output_activation_min, output_activation_max; \
CalculateActivationRange(params->activation, &output_activation_min, \
&output_activation_max); \
SetActivationParams(output_activation_min, output_activation_max, \
&op_params); \
type::opname(op_params, GetTensorShape(input1), \
GetTensorData<data_type>(input1), GetTensorShape(input2), \
GetTensorData<data_type>(input2), GetTensorShape(output), \
GetTensorData<data_type>(output))
if (output->type == kTfLiteInt32) {
if (kernel_type == kReference) {
if (data->requires_broadcast) {
TF_LITE_DIV(reference_ops, BroadcastDivSlow, int32_t);
} else {
TF_LITE_DIV(reference_ops, Div, int32_t);
}
} else {
if (data->requires_broadcast) {
TF_LITE_DIV(optimized_ops, BroadcastDivSlow, int32_t);
} else {
TF_LITE_DIV(optimized_ops, Div, int32_t);
}
}
} else if (output->type == kTfLiteFloat32) {
if (kernel_type == kReference) {
if (data->requires_broadcast) {
TF_LITE_DIV(reference_ops, BroadcastDivSlow, float);
} else {
TF_LITE_DIV(reference_ops, Div, float);
}
} else {
#ifdef TFLITE_KERNEL_USE_XNNPACK
size_t num_input1_dims =
static_cast<size_t>(GetTensorShape(input1).DimensionsCount());
size_t num_input2_dims =
static_cast<size_t>(GetTensorShape(input2).DimensionsCount());
if (std::max(num_input1_dims, num_input2_dims) <= XNN_MAX_TENSOR_DIMS) {
std::array<size_t, XNN_MAX_TENSOR_DIMS> input1_shape;
std::array<size_t, XNN_MAX_TENSOR_DIMS> input2_shape;
for (size_t i = 0; i < num_input1_dims; ++i) {
input1_shape[i] = GetTensorShape(input1).Dims(i);
}
for (size_t i = 0; i < num_input2_dims; ++i) {
input2_shape[i] = GetTensorShape(input2).Dims(i);
}
CpuBackendContext* cpu_backend_context =
CpuBackendContext::GetFromContext(context);
pthreadpool_t threadpool =
cpu_backend_context->get_xnnpack_threadpool();
float output_min = -std::numeric_limits<float>::infinity();
float output_max = std::numeric_limits<float>::infinity();
CalculateActivationRange(params->activation, &output_min, &output_max);
const enum xnn_status status = xnn_run_divide_nd_f32(
num_input1_dims, input1_shape.data(), num_input2_dims,
input2_shape.data(), GetTensorData<float>(input1),
GetTensorData<float>(input2), GetTensorData<float>(output),
output_min, output_max,
XNN_FLAG_YIELD_WORKERS, threadpool);
if (status == xnn_status_success) {
return;
}
TFLITE_LOG(
TFLITE_LOG_INFO,
"Failed to run xnnpack xnn_run_divide_nd_f32. Error code: %d",
status);
}
#endif
if (data->requires_broadcast) {
TF_LITE_DIV(optimized_ops, BroadcastDivSlow, float);
} else {
TF_LITE_DIV(optimized_ops, Div, float);
}
}
}
#undef TF_LITE_DIV
}
template <KernelType kernel_type>
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
TfLiteDivParams* params, const OpData* data,
const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) {
if (input1->type == kTfLiteUInt8 && input2->type == kTfLiteUInt8 &&
output->type == kTfLiteUInt8) {
tflite::ArithmeticParams op_params;
SetActivationParams(data->output_activation_min,
data->output_activation_max, &op_params);
op_params.input1_offset = -input1->params.zero_point;
op_params.input2_offset = -input2->params.zero_point;
op_params.output_offset = output->params.zero_point;
op_params.output_multiplier = data->output_multiplier;
op_params.output_shift = data->output_shift;
bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
GetTensorShape(input1), GetTensorShape(input2), &op_params);
#define TF_LITE_DIV(type, opname, dtype) \
type::opname(op_params, GetTensorShape(input1), \
GetTensorData<dtype>(input1), GetTensorShape(input2), \
GetTensorData<dtype>(input2), GetTensorShape(output), \
GetTensorData<dtype>(output))
if (kernel_type == kReference) {
if (need_broadcast) {
TF_LITE_DIV(reference_ops, BroadcastDivSlow, uint8_t);
} else {
TF_LITE_DIV(reference_ops, Div, uint8_t);
}
} else {
if (need_broadcast) {
TF_LITE_DIV(optimized_ops, BroadcastDivSlow, uint8_t);
} else {
TF_LITE_DIV(optimized_ops, Div, uint8_t);
}
}
#undef TF_LITE_DIV
} else {
TF_LITE_KERNEL_LOG(
context, "Unsupported combination of input and output types in Div.");
return kTfLiteError;
}
return kTfLiteOk;
}
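// Returns an error if any element of the divisor tensor is zero.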
template <typename T>
TfLiteStatus CheckNonZero(TfLiteContext* context, const TfLiteTensor* tensor) {
const auto* data = GetTensorData<T>(tensor);
const size_t number_elements = tensor->bytes / sizeof(T);
for (size_t i = 0; i < number_elements; i++) {
TF_LITE_ENSURE(context, data[i] != 0);
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type == kTfLiteFloat32) {
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteInt32) {
    TF_LITE_ENSURE_OK(context, CheckNonZero<int32_t>(context, input2));
EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
    TF_LITE_ENSURE_OK(context, CheckNonZero<uint8_t>(context, input2));
TF_LITE_ENSURE_OK(
context, EvalQuantized<kernel_type>(context, node, params, data, input1,
input2, output));
} else {
TF_LITE_KERNEL_LOG(
context,
"Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DIV_REF() {
static TfLiteRegistration r = {
div::Init,
div::Free,
div::Prepare,
div::Eval<div::kReference>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_DIV_GENERIC_OPT() {
static TfLiteRegistration r = {
div::Init,
div::Free,
div::Prepare,
div::Eval<div::kGenericOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_DIV_NEON_OPT() {
static TfLiteRegistration r = {
div::Init,
div::Free,
div::Prepare,
div::Eval<div::kNeonOptimized>,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpInput1Shared};
return &r;
}
TfLiteRegistration* Register_DIV() {
#ifdef USE_NEON
return Register_DIV_NEON_OPT();
#else
return Register_DIV_GENERIC_OPT();
#endif
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
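// The tests below run the DIV builtin through the XNNPack delegate with
// randomly generated shapes, covering broadcasting, static inputs, FP16/INT8
// weights, fused activations, and multi-threaded execution.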
TEST(Div, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
TEST(Div, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_DIV, xnnpack_delegate.get());
}
}
} |
915 | cpp | tensorflow/tensorflow | floor_div | tensorflow/lite/kernels/floor_div.cc | tensorflow/lite/kernels/floor_div_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#include <cmath>
#include <functional>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
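// floor(input1 / input2), computed in double precision so that integer
// operands round toward negative infinity rather than toward zero.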
template <typename T>
T FloorDiv(T input1, T input2) {
return std::floor(std::divides<double>()(static_cast<double>(input1),
static_cast<double>(input2)));
}
}
}
#endif
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_div {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
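// Checks that both inputs have the same, supported type, records whether
// broadcasting is needed, and sizes the output accordingly.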
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
switch (type) {
case kTfLiteFloat32:
case kTfLiteInt32:
case kTfLiteInt16:
case kTfLiteInt8:
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
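// Rejects a zero divisor anywhere in the second input, then runs the
// broadcast or element-wise FloorDiv reference kernel.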
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
const T* denominator_data = GetTensorData<T>(input2);
for (int i = 0; i < NumElements(input2); ++i) {
if (std::equal_to<T>()(denominator_data[i], 0)) {
TF_LITE_KERNEL_LOG(context, "Division by 0");
return kTfLiteError;
}
}
if (requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), denominator_data, GetTensorShape(output),
GetTensorData<T>(output), reference_ops::FloorDiv<T>);
} else {
reference_ops::BinaryFunction<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output),
reference_ops::FloorDiv<T>);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input1->type) {
case kTfLiteInt8: {
return EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2,
output);
}
case kTfLiteInt16: {
return EvalImpl<int16_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_FLOOR_DIV() {
static TfLiteRegistration r = {floor_div::Init, floor_div::Free,
floor_div::Prepare, floor_div::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
template <typename T>
class FloorDivModel : public SingleOpModel {
public:
FloorDivModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_FLOOR_DIV, BuiltinOptions_FloorDivOptions,
CreateFloorDivOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input1_;
int input2_;
int output_;
};
TEST(FloorDivModel, Simple) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValue) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDiv) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
TEST(FloorDivModel, SimpleFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.05, 9.09, 11.9, 3.01});
model.PopulateTensor<float>(model.input2(), {2.05, 2.03, 3.03, 4.03});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(4.0, 4.0, 3.0, 0.0));
}
TEST(FloorDivModel, NegativeValueFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
model.PopulateTensor<float>(model.input2(), {2.0, 2.3, -3.0, -4.1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5.0, -5.0, 3.0, -2.0));
}
TEST(FloorDivModel, BroadcastFloorDivFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
model.PopulateTensor<float>(model.input2(), {-3.3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4.0, 2.0, 3.0, -3.0));
}
TEST(FloorDivModel, SimpleInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValueInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDivInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1}}, {TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
}
} |
916 | cpp | tensorflow/tensorflow | mul | tensorflow/lite/kernels/mul.cc | tensorflow/lite/kernels/mul_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_MUL_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_MUL_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewMultiplyNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/mul.h"
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
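// Picks the coordinate used to index the second input along `dim`:
// "0" when that dimension is broadcast (size 1), otherwise `default_coord`.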
absl::Status GetCoordinate(const NodeShader::GenerationContext& ctx, int dim,
const std::string& default_coord,
std::string* coord) {
std::string result;
if (ctx.input_shapes[1][dim] == 1 && ctx.input_shapes[0][dim] != 1) {
result = "0";
} else if (ctx.input_shapes[0][dim] == ctx.input_shapes[1][dim]) {
result = default_coord;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Second runtime tensor dimension ", dim,
" must either match "
"first tensor's dimensions or be 1."));
}
*coord = result;
return absl::OkStatus();
}
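// Generates GLSL for multiplying two runtime tensors, broadcasting the
// second input along any dimension of size 1.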
absl::Status GenerateMultiplyRuntimeTensorCode(
const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
std::string x_coord, y_coord, z_coord;
  RETURN_IF_ERROR(
      GetCoordinate(ctx, /*dim=*/2, /*default_coord=*/"gid.x", &x_coord));
  RETURN_IF_ERROR(
      GetCoordinate(ctx, /*dim=*/1, /*default_coord=*/"gid.y", &y_coord));
  RETURN_IF_ERROR(
      GetCoordinate(ctx, /*dim=*/3, /*default_coord=*/"gid.z", &z_coord));
std::string source =
absl::StrCat("vec4 input1_value = $input_data_1[", x_coord, ", ", y_coord,
", ", z_coord, "]$;");
if (ctx.input_shapes[1][3] == 1 && ctx.input_shapes[0][3] != 1) {
absl::StrAppend(
&source,
"\ninput1_value = vec4(input1_value.x, input1_value.x, input1_value.x, "
"input1_value.x);\n");
}
absl::StrAppend(
&source, "value_0 = $input_data_0[gid.x, gid.y, gid.z]$ * input1_value;");
  *generated_code = {
      /*parameters=*/{},
      /*objects=*/{},
      /*shared_variables=*/{},
      /*workload=*/uint3(),
      /*workgroup=*/uint3(),
      /*source_code=*/std::move(source),
      /*input=*/IOStructure::ONLY_DEFINITIONS,
      /*output=*/IOStructure::AUTO,
  };
return absl::OkStatus();
}
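// Generates GLSL for multiplying by a constant operand: a scalar, a
// per-channel (Linear) tensor, or a full HWC tensor bound as a read-only
// buffer.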
absl::Status GenerateMultiplyConstantTensorCode(
const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
const auto& attr = std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
if (std::holds_alternative<float>(attr.param)) {
    *generated_code = {
        /*parameters=*/{{"scalar", std::get<float>(attr.param)}},
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        /*source_code=*/"value_0 *= $scalar$;",
        /*input=*/IOStructure::AUTO,
        /*output=*/IOStructure::AUTO,
    };
return absl::OkStatus();
}
if (std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(attr.param)) {
*generated_code = {
{},
{{"mul_buffer",
MakeReadonlyObject(
std::get<Tensor<Linear, DataType::FLOAT32>>(attr.param).data)}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
"value_0 *= $mul_buffer[gid.z]$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (std::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(attr.param)) {
std::string source;
if (ctx.input_shapes[0][1] == 1 && ctx.input_shapes[0][2] == 1 &&
ctx.input_shapes[0][3] == 1) {
source = R"(
value_0 = $input_data_0[0, 0, 0]$;
value_0 = vec4(value_0.x, value_0.x, value_0.x, value_0.x);
)";
}
auto param_shape =
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param).shape;
if (param_shape.c == 1) {
if (param_shape.h == 1 && param_shape.w == 1) {
absl::StrAppend(&source, "vec4 const_val = $hwc_buffer[0, 0, 0]$;");
} else {
absl::StrAppend(&source,
"vec4 const_val = $hwc_buffer[gid.x, gid.y, 0]$;");
}
absl::StrAppend(&source,
"const_val = vec4(const_val.x, const_val.x, const_val.x, "
"const_val.x);");
} else {
source += "vec4 const_val = $hwc_buffer[gid.x, gid.y, gid.z]$;";
}
absl::StrAppend(&source, "value_0 *= const_val;");
*generated_code = {
{},
{{"hwc_buffer",
MakeReadonlyObject(
uint3(param_shape.w, param_shape.h,
DivideRoundUp(param_shape.c, 4)),
ConvertToPHWC4(
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param)))}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
std::move(source),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
return absl::InvalidArgumentError("Unsupported Multiplication case.");
}
class Multiply : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() == 2) {
return GenerateMultiplyRuntimeTensorCode(ctx, generated_code);
} else {
return GenerateMultiplyConstantTensorCode(ctx, generated_code);
}
}
};
}
std::unique_ptr<NodeShader> NewMultiplyNodeShader() {
return std::make_unique<Multiply>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/mul.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(MulTest, ConstantTensorMatchingShape) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = input.shape;
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor_3d;
tensor_3d.shape.h = input.shape.h;
tensor_3d.shape.w = input.shape.w;
tensor_3d.shape.c = input.shape.c;
tensor_3d.id = 2;
tensor_3d.data = {-2, 2, -3, 3};
attr.param = std::move(tensor_3d);
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, 4, -9, 12}));
}
TEST(MulTest, ConstantTensorSingleChannel) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = input.shape;
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor_3d;
tensor_3d.shape.h = input.shape.h;
tensor_3d.shape.w = input.shape.w;
tensor_3d.shape.c = 1;
tensor_3d.id = 2;
tensor_3d.data = {-2, 2};
attr.param = std::move(tensor_3d);
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, -4, 6, 8}));
}
TEST(MulTest, DegenerateConstantTensorSingleValue) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = input.shape;
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor_3d;
tensor_3d.shape.h = 1;
tensor_3d.shape.w = 1;
tensor_3d.shape.c = 1;
tensor_3d.id = 2;
tensor_3d.data = {-2};
attr.param = std::move(tensor_3d);
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, -4, -6, -8}));
}
TEST(MulTest, ConstantTensorLinear) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = input.shape;
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> tensor;
tensor.shape.v = 2;
tensor.id = 1;
tensor.data = {2, 3};
attr.param = std::move(tensor);
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 6, 6, 12}));
}
TEST(MulTest, ConstantTensorScalar) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = input.shape;
ElementwiseAttributes attr;
attr.param = 2.f;
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 6, 8}));
}
TEST(MulTest, RuntimeTensorMatchingShapeNonOnes) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
TensorRef<BHWC> mask;
mask.type = DataType::FLOAT32;
mask.ref = 1;
mask.shape = input.shape;
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = input.shape;
SingleOpModel model({ToString(OperationType::MUL), {}}, {input, mask},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4, -1, -2, -3, -4}));
ASSERT_TRUE(model.PopulateTensor(1, {5, 6, 7, 8, 9, 10, 11, 12}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {5, 12, 21, 32, -9, -20, -33, -48}));
}
TEST(MulTest, RuntimeTensorMatchingShapeHeightOne) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> mask;
mask.type = DataType::FLOAT32;
mask.ref = 1;
mask.shape = input.shape;
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = input.shape;
SingleOpModel model({ToString(OperationType::MUL), {}}, {input, mask},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {1, 2, 3, 4}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 4, 9, 16}));
}
TEST(MulTest, RuntimeTensorSingleChannel) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> mask;
mask.type = DataType::FLOAT32;
mask.ref = 1;
mask.shape = BHWC(1, input.shape.h, input.shape.w, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = input.shape;
SingleOpModel model({ToString(OperationType::MUL), {}}, {input, mask},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {2, 3}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 9, 12}));
}
TEST(MulTest, RuntimeTensorLinear) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> mask;
mask.type = DataType::FLOAT32;
mask.ref = 1;
mask.shape = BHWC(1, 1, 1, input.shape.c);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = input.shape;
SingleOpModel model({ToString(OperationType::MUL), {}}, {input, mask},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {1, 2}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 4, 3, 8}));
}
TEST(MulTest, RuntimeTensorScalar) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 1, 2, 2);
TensorRef<BHWC> mask;
mask.type = DataType::FLOAT32;
mask.ref = 1;
mask.shape = BHWC(1, 1, 1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = input.shape;
SingleOpModel model({ToString(OperationType::MUL), {}}, {input, mask},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {5}));
ASSERT_OK(model.Invoke(*NewMultiplyNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {5, 10, 15, 20}));
}
}
}
}
} |
917 | cpp | tensorflow/tensorflow | floor_mod | tensorflow/lite/kernels/floor_mod.cc | tensorflow/lite/kernels/floor_mod_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#include <cmath>
#include <functional>
namespace tflite {
namespace reference_ops {
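// Reference implementation of floor mod: take the truncated remainder and,
// when its sign differs from the divisor's, shift it by input2 so the result
// matches floor-division semantics.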
template <typename T>
T FloorMod(T input1, T input2) {
struct FloatMod {
float operator()(const float lhs, const float rhs) const {
return std::fmod(lhs, rhs);
}
};
using ModFunc = typename std::conditional<std::is_integral<T>::value,
std::modulus<T>, FloatMod>::type;
ModFunc mod_func;
T trunc_mod = mod_func(input1, input2);
return (trunc_mod != 0) && ((input2 < 0) != (trunc_mod < 0))
? (trunc_mod + input2)
: trunc_mod;
}
}
}
#endif
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_mod {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
data->requires_broadcast = false;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
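// Validates the element type, records whether broadcasting is needed, and
// resizes the output to the (possibly broadcast) shape.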
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
if (type != kTfLiteInt8 && type != kTfLiteInt16 && type != kTfLiteInt32 &&
type != kTfLiteFloat32 && type != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
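// For integer types, rejects a zero divisor, then runs the broadcast or
// element-wise FloorMod reference kernel.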
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) {
const T* denominator_data = GetTensorData<T>(input2);
if (input2->type == kTfLiteInt8 || input2->type == kTfLiteInt16 ||
input2->type == kTfLiteInt32 || input2->type == kTfLiteInt64) {
const int num_elements = NumElements(input2);
for (int i = 0; i < num_elements; ++i) {
if (denominator_data[i] == 0) {
TF_LITE_KERNEL_LOG(context, "Division by 0");
return kTfLiteError;
}
}
}
if (requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), denominator_data, GetTensorShape(output),
GetTensorData<T>(output), reference_ops::FloorMod<T>);
} else {
reference_ops::BinaryFunction<T, T, T>(
GetTensorShape(input1), GetTensorData<T>(input1),
GetTensorShape(input2), GetTensorData<T>(input2),
GetTensorShape(output), GetTensorData<T>(output),
reference_ops::FloorMod<T>);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input1->type) {
case kTfLiteInt8: {
return EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2,
output);
}
case kTfLiteInt16: {
return EvalImpl<int16_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
}
}
}
TfLiteRegistration* Register_FLOOR_MOD() {
static TfLiteRegistration r = {floor_mod::Init, floor_mod::Free,
floor_mod::Prepare, floor_mod::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/floor_mod_test_common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
TEST(FloorModModel, Simple) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, NegativeValue) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, BroadcastFloorMod) {
FloorModModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
TEST(FloorModModel, Int64WithBroadcast) {
FloorModModel<int64_t> model({TensorType_INT64, {1, 2, 2, 1}},
{TensorType_INT64, {1}}, {TensorType_INT64, {}});
model.PopulateTensor<int64_t>(model.input1(), {10, -9, -11, (1LL << 34) + 9});
model.PopulateTensor<int64_t>(model.input2(), {-(1LL << 33)});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(),
ElementsAre(-8589934582, -9, -11, -8589934583));
}
TEST(FloorModModel, FloatSimple) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<float>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, FloatNegativeValue) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<float>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, FloatBroadcastFloorMod) {
FloorModModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<float>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
TEST(FloorModModel, SimpleInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(FloorModModel, NegativeValueInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, -2, -1));
}
TEST(FloorModModel, BroadcastFloorModInt16) {
FloorModModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1}}, {TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-2, 0, -2, -2));
}
}
} |
918 | cpp | tensorflow/tensorflow | kernel_util | tensorflow/lite/kernels/kernel_util.cc | tensorflow/lite/kernels/kernel_util_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
#include <stdint.h>
#include <limits>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#endif
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#ifndef NDEBUG
#include "tensorflow/lite/kernels/op_macros.h"
#endif
namespace tflite {
const TfLiteTensor* GetInput(const TfLiteContext* context,
const TfLiteNode* node, int index);
TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
int index, const TfLiteTensor** tensor);
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
int index);
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
int index);
TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
int index, TfLiteTensor** tensor);
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
const TfLiteNode* node, int index);
#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
int index);
TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
TfLiteTensor** tensor);
const TfLiteTensor* GetIntermediates(TfLiteContext* context,
const TfLiteNode* node, int index);
TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
TfLiteTensor** tensor);
#endif
inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
return t->dims->data[dim];
}
inline int NumInputs(const TfLiteNode* node) {
return node->inputs == nullptr ? 0 : node->inputs->size;
}
inline int NumOutputs(const TfLiteNode* node) {
return node->outputs == nullptr ? 0 : node->outputs->size;
}
#ifndef TF_LITE_STATIC_MEMORY
inline int NumIntermediates(const TfLiteNode* node) {
return node->intermediates->size;
}
#endif
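// Returns the number of elements implied by `dims`; in debug builds, asserts
// that the running product still fits in a 32-bit int, which most of TFLite
// assumes.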
inline int64_t NumElements(const int* dims, int num_dims) {
int64_t count = 1;
for (int i = 0; i < num_dims; ++i) {
#ifndef NDEBUG
if (count <= 0) {
break;
}
TF_LITE_ASSERT(dims[i] < std::numeric_limits<int>::max() / count);
#endif
count *= dims[i];
}
return count;
}
inline int64_t NumElements(const TfLiteIntArray* dims) {
return NumElements(dims->data, dims->size);
}
inline int64_t NumElements(const TfLiteTensor* t) {
return NumElements(t->dims);
}
inline bool IsConstantTensor(const TfLiteTensor* tensor) {
return tensor->allocation_type == kTfLiteMmapRo;
}
inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) {
return IsConstantTensor(tensor) ||
(tensor->allocation_type == kTfLitePersistentRo);
}
inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
return tensor->allocation_type == kTfLiteDynamic;
}
#ifndef TF_LITE_STATIC_MEMORY
inline void SetTensorToDynamic(TfLiteTensor* tensor) {
if (tensor->allocation_type != kTfLiteDynamic) {
TfLiteTensorDataFree(tensor);
tensor->allocation_type = kTfLiteDynamic;
}
}
inline void SetTensorToPersistentRo(TfLiteTensor* tensor) {
if (tensor->allocation_type != kTfLitePersistentRo) {
TfLiteTensorDataFree(tensor);
tensor->allocation_type = kTfLitePersistentRo;
}
}
#endif
inline bool IsHybridOp(const TfLiteTensor* input, const TfLiteTensor* weight) {
return ((weight->type == kTfLiteUInt8 || weight->type == kTfLiteInt8) &&
input->type == kTfLiteFloat32);
}
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
int32_t* per_channel_multiplier, int32_t* per_channel_shift);
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
int32_t* per_channel_multiplier, int32_t* per_channel_shift,
int num_channels);
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias,
TfLiteTensor* output,
double* multiplier);
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
TfLiteTensor* output,
double* multiplier);
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
TfLiteFusedActivation activation,
TfLiteTensor* output,
int32_t* act_min,
int32_t* act_max);
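// Fills [*activation_min, *activation_max] for a fused activation:
// RELU -> [0, max], RELU6 -> [0, 6], RELU_N1_TO_1 -> [-1, 1], otherwise the
// full range of T.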
template <typename T>
void CalculateActivationRange(TfLiteFusedActivation activation,
T* activation_min, T* activation_max) {
if (activation == kTfLiteActRelu) {
*activation_min = 0;
*activation_max = std::numeric_limits<T>::max();
} else if (activation == kTfLiteActRelu6) {
*activation_min = 0;
*activation_max = 6;
} else if (activation == kTfLiteActReluN1To1) {
*activation_min = -1;
*activation_max = 1;
} else {
*activation_min = std::numeric_limits<T>::lowest();
*activation_max = std::numeric_limits<T>::max();
}
}
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);
#if !defined(TF_LITE_STATIC_MEMORY)
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteIntArray** output_shape);
std::string GetShapeDebugString(const TfLiteIntArray* shape);
#endif
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
TfLiteIntArray** output_shape);
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
const TfLiteTensor* input3,
TfLiteIntArray** output_shape);
int TfLiteTypeGetSize(TfLiteType type);
bool IsMobilePlatform();
bool HasUnspecifiedDimension(const TfLiteTensor* tensor);
}
#endif
#include "tensorflow/lite/kernels/kernel_util.h"
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <complex>
#include <limits>
#include <memory>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#include "tensorflow/lite/array.h"
#endif
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif
namespace tflite {
namespace {
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
int tensor_index) {
if (context->tensors != nullptr) {
return &context->tensors[tensor_index];
} else {
return context->GetTensor(context, tensor_index);
}
}
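// Validates a node-level tensor index, reporting an error through the
// context for out-of-range or optional (absent) tensors.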
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
int index, int max_size,
const int* tensor_indices,
int* tensor_index) {
if (index < 0 || index >= max_size) {
TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
"Invalid tensor index %d (not in [0, %d))\n", index,
max_size);
return kTfLiteError;
}
if (tensor_indices[index] == kTfLiteOptionalTensor) {
TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
"Tensor at index %d was optional but was expected\n",
index);
return kTfLiteError;
}
*tensor_index = tensor_indices[index];
return kTfLiteOk;
}
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
int max_size, const int* tensor_indices) {
if (index >= 0 && index < max_size) {
const int tensor_index = tensor_indices[index];
if (tensor_index != kTfLiteOptionalTensor) {
return tensor_index;
}
}
return -1;
}
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->inputs->size, node->inputs->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
const TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(
context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
node->inputs->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
}
const TfLiteTensor* GetInput(const TfLiteContext* context,
const TfLiteNode* node, int index) {
return GetMutableInput(context, node, index);
}
TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
int index, const TfLiteTensor** tensor) {
return GetMutableInputSafe(context, node, index, tensor);
}
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
int index) {
TfLiteTensor* tensor = GetMutableInput(context, node, index);
if (tensor == nullptr) return nullptr;
return tensor->is_variable ? tensor : nullptr;
}
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->outputs->size, node->outputs->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
int index, TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(
context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
node->outputs->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
const TfLiteNode* node, int index) {
return GetInput(context, node, index);
}
#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->temporaries->size, node->temporaries->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
context, index, node->temporaries->size,
node->temporaries->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
const TfLiteTensor* GetIntermediates(TfLiteContext* context,
const TfLiteNode* node, int index) {
const int tensor_index = ValidateTensorIndexing(
context, index, node->intermediates->size, node->intermediates->data);
if (tensor_index < 0) {
return nullptr;
}
return GetTensorAtIndex(context, tensor_index);
}
TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
const TfLiteNode* node, int index,
TfLiteTensor** tensor) {
int tensor_index;
TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
context, index, node->intermediates->size,
node->intermediates->data, &tensor_index));
*tensor = GetTensorAtIndex(context, tensor_index);
return kTfLiteOk;
}
#endif
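// Derives the per-channel output multipliers and shifts from the input,
// filter and output scales, and the quantized activation range for the
// fused activation.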
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
const auto* affine_quantization =
reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
return PopulateConvolutionQuantizationParams(
context, input, filter, bias, output, activation, multiplier, shift,
output_activation_min, output_activation_max, per_channel_multiplier,
per_channel_shift, affine_quantization->scale->size);
}
TfLiteStatus PopulateConvolutionQuantizationParams(
TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
int32_t* output_activation_min, int32_t* output_activation_max,
int32_t* per_channel_multiplier, int32_t* per_channel_shift,
int num_channels) {
TF_LITE_ENSURE_EQ(context, input->quantization.type,
kTfLiteAffineQuantization);
TF_LITE_ENSURE_EQ(context, filter->quantization.type,
kTfLiteAffineQuantization);
const auto* affine_quantization =
reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
TF_LITE_ENSURE(context, affine_quantization);
TF_LITE_ENSURE(context, affine_quantization->scale);
const bool is_per_channel = affine_quantization->scale->size > 1;
if (is_per_channel) {
TF_LITE_ENSURE(context,
input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
TF_LITE_ENSURE(context,
filter->type == kTfLiteInt8 || filter->type == kTfLiteInt4);
TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
TF_LITE_ENSURE_EQ(
context, num_channels,
filter->dims->data[affine_quantization->quantized_dimension]);
}
const float input_scale = input->params.scale;
const float output_scale = output->params.scale;
const float* filter_scales = affine_quantization->scale->data;
for (int i = 0; i < num_channels; ++i) {
const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
const double filter_scale = static_cast<double>(scale);
const double effective_output_scale = static_cast<double>(input_scale) *
filter_scale /
static_cast<double>(output_scale);
int32_t significand;
int channel_shift;
QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
per_channel_multiplier[i] = significand;
per_channel_shift[i] = channel_shift;
}
if (input->type == kTfLiteUInt8) {
double real_multiplier = 0.0;
TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
context, input, filter, bias, output, &real_multiplier));
int exponent;
QuantizeMultiplier(real_multiplier, multiplier, &exponent);
*shift = -exponent;
}
if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
input->type == kTfLiteInt16) {
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
context, activation, output, output_activation_min,
output_activation_max));
}
return kTfLiteOk;
}
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias,
TfLiteTensor* output,
double* multiplier) {
const double input_product_scale = static_cast<double>(input->params.scale) *
static_cast<double>(filter->params.scale);
if (bias) {
const double bias_scale = static_cast<double>(bias->params.scale);
const double scale_diff = std::abs(input_product_scale - bias_scale);
const double output_scale = static_cast<double>(output->params.scale);
TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
}
return GetQuantizedConvolutionMultipler(context, input, filter, output,
multiplier);
}
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
const TfLiteTensor* input,
const TfLiteTensor* filter,
TfLiteTensor* output,
double* multiplier) {
const double input_product_scale =
static_cast<double>(input->params.scale * filter->params.scale);
TF_LITE_ENSURE(context, input_product_scale >= 0);
*multiplier = input_product_scale / static_cast<double>(output->params.scale);
return kTfLiteOk;
}
namespace {
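// Quantizes `f` with the given scale and zero point, failing if the rounded
// value would overflow int32.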
inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
int32_t zero_point, float f, int32_t& q) {
const float tmp = TfLiteRound(f / scale);
const bool no_integer_overflow_from_quantization =
(tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
q = zero_point + static_cast<int32_t>(tmp);
return kTfLiteOk;
}
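// Clamps the quantized range [qmin, qmax] to the fused activation's bounds
// expressed in the output's quantization parameters.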
TfLiteStatus CalculateActivationRangeQuantizedImpl(
TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
const auto scale = output->params.scale;
const auto zero_point = output->params.zero_point;
int32_t tmp_q;
if (activation == kTfLiteActRelu) {
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 0.0, tmp_q));
*act_min = std::max(qmin, tmp_q);
*act_max = qmax;
} else if (activation == kTfLiteActRelu6) {
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 0.0, tmp_q));
*act_min = std::max(qmin, tmp_q);
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 6.0, tmp_q));
*act_max = std::min(qmax, tmp_q);
} else if (activation == kTfLiteActReluN1To1) {
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, -1.0, tmp_q));
*act_min = std::max(qmin, tmp_q);
TF_LITE_ENSURE_OK(context,
Quantize(context, scale, zero_point, 1.0, tmp_q));
*act_max = std::min(qmax, tmp_q);
} else {
*act_min = qmin;
*act_max = qmax;
}
return kTfLiteOk;
}
}
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
TfLiteFusedActivation activation,
TfLiteTensor* output,
int32_t* act_min,
int32_t* act_max) {
int32_t qmin = 0;
int32_t qmax = 0;
if (output->type == kTfLiteUInt8) {
qmin = std::numeric_limits<uint8_t>::min();
qmax = std::numeric_limits<uint8_t>::max();
} else if (output->type == kTfLiteInt8) {
qmin = std::numeric_limits<int8_t>::min();
qmax = std::numeric_limits<int8_t>::max();
} else if (output->type == kTfLiteInt16) {
qmin = std::numeric_limits<int16_t>::min();
qmax = std::numeric_limits<int16_t>::max();
} else {
TF_LITE_ENSURE(context, false);
}
return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
output, act_min, act_max);
}
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
return TfLiteIntArrayEqual(input1->dims, input2->dims);
}
#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteIntArray** output_shape) {
if (NumDimensions(input) != 1) {
TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
"Invalid %dD input tensor (must be a 1D tensor).",
NumDimensions(input));
return kTfLiteError;
}
const int output_dims = SizeOfDimension(input, 0);
IntArrayUniquePtr shape(TfLiteIntArrayCreate(output_dims));
for (int i = 0; i < output_dims; i++) {
shape->data[i] = input->data.i32[i];
}
*output_shape = shape.release();
return kTfLiteOk;
}
std::string GetShapeDebugString(const TfLiteIntArray* shape) {
std::string str;
for (int d = 0; d < shape->size; ++d) {
if (str.empty())
str = "[" + std::to_string(shape->data[d]);
else | #include "tensorflow/lite/kernels/kernel_util.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
struct TestContext : public TfLiteContext {
string error;
};
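// Captures error messages reported through the context so tests can assert
// on the exact text.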
void ReportError(TfLiteContext* context, const char* format, ...) {
TestContext* c = static_cast<TestContext*>(context);
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
c->error = temp_buffer;
}
class TestWithTfLiteContext : public ::testing::Test {
public:
TestWithTfLiteContext() { context_.ReportError = ReportError; }
TensorUniquePtr BuildTfLiteTensorForTest(std::initializer_list<int> dims) {
return BuildTfLiteTensor(kTfLiteInt32, dims, kTfLiteDynamic);
}
protected:
TestContext context_;
};
class HaveSameShapeTest : public TestWithTfLiteContext {};
TEST_F(HaveSameShapeTest, NullPointerIsSameShape) {
TensorUniquePtr t1 = BuildTfLiteTensor();
t1->dims = nullptr;
TensorUniquePtr t2 = BuildTfLiteTensor();
t2->dims = nullptr;
EXPECT_TRUE(HaveSameShapes(t1.get(), t2.get()));
}
TEST_F(HaveSameShapeTest, NotSameShapeFalse) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({2, 3});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3});
EXPECT_FALSE(HaveSameShapes(t1.get(), t2.get()));
}
TEST_F(HaveSameShapeTest, EmptyShapeEqualTrue) {
TensorUniquePtr t1 = BuildTfLiteTensor();
TensorUniquePtr t2 = BuildTfLiteTensor();
EXPECT_TRUE(HaveSameShapes(t1.get(), t2.get()));
}
class BroadcastShapeTest : public TestWithTfLiteContext {};
TEST_F(BroadcastShapeTest, IncompatibleDimNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* output = nullptr;
EXPECT_NE(kTfLiteOk,
CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &output));
EXPECT_EQ(output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,2] and [1,3], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, IncompatibleDimWithZeroNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 0});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* output = nullptr;
EXPECT_NE(kTfLiteOk,
CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &output));
EXPECT_EQ(output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,0] and [1,3], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, BroadCastSecondDimension) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* raw_output;
auto status =
CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &raw_output);
ASSERT_EQ(kTfLiteOk, status);
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 3}));
}
TEST_F(BroadcastShapeTest, ScalarAnd2dBroadcastsTo2d) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 2}));
}
TEST_F(BroadcastShapeTest, DifferentRankBroadcastsToHigherRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 2});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 1, 2}));
}
TEST_F(BroadcastShapeTest, ZeroDimDifferentRankBroadcastsToHigherRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 0, 2});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
TEST_F(BroadcastShapeTest, ZeroDimSameRankBroadcastsToHigherRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 0, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
&raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
TEST_F(BroadcastShapeTest, IncompatibleDimOnThreeTensorsNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 4});
TfLiteIntArray* raw_output = nullptr;
EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
EXPECT_EQ(raw_output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,2], [1,3] and [1,4], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, IncompatibleDimWithZeroOnThreeTensorsNullptr) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 0});
TfLiteIntArray* raw_output = nullptr;
EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
EXPECT_EQ(raw_output, nullptr);
EXPECT_EQ(context_.error,
"Given shapes, [1,1], [1,3] and [1,0], are not broadcastable.");
}
TEST_F(BroadcastShapeTest, ThreeTensorsBroadcastToLarger2ndDim) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 3});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 3}));
}
TEST_F(BroadcastShapeTest, TwoScalarsBroadcastTo2d) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 2}));
}
TEST_F(BroadcastShapeTest, DifferentSizesOnThreeTensorsBroadcastToLargerRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({3, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 3, 2}));
}
TEST_F(BroadcastShapeTest,
DifferentSizesOnThreeTensors4dBroadcastToLargerRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({3, 4});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 2, 1, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({1, 2, 3, 4}));
}
TEST_F(BroadcastShapeTest, ZeroOnThreeTensorsBroadcastToLargerRank) {
TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 1});
TensorUniquePtr t3 = BuildTfLiteTensorForTest({0, 1});
TfLiteIntArray* raw_output;
EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
t3.get(), &raw_output));
IntArrayUniquePtr output(raw_output);
EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
TEST(GetShapeDebugStringTest, GetShapeDebugString) {
IntArrayUniquePtr dims0 = BuildTfLiteArray({});
EXPECT_EQ("[]", GetShapeDebugString(dims0.get()));
IntArrayUniquePtr dims1 = BuildTfLiteArray({1});
dims1->data[0] = 1;
EXPECT_EQ("[1]", GetShapeDebugString(dims1.get()));
IntArrayUniquePtr dims2 = BuildTfLiteArray({2, 3});
dims2->data[0] = 2;
dims2->data[1] = 3;
EXPECT_EQ("[2,3]", GetShapeDebugString(dims2.get()));
IntArrayUniquePtr dims3 = BuildTfLiteArray({4, 5, 6});
dims3->data[0] = 4;
dims3->data[1] = 5;
dims3->data[2] = 6;
EXPECT_EQ("[4,5,6]", GetShapeDebugString(dims3.get()));
}
class QuantizationParamsTest : public TestWithTfLiteContext {};
TEST_F(QuantizationParamsTest, PerChannelConvolution) {
TensorUniquePtr input = BuildTfLiteTensor();
input->type = kTfLiteInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {0.5, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 0.5;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
TensorUniquePtr filter = BuildTfLiteTensor();
filter->type = kTfLiteInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {0.25, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(3);
filter_params->scale->data[0] = 0.25;
filter_params->scale->data[1] = 0.125;
filter_params->scale->data[2] = 0.25;
filter_params->zero_point = TfLiteIntArrayCreate(3);
filter_params->zero_point->data[0] = 0;
filter_params->zero_point->data[1] = 0;
filter_params->zero_point->data[2] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
TensorUniquePtr bias = BuildTfLiteTensor();
bias->type = kTfLiteInt32;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {0.125, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(3);
bias_params->scale->data[0] = 0.125;
bias_params->scale->data[1] = 0.0625;
bias_params->scale->data[2] = 0.125;
bias_params->zero_point = TfLiteIntArrayCreate(3);
bias_params->zero_point->data[0] = 11;
bias_params->zero_point->data[1] = 12;
bias_params->zero_point->data[2] = 15;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
TensorUniquePtr output = BuildTfLiteTensor();
output->type = kTfLiteInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {0.5, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 0.5;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int32_t> per_channel_shift(3);
auto status = PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data());
EXPECT_EQ(kTfLiteOk, status);
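  // Each channel's effective scale is input_scale * filter_scale / output_scale
  // = {0.25, 0.125, 0.25}, i.e. a 2^30 significand with shifts of -1, -2 and -1.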
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-1, -2, -1));
}
TEST_F(QuantizationParamsTest, CheckAndPopulateShift) {
TensorUniquePtr input = BuildTfLiteTensor();
input->type = kTfLiteUInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {0.5, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 0.5;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
TensorUniquePtr filter = BuildTfLiteTensor();
filter->type = kTfLiteUInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {0.25, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(1);
filter_params->scale->data[0] = 0.25;
filter_params->zero_point = TfLiteIntArrayCreate(1);
filter_params->zero_point->data[0] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
TensorUniquePtr bias = BuildTfLiteTensor();
bias->type = kTfLiteUInt8;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {0.125, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(3);
bias_params->scale->data[0] = 0.125;
bias_params->scale->data[1] = 0.0625;
bias_params->scale->data[2] = 0.125;
bias_params->zero_point = TfLiteIntArrayCreate(3);
bias_params->zero_point->data[0] = 11;
bias_params->zero_point->data[1] = 12;
bias_params->zero_point->data[2] = 15;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
TensorUniquePtr output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {0.5, 128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 0.5;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = 128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
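  // The filter carries a single scale, so the same multiplier and shift are
  // expected for every one of the three channels.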
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-1, -1, -1));
EXPECT_EQ(shift, 1);
EXPECT_EQ(multiplier, 1073741824);
}
#ifndef __APPLE__
TEST_F(QuantizationParamsTest, CheckAndPopulateZeroValue) {
auto input = BuildTfLiteTensor();
input->type = kTfLiteInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {1, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 1;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
auto filter = BuildTfLiteTensor();
filter->type = kTfLiteInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(3);
filter_params->scale->data[0] = std::ldexp(1.0f, -31);
filter_params->scale->data[1] = std::ldexp(1.0f, -32);
filter_params->scale->data[2] = std::ldexp(1.0f, -33);
filter_params->zero_point = TfLiteIntArrayCreate(3);
filter_params->zero_point->data[0] = 0;
filter_params->zero_point->data[1] = 0;
filter_params->zero_point->data[2] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
auto bias = BuildTfLiteTensor();
bias->type = kTfLiteInt32;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(3);
bias_params->scale->data[0] = std::ldexp(1.0f, -31);
bias_params->scale->data[1] = std::ldexp(1.0f, -32);
bias_params->scale->data[2] = std::ldexp(1.0f, -33);
bias_params->zero_point = TfLiteIntArrayCreate(3);
bias_params->zero_point->data[0] = 11;
bias_params->zero_point->data[1] = 12;
bias_params->zero_point->data[2] = 15;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
auto output = BuildTfLiteTensor();
output->type = kTfLiteInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
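  // The smallest per-channel scale (2^-33) is too small to represent as a
  // fixed-point multiplier, so its multiplier and shift collapse to zero.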
EXPECT_THAT(per_channel_multiplier, ElementsAre(1073741824, 1073741824, 0));
EXPECT_THAT(per_channel_shift, ElementsAre(-30, -31, 0));
}
#endif
TEST_F(QuantizationParamsTest, CheckAndPopulateUint8) {
auto input = BuildTfLiteTensor();
input->type = kTfLiteUInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {1, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 1;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
auto filter = BuildTfLiteTensor();
filter->type = kTfLiteUInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(1);
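  // 0x30000000 is the IEEE-754 bit pattern of 2^-31; it is copied into the
  // float scale via type punning.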
int32_t two_pow_neg_31 = 0x30000000;
filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
filter_params->zero_point = TfLiteIntArrayCreate(1);
filter_params->zero_point->data[0] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
auto bias = BuildTfLiteTensor();
bias->type = kTfLiteInt32;
bias->allocation_type = kTfLiteArenaRw;
bias->dims = TfLiteIntArrayCreate(4);
TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
bias->params = bias_quant;
bias->quantization.type = kTfLiteAffineQuantization;
auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
bias_params->scale = TfLiteFloatArrayCreate(1);
bias_params->scale->data[0] = 4.6566129e-10;
bias_params->zero_point = TfLiteIntArrayCreate(1);
bias_params->zero_point->data[0] = 11;
bias->quantization.params = reinterpret_cast<void*>(bias_params);
auto output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), bias.get(), output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));
}
TEST_F(QuantizationParamsTest, CheckAndPopulateWithoutBias) {
auto input = BuildTfLiteTensor();
input->type = kTfLiteUInt8;
input->allocation_type = kTfLiteArenaRw;
input->dims = TfLiteIntArrayCreate(1);
input->dims->data[0] = 2;
TfLiteQuantizationParams input_quant = {1, 5};
input->params = input_quant;
input->quantization.type = kTfLiteAffineQuantization;
auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
input_params->scale = TfLiteFloatArrayCreate(1);
input_params->scale->data[0] = 1;
input_params->zero_point = TfLiteIntArrayCreate(1);
input_params->zero_point->data[0] = 5;
input->quantization.params = reinterpret_cast<void*>(input_params);
auto filter = BuildTfLiteTensor();
filter->type = kTfLiteUInt8;
filter->allocation_type = kTfLiteArenaRw;
filter->dims = TfLiteIntArrayCreate(4);
filter->dims->data[0] = 3;
filter->dims->data[1] = 4;
filter->dims->data[2] = 5;
filter->dims->data[3] = 6;
TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
filter->params = filter_quant;
filter->quantization.type = kTfLiteAffineQuantization;
auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
filter_params->scale = TfLiteFloatArrayCreate(1);
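  // 0x30000000 is the IEEE-754 bit pattern of 2^-31; it is copied into the
  // float scale via type punning.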
int32_t two_pow_neg_31 = 0x30000000;
filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
filter_params->zero_point = TfLiteIntArrayCreate(1);
filter_params->zero_point->data[0] = 0;
filter_params->quantized_dimension = 0;
filter->quantization.params = reinterpret_cast<void*>(filter_params);
auto output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t multiplier;
int shift;
int32_t output_activation_min;
int32_t output_activation_max;
std::vector<int32_t> per_channel_multiplier(3);
std::vector<int> per_channel_shift(3);
EXPECT_EQ(kTfLiteOk,
PopulateConvolutionQuantizationParams(
&context_, input.get(), filter.get(), nullptr, output.get(),
kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
&output_activation_max, per_channel_multiplier.data(),
per_channel_shift.data(), 3));
EXPECT_THAT(per_channel_multiplier,
ElementsAre(1073741824, 1073741824, 1073741824));
EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));
}
TEST_F(QuantizationParamsTest, ActivationRangeQuantizedOverflow) {
auto output = BuildTfLiteTensor();
output->type = kTfLiteUInt8;
output->allocation_type = kTfLiteArenaRw;
output->dims = nullptr;
TfLiteQuantizationParams output_quant = {1e-10, -128};
output->params = output_quant;
output->quantization.type = kTfLiteAffineQuantization;
auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
output_params->scale = TfLiteFloatArrayCreate(1);
output_params->scale->data[0] = 1;
output_params->zero_point = TfLiteIntArrayCreate(1);
output_params->zero_point->data[0] = -128;
output->quantization.params = reinterpret_cast<void*>(output_params);
int32_t act_min, act_max;
ASSERT_EQ(kTfLiteOk,
CalculateActivationRangeQuantized(
&context_, kTfLiteActRelu, output.get(), &act_min, &act_max));
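  // With an output scale of 1e-10, the quantized ReLU6 upper bound (6 / scale)
  // does not fit in int32, so the range calculation must fail.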
ASSERT_NE(kTfLiteOk,
CalculateActivationRangeQuantized(
&context_, kTfLiteActRelu6, output.get(), &act_min, &act_max));
EXPECT_TRUE(absl::StrContains(
context_.error, "no_integer_overflow_from_quantization was not true"));
ASSERT_NE(kTfLiteOk, CalculateActivationRangeQuantized(
&context_, kTfLiteActReluN1To1, output.get(),
&act_min, &act_max));
EXPECT_TRUE(absl::StrContains(
context_.error, "no_integer_overflow_from_quantization was not true"));
}
TEST_F(QuantizationParamsTest, IsMobilePlatform) {
#if defined(__ANDROID__)
EXPECT_TRUE(IsMobilePlatform());
#elif defined(__linux__)
EXPECT_FALSE(IsMobilePlatform());
#elif defined(_WIN32)
EXPECT_FALSE(IsMobilePlatform());
#endif
}
TEST(HasUnspecifiedDimensions, ReturnsTrueIfADimIsMinusOne) {
auto tensor = BuildTfLiteTensor(kTfLiteInt32, {1, 1, 3}, kTfLiteDynamic);
tensor->dims_signature = ConvertVectorToTfLiteIntArray({1, -1, 3});
EXPECT_TRUE(HasUnspecifiedDimension(tensor.get()));
}
TEST(HasUnspecifiedDimensions, ReturnsFalseIfAllPositiveDims) {
auto tensor = BuildTfLiteTensor(kTfLiteInt32, {1, 1, 3}, kTfLiteDynamic);
tensor->dims_signature = ConvertVectorToTfLiteIntArray({1, 1, 3});
EXPECT_FALSE(HasUnspecifiedDimension(tensor.get()));
} |
919 | cpp | tensorflow/tensorflow | strided_slice | tensorflow/lite/kernels/strided_slice.cc | tensorflow/lite/kernels/strided_slice_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_STRIDED_SLICE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_STRIDED_SLICE_H_
#include <string>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
class StridedSlice : public GPUOperation {
public:
StridedSlice(const OperationDef& definition, const SliceAttributes& attr);
absl::Status BindArguments(ArgumentsBinder* args) override;
int3 GetGridSize() const override;
StridedSlice(StridedSlice&& operation);
StridedSlice& operator=(StridedSlice&& operation);
StridedSlice(const StridedSlice&) = delete;
StridedSlice& operator=(const StridedSlice&) = delete;
private:
std::string GetStridedSliceCode(const OperationDef& op_def, bool alignedx4);
SliceAttributes attributes_;
};
StridedSlice CreateStridedSlice(const OperationDef& definition,
const SliceAttributes& attr);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/strided_slice.h"
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
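// Whole 4-channel slices can be read directly when the channel stride is 1 and
// the start channel is a multiple of 4.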
bool Is4Aligned(const SliceAttributes& attr) {
return attr.strides.c == 1 && attr.starts.c % 4 == 0;
}
int4 GetOffset(const SliceAttributes& attr, int src_width, int src_height,
int src_channels, int src_batch) {
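  // For a positive stride the offset is the start index; for a negative stride
  // it is the end index, with non-positive ends counted back from the axis size.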
int4 offset;
if (attr.strides.w > 0) {
offset.x = attr.starts.w;
} else {
if (attr.ends.w > 0) {
offset.x = attr.ends.w;
} else {
offset.x = src_width + attr.ends.w;
}
}
if (attr.strides.h > 0) {
offset.y = attr.starts.h;
} else {
if (attr.ends.h > 0) {
offset.y = attr.ends.h;
} else {
offset.y = src_height + attr.ends.h;
}
}
if (attr.strides.c > 0) {
offset.z = attr.starts.c;
} else {
if (attr.ends.c > 0) {
offset.z = attr.ends.c;
} else {
offset.z = src_channels + attr.ends.c;
}
}
if (Is4Aligned(attr)) {
offset.z /= 4;
}
if (attr.strides.b > 0) {
offset.w = attr.starts.b;
} else {
if (attr.ends.b > 0) {
offset.w = attr.ends.b;
} else {
offset.w = src_batch + attr.ends.b;
}
}
return offset;
}
}
StridedSlice::StridedSlice(const OperationDef& definition,
const SliceAttributes& attr)
: GPUOperation(definition), attributes_(attr) {
work_group_size_ = int3(8, 4, 1);
code_ = GetStridedSliceCode(definition_, Is4Aligned(attributes_));
}
StridedSlice::StridedSlice(StridedSlice&& operation)
: GPUOperation(std::move(operation)), attributes_(operation.attributes_) {}
StridedSlice& StridedSlice::operator=(StridedSlice&& operation) {
if (this != &operation) {
attributes_ = operation.attributes_;
GPUOperation::operator=(std::move(operation));
}
return *this;
}
std::string StridedSlice::GetStridedSliceCode(const OperationDef& op_def,
bool alignedx4) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddInt("offset_x");
args_.AddInt("offset_y");
args_.AddInt("offset_z");
args_.AddInt("offset_b");
args_.AddInt("stride_x");
args_.AddInt("stride_y");
args_.AddInt("stride_z");
args_.AddInt("stride_b");
const std::string batch_id =
op_def.dst_tensors[0].HasAxis(Axis::BATCH) ? "B" : "0";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int S = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " int s_x = X * args.stride_x + args.offset_x;\n";
c += " int s_y = Y * args.stride_y + args.offset_y;\n";
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = " + batch_id + " * args.stride_b + args.offset_b;\n";
c += " args.src_tensor.SetBatchRef(s_b);\n";
}
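  // Aligned case: read a full 4-channel slice at once. Otherwise gather each of
  // the four channels separately, clamping to the last input channel.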
if (alignedx4) {
c += " int s_z = S + args.offset_z;\n";
c += " args.src_tensor::type result = args.src_tensor.Read(s_x, s_y, "
"s_z);\n";
} else {
c += " args.src_tensor::type result;\n";
const std::string postfixes[] = {"x", "y", "z", "w"};
for (int i = 0; i < 4; ++i) {
c += " {\n";
const std::string channel = "(S * 4 + " + std::to_string(i) + ")";
c += " int s_ch = min(" + channel +
" * args.stride_z + args.offset_z, args.src_tensor.Channels() - "
"1);\n";
c += " args.src_tensor.ReadPerChannel(result." + postfixes[i] +
", s_x, s_y, s_ch);\n";
c += " }\n";
}
}
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
absl::Status StridedSlice::BindArguments(ArgumentsBinder* args) {
int4 offset = GetOffset(attributes_, src_[0]->Width(), src_[0]->Height(),
src_[0]->Channels(), src_[0]->Batch());
RETURN_IF_ERROR(args->SetInt("offset_x", offset.x));
RETURN_IF_ERROR(args->SetInt("offset_y", offset.y));
RETURN_IF_ERROR(args->SetInt("offset_z", offset.z));
RETURN_IF_ERROR(args->SetInt("offset_b", offset.w));
RETURN_IF_ERROR(args->SetInt("stride_x", attributes_.strides.w));
RETURN_IF_ERROR(args->SetInt("stride_y", attributes_.strides.h));
RETURN_IF_ERROR(args->SetInt("stride_z", attributes_.strides.c));
RETURN_IF_ERROR(args->SetInt("stride_b", attributes_.strides.b));
return absl::OkStatus();
}
int3 StridedSlice::GetGridSize() const {
const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
const int grid_y = dst_[0]->Height();
const int grid_z = dst_[0]->Slices();
return int3(grid_x, grid_y, grid_z);
}
StridedSlice CreateStridedSlice(const OperationDef& definition,
const SliceAttributes& attr) {
return StridedSlice(definition, attr);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/strided_slice_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, StridedSlice) {
auto status = StridedSliceTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
920 | cpp | tensorflow/tensorflow | cumsum | tensorflow/lite/kernels/cumsum.cc | tensorflow/lite/kernels/cumsum_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CUMSUM_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CUMSUM_H_
#include <string>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
class Cumsum : public GPUOperation {
public:
Cumsum() = default;
explicit Cumsum(const OperationDef& definition, Axis axis)
: GPUOperation(definition), axis_(axis) {}
int3 GetGridSize() const override;
Cumsum(Cumsum&& operation);
Cumsum& operator=(Cumsum&& operation);
Cumsum(const Cumsum&) = delete;
Cumsum& operator=(const Cumsum&) = delete;
void GetCumsumCode(const OperationDef& op_def);
private:
Axis axis_;
};
Cumsum CreateCumsum(const OperationDef& definition,
const CumsumAttributes& attr);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/cumsum.h"
#include <map>
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
namespace tflite {
namespace gpu {
void Cumsum::GetCumsumCode(const OperationDef& op_def) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::map<Axis, std::string> task_sizes = {
{Axis::WIDTH, "args.src_tensor.Width()"},
{Axis::HEIGHT, "args.src_tensor.Height()"},
{Axis::DEPTH, "args.src_tensor.Depth()"},
{Axis::CHANNELS, "args.src_tensor.Slices()"},
{Axis::BATCH, "args.src_tensor.Batch()"},
};
std::string limit = task_sizes[axis_];
task_sizes[axis_] = "1";
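  // The accumulated axis is collapsed to 1 in the dispatch grid; the generated
  // shader loops over it instead.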
std::map<Axis, std::string> index_name = {
{Axis::WIDTH, "X"}, {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "Z"},
{Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
};
std::string indexes = "X, Y";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.dst_tensors[0].HasAxis(Axis::DEPTH)) {
indexes += ", Z";
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % " + task_sizes[Axis::HEIGHT] + ";\n";
c += " int D = linear_id / " + task_sizes[Axis::HEIGHT] + ";\n";
c += " if (D >= " + task_sizes[Axis::DEPTH] + ") return;\n";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= " + task_sizes[Axis::HEIGHT] + ") return;\n";
}
indexes += ", S";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
indexes += ", B";
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / " + task_sizes[Axis::BATCH] + ";\n";
c += " int B = linear_id % " + task_sizes[Axis::BATCH] + ";\n";
c += " if (X >= " + task_sizes[Axis::WIDTH] + ") return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= " + task_sizes[Axis::WIDTH] + ") return;\n";
}
c += " int S = GLOBAL_ID_2;\n";
c += " if (S >= " + task_sizes[Axis::CHANNELS] + ") return;\n";
c += " args.src_tensor::type res = args.src_tensor::zero_value;\n";
c += " for (; " + index_name[axis_] + " < " + limit + "; " +
index_name[axis_] + "++) {\n";
c += " args.src_tensor::type curr = args.src_tensor.Read(" + indexes +
");\n";
if (axis_ == Axis::CHANNELS) {
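    // Carry the running sum across the four lanes of each 4-channel slice.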
c += " res.x = res.w + curr.x;\n";
c += " res.y = res.x + curr.y;\n";
c += " res.z = res.y + curr.z;\n";
c += " res.w = res.z + curr.w;\n";
} else {
c += " res += curr;\n";
}
c += " args.dst_tensor.Write(res, " + indexes + ");\n";
c += " }\n";
c += "}\n";
code_ = c;
}
int3 Cumsum::GetGridSize() const {
const int width = axis_ == Axis::WIDTH ? 1 : src_[0]->Width();
const int height = axis_ == Axis::HEIGHT ? 1 : src_[0]->Height();
const int depth = axis_ == Axis::DEPTH ? 1 : src_[0]->Depth();
const int batch = axis_ == Axis::BATCH ? 1 : src_[0]->Batch();
const int slices = axis_ == Axis::CHANNELS ? 1 : src_[0]->Slices();
const int grid_x = width * batch;
const int grid_y = height * depth;
const int grid_z = slices;
return int3(grid_x, grid_y, grid_z);
}
Cumsum::Cumsum(Cumsum&& operation)
: GPUOperation(std::move(operation)), axis_(operation.axis_) {}
Cumsum& Cumsum::operator=(Cumsum&& operation) {
if (this != &operation) {
axis_ = operation.axis_;
GPUOperation::operator=(std::move(operation));
}
return *this;
}
Cumsum CreateCumsum(const OperationDef& definition,
const CumsumAttributes& attr) {
Cumsum op(definition, attr.axis);
op.GetCumsumCode(definition);
return op;
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/cumsum_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, CumsumHWCTest) {
absl::Status status = CumsumHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CumsumBHWCTest) {
absl::Status status = CumsumBHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
921 | cpp | tensorflow/tensorflow | reshape | tensorflow/lite/kernels/reshape.cc | third_party/xla/xla/tests/reshape_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESHAPE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESHAPE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewReshapeNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/reshape.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class Reshape : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes[0][1] * ctx.input_shapes[0][2] *
ctx.input_shapes[0][3] !=
ctx.output_shapes[0][1] * ctx.output_shapes[0][2] *
ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Number of elements in input & output tensors don't match.");
}
const auto& attr = std::any_cast<const ReshapeAttributes&>(ctx.op_attr);
if (attr.new_shape.h != ctx.output_shapes[0][1] ||
attr.new_shape.w != ctx.output_shapes[0][2] ||
attr.new_shape.c != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Dimensions for output does not match new_shape attribute");
}
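    // Reshape preserves the linear HWC element order: each output element's
    // flat index is decomposed back into input (x, y, channel) coordinates.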
std::string code = R"(
int input_ch_w = $input_channels$ * $input_data_0_w$;
int output_ch_w = $output_channels$ * $output_data_0_w$;
for (int i = 0; i < 4; ++i) {
int dst_channel = gid.z * 4 + i;
if (dst_channel >= $output_channels$) {
continue;
}
int p = dst_channel + $output_channels$ * gid.x + output_ch_w * gid.y;
int src_y = p / input_ch_w;
int src_x = (p % input_ch_w) / $input_channels$;
int src_z = (p % input_ch_w) % $input_channels$;
int src_layer = src_z / 4;
int src_channel = src_z % 4;
value_0[i] = $input_data_0[src_x, src_y, src_layer]$[src_channel];
}
)";
*generated_code = {
{
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"input_channels", static_cast<int>(ctx.input_shapes[0][3])},
{"output_data_0_w", static_cast<int>(ctx.output_shapes[0][2])},
{"output_channels", static_cast<int>(ctx.output_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewReshapeNodeShader() {
return std::make_unique<Reshape>();
}
}
}
} | #include <memory>
#include <numeric>
#include <random>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array4d.h"
#include "xla/client/global_data.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/reference_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class ReshapeTest : public ::testing::WithParamInterface<bool>,
public ClientLibraryTestBase {
public:
ReshapeTest() { set_use_bfloat16(GetParam()); }
ErrorSpec zero_error_spec_{0.0};
};
XLA_TEST_P(ReshapeTest, CollapseTrivial1x1) {
XlaBuilder builder(TestName());
Array2D<float> input_array(1, 1);
input_array.Fill(1.0f);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(
0, input_literal, "parameter", &builder, ¶meter));
Collapse(parameter, {0, 1});
auto expected_literal = LiteralUtil::CreateR1<float>({1.0f});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, CollapseTrivialR1EmptyDims) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateR1<float>({1.0f});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(
0, input_literal, "parameter", &builder, ¶meter));
Collapse(parameter, {});
auto expected_literal = LiteralUtil::CreateR1<float>({1.0f});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, CollapseTrivialR1OnlyDim) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateR1<float>({1.0f});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(
0, input_literal, "parameter", &builder, ¶meter));
Collapse(parameter, {0});
auto expected_literal = LiteralUtil::CreateR1<float>({1.0f});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, SingleElementArrayToScalar) {
XlaBuilder builder(TestName());
Array2D<float> input_array(1, 1);
input_array.Fill(1.0f);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(
0, input_literal, "parameter", &builder, ¶meter));
auto reshape = Reshape(parameter, {0, 1},
{});
auto new_shape = builder.GetShape(reshape).value();
auto expected_literal = LiteralUtil::CreateR0<float>(1.0f);
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ScalarToSingleElementArray) {
XlaBuilder builder(TestName());
Literal param0_literal = LiteralUtil::CreateR0<float>(1.0f);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, param0_literal, "param0",
&builder, ¶meter));
auto a = Neg(parameter);
Reshape(a, {}, {1});
auto expected_literal = LiteralUtil::CreateR1<float>({-1.0f});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Trivial0x3) {
XlaBuilder builder(TestName());
Array2D<float> input_array(0, 3);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Collapse(parameter, {0, 1});
auto expected_literal = LiteralUtil::CreateR1<float>({});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Trivial0x3WithParameter) {
XlaBuilder builder(TestName());
Literal param0_literal =
LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(0, 3));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, param0_literal, "param0",
&builder, ¶meter));
Collapse(parameter, {0, 1});
auto expected_literal = LiteralUtil::CreateR1<float>({});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Trivial3x0) {
XlaBuilder builder(TestName());
Array2D<float> input_array(3, 0);
auto input_literal = LiteralUtil::CreateR2FromArray2D(input_array);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Collapse(parameter, {0, 1});
auto expected_literal = LiteralUtil::CreateR1<float>({});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Trivial1x3) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Collapse(parameter, {0, 1});
auto expected_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Trivial3x1) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateR2<float>({{1.0f}, {2.0f}, {3.0f}});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Collapse(parameter, {0, 1});
auto expected_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, R1ToR2_0_To_2x0) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateR1<float>({});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0},
{2, 0});
auto expected_literal = LiteralUtil::CreateR2<float>({{}, {}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, R1ToR2_6_To_2x3) {
XlaBuilder builder(TestName());
auto input_literal =
LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0},
{2, 3});
auto expected_literal =
LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Reshape0x2To2x0) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(0, 2));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1},
{2, 0});
auto expected_literal = LiteralUtil::CreateR2<float>({{}, {}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ReshapeRowToCol) {
XlaBuilder builder(TestName());
auto simple = MakeLinspaceArray2D(1.0f, 3.0f, 1, 3);
auto input_literal = LiteralUtil::CreateFromArray(*simple);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1},
{3, 1});
auto expected = ReferenceUtil::TransposeArray2D(*simple);
auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, TransposeAsReshape) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 0},
{3, 4});
auto expected = ReferenceUtil::TransposeArray2D(*a4x3);
auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Transpose0x4) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(0, 4));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Transpose(parameter, {1, 0});
auto expected_literal = LiteralUtil::CreateR2<float>({{}, {}, {}, {}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, Transpose4x3) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Transpose(parameter, {1, 0});
auto expected = ReferenceUtil::TransposeArray2D(*a4x3);
auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ReshapeSplitNoShuffleZeroElements) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(6, 0));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1},
{2, 3, 0, 0});
auto expected_literal =
LiteralUtil::CreateFromArray(Array4D<float>(2, 3, 0, 0));
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ReshapeR4ToR2ZeroElements) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(Array4D<float>(2, 3, 4, 0));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1, 2, 3},
{24, 0});
auto expected_literal = LiteralUtil::CreateFromArray(Array2D<float>(24, 0));
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ReshapeSplitNoShuffle) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1},
{2, 6});
auto expected = MakeLinspaceArray2D(1.0f, 12.0f, 2, 6);
auto expected_literal = LiteralUtil::CreateFromArray(*expected);
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ReshapeSplitAndShuffleZeroElements) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(Array2D<float>(0, 6));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 0},
{3, 0});
auto expected_literal = LiteralUtil::CreateFromArray(Array2D<float>(3, 0));
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ReshapeSplitAndShuffle) {
XlaBuilder builder(TestName());
auto a4x3 = MakeLinspaceArray2D(1.0f, 12.0f, 4, 3);
auto input_literal = LiteralUtil::CreateFromArray(*a4x3);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 0},
{2, 6});
Array2D<float> expected({{1.0f, 4.0f, 7.0f, 10.0f, 2.0f, 5.0f},
{8.0f, 11.0f, 3.0f, 6.0f, 9.0f, 12.0f}});
auto expected_literal = LiteralUtil::CreateFromArray(expected);
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
static Array3D<float> ArrayForDocR3Tests() {
return Array3D<float>({{{10, 11, 12}, {15, 16, 17}},
{{20, 21, 22}, {25, 26, 27}},
{{30, 31, 32}, {35, 36, 37}},
{{40, 41, 42}, {45, 46, 47}}});
}
XLA_TEST_P(ReshapeTest, DocR3_R1_Collapse_012) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1, 2},
{24});
auto expected_literal = LiteralUtil::CreateR1<float>(
{10, 11, 12, 15, 16, 17, 20, 21, 22, 25, 26, 27,
30, 31, 32, 35, 36, 37, 40, 41, 42, 45, 46, 47});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, DocR3_R2_Collapse_012_Refine_83) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1, 2},
{8, 3});
auto expected_literal = LiteralUtil::CreateR2<float>({{10, 11, 12},
{15, 16, 17},
{20, 21, 22},
{25, 26, 27},
{30, 31, 32},
{35, 36, 37},
{40, 41, 42},
{45, 46, 47}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, DocR3_R1_Collapse_120) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 2, 0},
{24});
auto expected_literal = LiteralUtil::CreateR1<float>(
{10, 20, 30, 40, 11, 21, 31, 41, 12, 22, 32, 42,
15, 25, 35, 45, 16, 26, 36, 46, 17, 27, 37, 47});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, DocR3_R2_Collapse_120_Refine_83) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 2, 0},
{8, 3});
auto expected_literal = LiteralUtil::CreateR2<float>({{10, 20, 30},
{40, 11, 21},
{31, 41, 12},
{22, 32, 42},
{15, 25, 35},
{45, 16, 26},
{36, 46, 17},
{27, 37, 47}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, DocR3_R3_Collapse_120_Refine_262) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateFromArray(ArrayForDocR3Tests());
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 2, 0},
{2, 6, 2});
auto expected_literal = LiteralUtil::CreateR3<float>(
{{{10, 20}, {30, 40}, {11, 21}, {31, 41}, {12, 22}, {32, 42}},
{{15, 25}, {35, 45}, {16, 26}, {36, 46}, {17, 27}, {37, 47}}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, FullyConnectedCollapse) {
XlaBuilder builder(TestName());
Array4D<float> t2x2x2x3(2, 2, 2, 3);
auto filler2x3 = MakeLinspaceArray2D(1.0f, 6.0f, 2, 3);
t2x2x2x3.FillWithYX(*filler2x3);
auto input_literal = LiteralUtil::CreateFromArray(t2x2x2x3);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Collapse(parameter, {1, 2, 3});
auto expected_literal = LiteralUtil::CreateR2<float>(
{{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, FullyConnectedCollapseDesugared) {
XlaBuilder builder(TestName());
Array4D<float> t(2, 1, 2, 2);
t(0, 0, 0, 0) = 0;
t(0, 0, 0, 1) = 1;
t(0, 0, 1, 0) = 2;
t(0, 0, 1, 1) = 3;
t(1, 0, 0, 0) = 4;
t(1, 0, 0, 1) = 5;
t(1, 0, 1, 0) = 6;
t(1, 0, 1, 1) = 7;
auto input_literal = LiteralUtil::CreateFromArray(t);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1, 2, 3},
{2, 4});
auto expected_literal =
LiteralUtil::CreateR2<float>({{0, 1, 2, 3}, {4, 5, 6, 7}});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, ToScalar) {
for (int rank = 0; rank < 8; ++rank) {
XlaBuilder b(TestName());
std::vector<int64_t> ones(rank, 1);
std::vector<int64_t> dimensions(rank);
std::iota(dimensions.begin(), dimensions.end(), 0);
Literal input_literal(ShapeUtil::MakeShape(F32, ones));
std::vector<int64_t> zeros(rank, 0);
input_literal.Set<float>(zeros, 83.0f);
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&b, ¶meter));
Reshape(parameter, dimensions, {});
auto expected_literal = LiteralUtil::CreateR0<float>(83.0f);
ComputeAndCompareLiteral(&b, expected_literal, {input.get()},
zero_error_spec_);
}
}
XLA_TEST_P(ReshapeTest, BadDimensions) {
XlaBuilder b(TestName());
auto input_literal = LiteralUtil::CreateR1<float>({1.0f});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&b, ¶meter));
Reshape(parameter, {}, {});
EXPECT_THAT(
ExecuteToString(&b, {}),
::testing::HasSubstr("not a permutation of the operand dimensions"));
}
XLA_TEST_P(ReshapeTest, BadNewSizes) {
XlaBuilder b(TestName());
auto input_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&b, ¶meter));
Reshape(parameter, {1}, {});
EXPECT_THAT(ExecuteToString(&b, {}),
::testing::HasSubstr("mismatched element counts"));
}
XLA_TEST_P(ReshapeTest, R4Dim0MinorLayoutToR2Dim0MajorLayout) {
XlaBuilder builder(TestName());
auto input_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
Array4D<float>{
{
{
{0, 1},
{2, 3},
},
{
{100, 101},
{102, 103},
},
},
{
{
{222, 333},
{444, 555},
},
{
{666, 777},
{888, 999},
},
},
},
LayoutUtil::MakeLayout({0, 1, 2, 3}));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1, 2, 3}, {2, 8});
Array2D<float> expected_array({
{0, 1, 2, 3, 100, 101, 102, 103},
{222, 333, 444, 555, 666, 777, 888, 999},
});
XlaComputation computation = builder.Build().value();
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeShapeWithDenseLayout(use_bfloat16() ? BF16 : F32, {2, 8},
{1, 0})
.ToProto();
Literal actual =
client_
->ExecuteAndTransfer(computation, {input.get()}, &execution_options)
.value();
Literal expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
if (use_bfloat16()) {
expected = LiteralUtil::ConvertF32ToBF16(expected);
}
EXPECT_TRUE(LiteralTestUtil::Equal(expected, actual));
}
XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4) {
XlaBuilder builder(TestName());
Literal input_literal = LiteralUtil::CreateR2<float>({
{0, 1, 2, 3, 4, 5, 6, 7},
{100, 101, 102, 103, 104, 105, 106, 107},
{200, 201, 202, 203, 204, 205, 206, 207},
});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {0, 1}, {3, 2, 1, 4});
auto expected_literal = LiteralUtil::CreateR4<float>({
{{{0, 1, 2, 3}},
{{4, 5, 6, 7}}},
{{{100, 101, 102, 103}},
{{104, 105, 106, 107}}},
{{{200, 201, 202, 203}},
{{204, 205, 206, 207}}}
});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, R2ToR4_3x8_To_3x2x1x4_Dimensions_10) {
XlaBuilder builder(TestName());
Literal input_literal = LiteralUtil::CreateR2<float>({
{0, 1, 2, 3, 4, 5, 6, 7},
{100, 101, 102, 103, 104, 105, 106, 107},
{200, 201, 202, 203, 204, 205, 206, 207},
});
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(
auto input, CreateParameterAndTransferLiteral(0, input_literal, "input",
&builder, ¶meter));
Reshape(parameter, {1, 0}, {3, 2, 1, 4});
auto expected_literal = LiteralUtil::CreateR4<float>({
{{{0, 100, 200, 1}},
{{101, 201, 2, 102}}},
{{{202, 3, 103, 203}},
{{4, 104, 204, 5}}},
{{{105, 205, 6, 106}},
{{206, 7, 107, 207}}}
});
ComputeAndCompareLiteral(&builder, expected_literal, {input.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, R4ToR2_2x1x1x1_To_2x1) {
XlaBuilder builder(TestName());
std::mt19937 rng;
std::uniform_real_distribution<float> distribution;
Array4D<float> input(2, 1, 1, 1);
input.Each([&rng, &distribution](absl::Span<const int64_t> ,
float* cell) { *cell = distribution(rng); });
Literal input_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(auto input_data,
CreateParameterAndTransferLiteral(
0, input_literal, "input", &builder, ¶meter));
Reshape(parameter, {0, 1, 2, 3}, {2, 1});
Literal expected = LiteralUtil::ReshapeSlice({2, 1}, {1, 0}, input_literal);
ComputeAndCompareLiteral(&builder, expected, {input_data.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, R4ToR2_2x1x4x1_To_4x2) {
XlaBuilder builder(TestName());
std::mt19937 rng;
std::uniform_real_distribution<float> distribution;
Array4D<float> input(2, 1, 4, 1);
input.Each([&rng, &distribution](absl::Span<const int64_t> ,
float* cell) { *cell = distribution(rng); });
Literal input_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(auto input_data,
CreateParameterAndTransferLiteral(
0, input_literal, "input", &builder, ¶meter));
Reshape(parameter, {0, 1, 2, 3}, {4, 2});
Literal expected = LiteralUtil::ReshapeSlice({4, 2}, {1, 0}, input_literal);
ComputeAndCompareLiteral(&builder, expected, {input_data.get()},
zero_error_spec_);
}
XLA_TEST_P(ReshapeTest, R4ToR2_5x10x2x3_To_5x60_Dimensions_0213) {
XlaBuilder builder(TestName());
std::mt19937 rng;
std::uniform_real_distribution<float> distribution;
Array4D<float> input(5, 10, 2, 3);
  input.Each([&rng, &distribution](absl::Span<const int64_t> /*indices*/,
float* cell) { *cell = distribution(rng); });
Literal input_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
XlaOp parameter;
TF_ASSERT_OK_AND_ASSIGN(auto input_data,
CreateParameterAndTransferLiteral(
                              0, input_literal, "input", &builder, &parameter));
Reshape(parameter, {0, 2, 1, 3},
{5, 60});
Array2D<float> expected_array(5, 60);
input.Each([&](absl::Span<const int64_t> indices, float* cell) {
expected_array(indices[0], indices[2] * 30 + indices[1] * 3 + indices[3]) =
*cell;
});
auto expected = LiteralUtil::CreateR2FromArray2D(expected_array);
  ComputeAndCompareLiteral(&builder, expected, {input_data.get()},
                           zero_error_spec_);
} |
922 | cpp | tensorflow/tensorflow | round | tensorflow/lite/kernels/round.cc | tensorflow/lite/kernels/round_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline float RoundToNearest(float value) {
auto floor_val = std::floor(value);
auto diff = value - floor_val;
if ((diff < 0.5f) ||
((diff == 0.5f) && (static_cast<int>(floor_val) % 2 == 0))) {
return floor_val;
} else {
    return floor_val + 1.0f;
}
}
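// Illustrative behavior of RoundToNearest: half-way cases round to the
// nearest even integer, unlike std::round, which rounds half away from zero.
//   RoundToNearest(0.5f)  == 0.0f
//   RoundToNearest(1.5f)  == 2.0f
//   RoundToNearest(2.5f)  == 2.0f
//   RoundToNearest(-2.3f) == -2.0f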
inline void Round(const RuntimeShape& input_shape, const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = RoundToNearest(input_data[i]);
}
}
}
}
#endif
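// Minimal usage sketch for the reference kernel above (illustrative only, not
// part of the original file): rounds a flat buffer of four floats without
// going through a full interpreter invocation.
//
//   float in[4] = {0.5f, 1.5f, 2.5f, -2.3f};
//   float out[4];
//   tflite::RuntimeShape shape({4});
//   tflite::reference_ops::Round(shape, in, shape, out);
//   // out is now {0.0f, 2.0f, 2.0f, -2.0f}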
#include "tensorflow/lite/kernels/internal/reference/round.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace round {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
output->type = input->type;
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ROUND() {
static TfLiteRegistration r = {nullptr,
nullptr, round::Prepare, round::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Round, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
TEST(Round, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
TEST(Round, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
TEST(Round, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ROUND,
xnnpack_delegate.get());
}
TEST(Round, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_ROUND, xnnpack_delegate.get());
}
}
} |
923 | cpp | tensorflow/tensorflow | batch_to_space_nd | tensorflow/lite/kernels/batch_to_space_nd.cc | tensorflow/lite/kernels/internal/batch_to_space_nd_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
#include <cmath>
#include <cstring>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline RuntimeShape ExtendShapeBatchToSpace(const RuntimeShape& shape) {
if (shape.DimensionsCount() == 4) {
return shape;
}
RuntimeShape new_shape(4, 1);
new_shape.SetDim(0, shape.Dims(0));
new_shape.SetDim(1, shape.Dims(1));
new_shape.SetDim(3, shape.Dims(2));
return new_shape;
}
template <typename T>
inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const int32_t* block_shape_data,
const RuntimeShape& unextended_input3_shape,
const int32_t* crops_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
ruy::profiler::ScopeLabel label("BatchToSpaceND");
TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3);
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(),
unextended_output_shape.DimensionsCount());
const RuntimeShape input1_shape =
ExtendShapeBatchToSpace(unextended_input1_shape);
const RuntimeShape output_shape =
ExtendShapeBatchToSpace(unextended_output_shape);
const int output_width = output_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_batch_size = output_shape.Dims(0);
const int depth = input1_shape.Dims(3);
const int input_width = input1_shape.Dims(2);
const int input_height = input1_shape.Dims(1);
const int input_batch_size = input1_shape.Dims(0);
const int block_shape_height = block_shape_data[0];
const int block_shape_width =
unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1;
const int crops_top = crops_data[0];
const int crops_left =
unextended_input1_shape.DimensionsCount() == 4 ? crops_data[2] : 0;
for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) {
const int out_batch = in_batch % output_batch_size;
const int spatial_offset = in_batch / output_batch_size;
for (int in_h = 0; in_h < input_height; ++in_h) {
const int out_h = in_h * block_shape_height +
spatial_offset / block_shape_width - crops_top;
if (out_h < 0 || out_h >= output_height) {
continue;
}
for (int in_w = 0; in_w < input_width; ++in_w) {
const int out_w = in_w * block_shape_width +
spatial_offset % block_shape_width - crops_left;
if (out_w < 0 || out_w >= output_width) {
continue;
}
T* out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0);
const T* in =
input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);
memcpy(out, in, depth * sizeof(T));
}
}
}
}
}
}
#endif
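// Worked example for the reference kernel above (illustrative only): with an
// input of shape [4, 1, 1, 1], block_shape {2, 2} and zero crops, the output
// has shape [1, 2, 2, 1], and input batch b lands at spatial position
// (h, w) = (b / 2, b % 2), i.e. batches {0, 1, 2, 3} fill the 2x2 grid in
// row-major order.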
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace batch_to_space_nd {
enum KernelType {
kReference,
kGenericOptimized,
};
struct BatchToSpaceNDContext {
BatchToSpaceNDContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
block_shape = GetInput(context, node, 1);
crops = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* block_shape;
const TfLiteTensor* crops;
TfLiteTensor* output;
};
const int kInputMinDimensionNum = 3;
const int kInputMaxDimensionNum = 4;
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
BatchToSpaceNDContext* op_context) {
TfLiteIntArray* input_size = op_context->input->dims;
const int* block_shape = GetTensorData<int32>(op_context->block_shape);
const int* crops = GetTensorData<int32>(op_context->crops);
int spatial_dims_num = input_size->size - 2;
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape), 1);
TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->crops), 2);
TF_LITE_ENSURE_EQ(context, op_context->crops->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, op_context->crops->dims->data[1], 2);
for (int i = 0; i < spatial_dims_num * 2; ++i) {
TF_LITE_ENSURE(context, crops[i] >= 0);
}
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
int output_batch_size = input_size->data[0];
for (int dim = 0; dim < spatial_dims_num; ++dim) {
TF_LITE_ENSURE(context, block_shape[dim] != 0);
TF_LITE_ENSURE_EQ(context, output_batch_size % block_shape[dim], 0);
output_batch_size = output_batch_size / block_shape[dim];
output_size->data[dim + 1] = input_size->data[dim + 1] * block_shape[dim] -
crops[dim * 2] - crops[dim * 2 + 1];
}
output_size->data[0] = output_batch_size;
output_size->data[input_size->size - 1] =
input_size->data[input_size->size - 1];
return context->ResizeTensor(context, op_context->output, output_size);
}
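// Shape inference example (illustrative): for an input of shape [4, 1, 1, 1],
// block_shape [2, 2] and crops [[0, 0], [0, 0]], ResizeOutputTensor produces
// an output of shape [1, 2, 2, 1]: the batch dimension is divided by
// 2 * 2 = 4 and each spatial dimension is multiplied by its block size before
// the crops are subtracted.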
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BatchToSpaceNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
if (op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
}
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(op_context.block_shape) ||
!IsConstantOrPersistentTensor(op_context.crops)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BatchToSpaceNDContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
#define TF_LITE_BATCH_TO_SPACE_ND(type, scalar) \
type::BatchToSpaceND(GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
GetTensorShape(op_context.block_shape), \
GetTensorData<int32_t>(op_context.block_shape), \
GetTensorShape(op_context.crops), \
GetTensorData<int32_t>(op_context.crops), \
GetTensorShape(op_context.output), \
GetTensorData<scalar>(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, float);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, float);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, uint8_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, uint8_t);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int8_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int8_t);
}
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int16_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int16_t);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int32_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int32_t);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int64_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int64_t);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by BatchToSpace.",
op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_BATCH_TO_SPACE_ND
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, batch_to_space_nd::Prepare,
batch_to_space_nd::Eval<batch_to_space_nd::kReference>};
return &r;
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, batch_to_space_nd::Prepare,
batch_to_space_nd::Eval<batch_to_space_nd::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND() {
return Register_BATCH_TO_SPACE_ND_GENERIC_OPT();
}
}
}
} | #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include <gtest/gtest.h>
namespace tflite {
namespace {
std::pair<int, int> GetIndexRange(int spatial_index_dim, int block_shape_dim,
int input_dim, int output_dim) {
int index_start = 0;
int index_end = 0;
optimized_ops::GetIndexRange(spatial_index_dim, block_shape_dim, input_dim,
output_dim, &index_start, &index_end);
return {index_start, index_end};
}
TEST(BatchToSpaceNDTest, TestIndexRange) {
EXPECT_EQ(GetIndexRange(3, 6,
1, 6),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(2, 6,
5, 30),
std::make_pair(0, 5));
EXPECT_EQ(GetIndexRange(0, 2,
3, 4),
std::make_pair(0, 2));
EXPECT_EQ(GetIndexRange(-2, 2,
3, 4),
std::make_pair(1, 3));
EXPECT_EQ(GetIndexRange(-30, 5,
7, 5),
std::make_pair(6, 7));
EXPECT_EQ(GetIndexRange(-26, 5,
7, 5),
std::make_pair(6, 7));
EXPECT_EQ(GetIndexRange(0, 5,
7, 5),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(4, 5,
7, 5),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(3, 5,
7, 5),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(0, 5,
7, 1),
std::make_pair(0, 1));
EXPECT_EQ(GetIndexRange(-30, 5,
7, 1),
std::make_pair(6, 7));
EXPECT_EQ(GetIndexRange(1, 5,
7, 1),
std::make_pair(0, 0));
EXPECT_EQ(GetIndexRange(-29, 5,
7, 1),
std::make_pair(6, 6));
}
}
} |
924 | cpp | tensorflow/tensorflow | concatenation | tensorflow/lite/kernels/concatenation.cc | tensorflow/lite/kernels/concatenation_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
#include <algorithm>
#include <cstring>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename Scalar>
inline void Concatenation(const ConcatenationParams& params,
const RuntimeShape* const* input_shapes,
const Scalar* const* input_data,
const RuntimeShape& output_shape,
Scalar* output_data) {
int axis = params.axis;
int inputs_count = params.inputs_count;
const int concat_dimensions = output_shape.DimensionsCount();
TFLITE_DCHECK_LT(axis, concat_dimensions);
int64_t concat_size = 0;
for (int i = 0; i < inputs_count; i++) {
TFLITE_DCHECK_EQ(input_shapes[i]->DimensionsCount(), concat_dimensions);
for (int j = 0; j < concat_dimensions; j++) {
if (j != axis) {
MatchingDim(*input_shapes[i], j, output_shape, j);
}
}
concat_size += input_shapes[i]->Dims(axis);
}
TFLITE_DCHECK_EQ(concat_size, output_shape.Dims(axis));
int64_t outer_size = 1;
for (int i = 0; i < axis; ++i) {
outer_size *= output_shape.Dims(i);
}
int64_t base_inner_size = 1;
for (int i = axis + 1; i < concat_dimensions; ++i) {
base_inner_size *= output_shape.Dims(i);
}
Scalar* output_ptr = output_data;
for (int k = 0; k < outer_size; k++) {
for (int i = 0; i < inputs_count; ++i) {
const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size;
const Scalar* input_ptr = input_data[i] + k * copy_size;
memcpy(output_ptr, input_ptr, copy_size * sizeof(Scalar));
output_ptr += copy_size;
}
}
}
inline void ConcatenationWithScaling(const ConcatenationParams& params,
const RuntimeShape* const* input_shapes,
const uint8_t* const* input_data,
const RuntimeShape& output_shape,
uint8_t* output_data) {
int axis = params.axis;
const int32_t* input_zeropoint = params.input_zeropoint;
const float* input_scale = params.input_scale;
int inputs_count = params.inputs_count;
const int32_t output_zeropoint = params.output_zeropoint;
const float output_scale = params.output_scale;
const int concat_dimensions = output_shape.DimensionsCount();
TFLITE_DCHECK_LT(axis, concat_dimensions);
int64_t concat_size = 0;
for (int i = 0; i < inputs_count; i++) {
TFLITE_DCHECK_EQ(input_shapes[i]->DimensionsCount(), concat_dimensions);
for (int j = 0; j < concat_dimensions; j++) {
if (j != axis) {
MatchingDim(*input_shapes[i], j, output_shape, j);
}
}
concat_size += input_shapes[i]->Dims(axis);
}
TFLITE_DCHECK_EQ(concat_size, output_shape.Dims(axis));
int64_t outer_size = 1;
for (int i = 0; i < axis; ++i) {
outer_size *= output_shape.Dims(i);
}
int64_t base_inner_size = 1;
for (int i = axis + 1; i < concat_dimensions; ++i) {
base_inner_size *= output_shape.Dims(i);
}
const float inverse_output_scale = 1.f / output_scale;
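  // Each element of input i is requantized into the output scale as
  //   q_out = clamp(round(q_in * s_i / s_out - zp_i * s_i / s_out) + zp_out,
  //                 0, 255),
  // which is what the scale/bias pair below implements; inputs whose
  // quantization parameters already match the output are memcpy'd verbatim.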
uint8_t* output_ptr = output_data;
for (int k = 0; k < outer_size; k++) {
for (int i = 0; i < inputs_count; ++i) {
const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size;
const uint8_t* input_ptr = input_data[i] + k * copy_size;
if (input_zeropoint[i] == output_zeropoint &&
input_scale[i] == output_scale) {
memcpy(output_ptr, input_ptr, copy_size);
} else {
const float scale = input_scale[i] * inverse_output_scale;
const float bias = -input_zeropoint[i] * scale;
for (int j = 0; j < copy_size; ++j) {
const int32_t value = static_cast<int32_t>(tflite::TfLiteRound(
input_ptr[j] * scale + bias)) +
output_zeropoint;
output_ptr[j] = static_cast<uint8_t>(
std::max<int32_t>(std::min<int32_t>(255, value), 0));
}
}
output_ptr += copy_size;
}
}
}
}
}
#endif
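// Usage sketch for the reference Concatenation above (illustrative only, not
// part of the original file): concatenates two 2x2 float tensors along
// axis 0 into a 4x2 output.
//
//   tflite::ConcatenationParams op_params;
//   op_params.axis = 0;
//   op_params.inputs_count = 2;
//   float a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, out[8];
//   tflite::RuntimeShape in_shape({2, 2}), out_shape({4, 2});
//   const tflite::RuntimeShape* shapes[] = {&in_shape, &in_shape};
//   const float* inputs[] = {a, b};
//   tflite::reference_ops::Concatenation(op_params, shapes, inputs, out_shape,
//                                        out);
//   // out is now {1, 2, 3, 4, 5, 6, 7, 8}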
#include "tensorflow/lite/kernels/internal/reference/concatenation.h"
#include <stdint.h>
#include <cstddef>
#include <cstring>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace concatenation {
enum KernelType {
kReference,
kGenericOptimized,
};
template <KernelType kernel_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, int axis,
TfLiteTensor* output) {
#define TF_LITE_CONCATENATION(scalar) \
{ \
VectorOfTensors<scalar> all_inputs(*context, *node->inputs); \
tflite::ConcatenationParams op_params; \
op_params.axis = axis; \
op_params.inputs_count = node->inputs->size; \
if (kernel_type == kReference) { \
reference_ops::Concatenation(op_params, all_inputs.shapes(), \
all_inputs.data(), GetTensorShape(output), \
GetTensorData<scalar>(output)); \
} else { \
optimized_ops::Concatenation(op_params, all_inputs.shapes(), \
all_inputs.data(), GetTensorShape(output), \
GetTensorData<scalar>(output)); \
} \
}
#define TF_LITE_CONCATENATION_QUANTIZED() \
{ \
VectorOfQuantizedTensors all_inputs(*context, *node->inputs); \
tflite::ConcatenationParams op_params; \
op_params.axis = axis; \
op_params.input_zeropoint = all_inputs.zero_point(); \
op_params.input_scale = all_inputs.scale(); \
op_params.inputs_count = node->inputs->size; \
op_params.output_zeropoint = output->params.zero_point; \
op_params.output_scale = output->params.scale; \
if (kernel_type == kReference) { \
reference_ops::ConcatenationWithScaling( \
op_params, all_inputs.shapes(), all_inputs.data(), \
GetTensorShape(output), GetTensorData<uint8>(output)); \
} else { \
optimized_ops::ConcatenationWithScaling( \
op_params, all_inputs.shapes(), all_inputs.data(), \
GetTensorShape(output), GetTensorData<uint8>(output)); \
} \
}
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_CONCATENATION(float);
break;
case kTfLiteInt32:
TF_LITE_CONCATENATION(int32);
break;
case kTfLiteUInt32:
TF_LITE_CONCATENATION(uint32_t);
break;
case kTfLiteUInt8:
TF_LITE_CONCATENATION_QUANTIZED();
break;
case kTfLiteInt8:
TF_LITE_CONCATENATION(int8_t);
break;
case kTfLiteInt64:
TF_LITE_CONCATENATION(int64_t);
break;
case kTfLiteInt16:
TF_LITE_CONCATENATION(int16_t);
break;
case kTfLiteBool:
TF_LITE_CONCATENATION(bool);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported currently.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
#undef TF_LITE_CONCATENATION_QUANTIZED
#undef TF_LITE_CONCATENATION
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data);
int axis = params->axis;
int num_inputs = node->inputs->size;
const TfLiteTensor* t0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &t0));
TfLiteType input_type = t0->type;
if (axis < 0) axis += t0->dims->size;
TF_LITE_ENSURE(context, axis >= 0);
TF_LITE_ENSURE(context,
axis < t0->dims->size || (t0->dims->size == 0 && axis == 0));
TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone);
TF_LITE_ENSURE(context,
input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 ||
input_type == kTfLiteInt8 || input_type == kTfLiteInt16 ||
input_type == kTfLiteInt32 || input_type == kTfLiteInt64 ||
input_type == kTfLiteBool || input_type == kTfLiteUInt32);
bool all_inputs_at_prepare = true;
for (int i = 0; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
if (!IsConstantOrPersistentTensor(t)) {
all_inputs_at_prepare = false;
break;
}
}
int sum_axis = t0->dims->size > 0 ? t0->dims->data[axis] : 1;
if (all_inputs_at_prepare && t0->dims->size == 0 && axis == 0) {
for (int i = 1; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size);
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
output_size->data[0] = num_inputs;
SetTensorToPersistentRo(output);
context->ResizeTensor(context, output, output_size);
size_t input_type_size;
TF_LITE_ENSURE_STATUS(GetSizeOfType(context, t0->type, &input_type_size));
void* o_data = output->data.data;
for (int i = 0; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
const void* i_data = t->data.data;
memcpy(o_data, i_data, input_type_size);
o_data = (void*)((uintptr_t)o_data + input_type_size);
}
return kTfLiteOk;
} else {
for (int i = 1; i < num_inputs; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size);
TF_LITE_ENSURE_EQ(context, t->type, input_type);
for (int d = 0; d < t0->dims->size; ++d) {
if (d == axis) {
TF_LITE_ENSURE(context, t->dims->data[axis] >= 0);
TF_LITE_ENSURE(context,
t->dims->data[axis] <=
std::numeric_limits<int>::max() - sum_axis);
sum_axis += t->dims->data[axis];
} else {
TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]);
}
}
}
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(t0->dims->size);
for (int d = 0; d < t0->dims->size; ++d) {
output_size->data[d] = (d == axis) ? sum_axis : t0->dims->data[d];
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type);
if (input_type == kTfLiteInt8) {
VectorOfTensors<int8_t> all_inputs(*context, *node->inputs);
for (int i = 0; i < node->inputs->size; ++i) {
const TfLiteTensor* t;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &t));
TF_LITE_ENSURE_EQ(context, t->params.scale, output->params.scale);
TF_LITE_ENSURE_EQ(context, t->params.zero_point,
output->params.zero_point);
}
}
if (input_type == kTfLiteInt16) {
for (int i = 0; i < node->inputs->size; ++i) {
const TfLiteTensor* t = GetInput(context, node, i);
TF_LITE_ENSURE_EQ(context, t->params.zero_point, 0);
}
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
if (all_inputs_at_prepare) {
SetTensorToPersistentRo(output);
context->ResizeTensor(context, output, output_size);
return EvalImpl<kReference>(context, node, axis, output);
}
return context->ResizeTensor(context, output, output_size);
}
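// Shape inference example (illustrative): concatenating tensors of shape
// [2, 3] and [2, 5] along axis 1 yields an output of shape [2, 8]; only the
// concatenation axis may differ between inputs. When every input is a
// constant scalar and axis == 0, Prepare packs the values into a persistent
// 1-D output of length num_inputs, so evaluation becomes a no-op.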
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data);
int axis = params->axis;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
if (IsConstantOrPersistentTensor(output)) {
return kTfLiteOk;
}
if (axis < 0) axis += output->dims->size;
return EvalImpl<kernel_type>(context, node, axis, output);
}
#undef TF_LITE_MACRO_DISPATCH
}
TfLiteRegistration* Register_CONCATENATION_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, concatenation::Prepare,
concatenation::Eval<concatenation::kReference>};
return &r;
}
TfLiteRegistration* Register_CONCATENATION_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, concatenation::Prepare,
concatenation::Eval<concatenation::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONCATENATION() {
return Register_CONCATENATION_GENERIC_OPT();
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/concatenation_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Concatenation, 1D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_2_inputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 1D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
const std::vector<int32_t> shape3({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_of_3) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 1D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
const std::vector<int32_t> shape3({shape_rng()});
const std::vector<int32_t> shape4({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_of_4) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 1D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
const std::vector<int32_t> shape1({shape_rng()});
const std::vector<int32_t> shape2({shape_rng()});
const std::vector<int32_t> shape3({shape_rng()});
const std::vector<int32_t> shape4({shape_rng()});
const std::vector<int32_t> shape5({shape_rng()});
for (int i = -1; i < 1; i++) {
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 2D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -2; i < 2; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape5 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 3D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -3; i < 3; i++) {
const std::vector<int32_t> shape1({shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape5 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Concatenation, 4D_of_5) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
for (int i = -4; i < 4; i++) {
const std::vector<int32_t> shape1(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
auto shape2 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape3 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape4 = SameShapeDifferentAxis(shape1, i, shape_rng());
auto shape5 = SameShapeDifferentAxis(shape1, i, shape_rng());
ConcatenationTester()
.InputShapes({shape1, shape2, shape3, shape4, shape5})
.Axis(i)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
}
} |
925 | cpp | tensorflow/tensorflow | slice | third_party/xla/xla/service/memory_space_assignment/slice.cc | third_party/xla/xla/service/memory_space_assignment/slice_test.cc | #ifndef XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_SLICE_H_
#define XLA_SERVICE_MEMORY_SPACE_ASSIGNMENT_SLICE_H_
#include <cstdint>
#include <functional>
#include <ostream>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/shape.h"
namespace xla::memory_space_assignment {
inline constexpr char kConcatBitcastCustomCall[] = "ConcatBitcast";
struct SliceParam {
std::string ToString() const;
bool operator==(const SliceParam& other) const;
int64_t start_inclusive;
int64_t end_exclusive;
};
struct SliceProposal {
std::string ToString() const;
friend std::ostream& operator<<(std::ostream& os,
const SliceProposal& proposal);
std::tuple<const Shape&, const std::vector<SliceParam>&, int64_t> ToTuple()
const;
bool operator==(const SliceProposal& other) const;
Shape slice_shape;
std::vector<SliceParam> slice_params;
int64_t slice_size;
};
using SliceProposalCollection = std::vector<SliceProposal>;
using SliceProposalFunction =
std::function<absl::StatusOr<SliceProposalCollection>(
const Shape& shape, const SlicedPrefetchOptions& options)>;
struct SliceDecision {
std::string ToString() const;
bool operator==(const SliceDecision& other) const;
HeapSimulator::Chunk chunk;
int64_t exclusive_start_time;
SliceProposal sizing;
float copy_resource_consumed;
};
bool IsUniformSliceSizingEnabled(const SlicedPrefetchOptions& options);
class SlicedPrefetchStartTimePicker {
public:
using ElapsedTimeFn = std::add_pointer<float(
int64_t exclusive_start_time, int64_t exclusive_end_time) const>::type;
using SameComputationParentFn =
std::add_pointer<bool(int64_t lhs_time, int64_t rhs_time) const>::type;
static std::vector<int64_t> Pick(
int64_t num_slices, int64_t exclusive_prefetch_start_time,
int64_t prefetch_end_time, absl::AnyInvocable<ElapsedTimeFn> elapsed_fn,
absl::AnyInvocable<SameComputationParentFn> has_same_parent_fn);
};
}
#endif
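// Illustrative example of the data structures above (not from the original
// file): for an f32[8,8] value (256 bytes) split into two equal slices along
// dimension 0, a SliceProposalCollection could contain
//   { slice_shape: f32[4,8], slice_params: { [0,4), [0,8) }, slice_size: 128 }
//   { slice_shape: f32[4,8], slice_params: { [4,8), [0,8) }, slice_size: 128 }
// and each corresponding SliceDecision would additionally record the chunk
// assigned in the alternate memory, the exclusive start time of the async
// copy, and the fraction of copy resource it consumes.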
#include "xla/service/memory_space_assignment/slice.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <ostream>
#include <string>
#include <tuple>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/time_utils.h"
#include "xla/shape.h"
namespace xla::memory_space_assignment {
std::tuple<const HeapSimulator::Chunk&, int64_t, const SliceProposal&, float>
SliceDecisionToTuple(const SliceDecision& decision) {
return std::make_tuple(
std::ref(decision.chunk), decision.exclusive_start_time,
std::ref(decision.sizing), decision.copy_resource_consumed);
}
std::string SliceDecision::ToString() const {
return absl::StrCat("{ chunk: ", chunk.ToString(),
", (exclusive) start_time: ", exclusive_start_time,
", sizing: ", sizing.ToString(),
", copy_resource_consumed: ", copy_resource_consumed,
" }");
}
bool SliceDecision::operator==(const SliceDecision& other) const {
return SliceDecisionToTuple(*this) == SliceDecisionToTuple(other);
}
std::string SliceProposal::ToString() const {
return absl::StrCat(
"{ slice_shape: ", slice_shape.ToString(true), ", slice_params: { ",
absl::StrJoin(slice_params, ", ",
[](std::string* out, const SliceParam& param) {
absl::StrAppend(out, param.ToString());
}),
" }, slice_size: ", slice_size, " }");
}
std::ostream& operator<<(std::ostream& os, const SliceProposal& proposal) {
os << proposal.ToString();
return os;
}
std::tuple<const Shape&, const std::vector<SliceParam>&, int64_t>
SliceProposal::ToTuple() const {
return std::make_tuple(std::ref(slice_shape), std::ref(slice_params),
slice_size);
}
bool SliceProposal::operator==(const SliceProposal& other) const {
return ToTuple() == other.ToTuple();
}
std::string SliceParam::ToString() const {
return absl::StrCat("[", start_inclusive, ",", end_exclusive, ")");
}
bool SliceParam::operator==(const SliceParam& other) const {
return start_inclusive == other.start_inclusive &&
end_exclusive == other.end_exclusive;
}
bool IsUniformSliceSizingEnabled(const SlicedPrefetchOptions& options) {
return options.max_slices() > 0 && options.preferred_slice_size() > 0;
}
std::vector<int64_t> SlicedPrefetchStartTimePicker::Pick(
int64_t num_slices, int64_t exclusive_prefetch_start_time,
int64_t prefetch_end_time, absl::AnyInvocable<ElapsedTimeFn> elapsed_fn,
absl::AnyInvocable<SameComputationParentFn> has_same_parent_fn) {
CHECK_LE(exclusive_prefetch_start_time, prefetch_end_time);
VLOG(5) << "Picking slice start times. num_slices = " << num_slices
<< "; exclusive_prefetch_start_time = "
<< exclusive_prefetch_start_time
<< "; prefetch_end_time = " << prefetch_end_time;
if (exclusive_prefetch_start_time >= prefetch_end_time - 2 ||
num_slices == 1) {
return std::vector<int64_t>(num_slices, exclusive_prefetch_start_time);
}
float total_elapsed =
elapsed_fn(exclusive_prefetch_start_time, prefetch_end_time);
if (total_elapsed <= 0.0) {
return std::vector<int64_t>(num_slices, exclusive_prefetch_start_time);
}
std::vector<int64_t> start_times;
start_times.reserve(num_slices);
start_times.push_back(exclusive_prefetch_start_time);
int64_t last_valid_candidate = exclusive_prefetch_start_time;
int64_t candidate = exclusive_prefetch_start_time;
while (candidate < prefetch_end_time - 1 && start_times.size() < num_slices) {
float target_elapsed = total_elapsed *
static_cast<float>(num_slices - start_times.size()) /
static_cast<float>(num_slices);
float elapsed = elapsed_fn(candidate, prefetch_end_time);
if (elapsed < target_elapsed) {
start_times.push_back(last_valid_candidate);
continue;
}
bool updating_candidate_impacts_elapsed =
last_valid_candidate != candidate &&
elapsed_fn(last_valid_candidate,
ExclusiveToInclusiveStartTime(candidate)) > 0.0;
if (has_same_parent_fn(std::max<int64_t>(0, exclusive_prefetch_start_time),
std::max<int64_t>(0, candidate)) &&
updating_candidate_impacts_elapsed) {
last_valid_candidate = candidate;
}
++candidate;
}
while (start_times.size() < num_slices) {
start_times.push_back(last_valid_candidate);
}
return start_times;
}
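// Usage sketch (illustrative only; the lambdas are stand-ins for the cost
// callbacks MSA normally supplies): picks start times for two slices in the
// exclusive window (-1, 8) when every instruction is assumed to take one
// unit of time.
//
//   auto elapsed = [](int64_t exclusive_start, int64_t exclusive_end) {
//     return static_cast<float>(exclusive_end - exclusive_start - 1);
//   };
//   auto same_parent = [](int64_t, int64_t) { return true; };
//   std::vector<int64_t> starts = SlicedPrefetchStartTimePicker::Pick(
//       /*num_slices=*/2, /*exclusive_prefetch_start_time=*/-1,
//       /*prefetch_end_time=*/8, elapsed, same_parent);
//   // starts should be {-1, 3}: the second slice starts roughly halfway
//   // through the window so each copy overlaps a similar amount of compute.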
} | #include <numeric>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/reference_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class SliceTest : public ClientLibraryTestBase {};
TEST_F(SliceTest, Slice3x3x3_To_3x3x1_F32) {
Array3D<float> values(3, 3, 3);
values.FillIota(0);
XlaBuilder builder(TestName());
auto original = ConstantR3FromArray3D<float>(&builder, values);
Slice(original, {0, 0, 0}, {3, 3, 1}, {1, 1, 1});
Array3D<float> expected{
{{0.0}, {3.0}, {6.0}}, {{9.0}, {12.0}, {15.0}}, {{18.0}, {21.0}, {24.0}}};
ComputeAndCompareR3<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
TEST_F(SliceTest, Slice3x3x3_To_3x1x3_F32) {
Array3D<float> values(3, 3, 3);
values.FillIota(0);
XlaBuilder builder(TestName());
auto original = ConstantR3FromArray3D<float>(&builder, values);
Slice(original, {0, 0, 0}, {3, 1, 3}, {1, 1, 1});
Array3D<float> expected{
{{0.0, 1.0, 2.0}}, {{9.0, 10.0, 11.0}}, {{18.0, 19.0, 20.0}}};
ComputeAndCompareR3<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
TEST_F(SliceTest, Slice3x3x3_To_1x3x3_F32) {
Array3D<float> values(3, 3, 3);
values.FillIota(0);
XlaBuilder builder(TestName());
auto original = ConstantR3FromArray3D<float>(&builder, values);
Slice(original, {0, 0, 0}, {1, 3, 3}, {1, 1, 1});
Array3D<float> expected{
{{{0.0, 1.0, 2.0}, {3.0, 4.0, 5.0}, {6.0, 7.0, 8.0}}}};
ComputeAndCompareR3<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
XLA_TEST_F(SliceTest, Slice0x0to0x0F32) {
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 0));
Slice(original, {0, 0}, {0, 0}, {1, 1});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 0), {});
}
XLA_TEST_F(SliceTest, Slice0x20to0x5F32) {
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D<float>(&builder, Array2D<float>(0, 20));
Slice(original, {0, 15}, {0, 20}, {1, 1});
ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 5), {});
}
XLA_TEST_F(SliceTest, Slice3x0to2x0F32) {
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D<float>(&builder, Array2D<float>(3, 0));
Slice(original, {1, 0}, {3, 0}, {1, 1});
ComputeAndCompareR2<float>(&builder, Array2D<float>(2, 0), {});
}
XLA_TEST_F(SliceTest, SliceQuadrantOf256x256) {
Array2D<float> values(256, 256);
for (int row = 0; row < 256; ++row) {
for (int col = 0; col < 256; ++col) {
values(row, col) = (row << 10) | col;
}
}
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D<float>(&builder, values);
Slice(original, {128, 128}, {256, 256}, {1, 1});
Array2D<float> expected(128, 128);
for (int row = 0; row < 128; ++row) {
for (int col = 0; col < 128; ++col) {
expected(row, col) = ((row + 128) << 10) | (col + 128);
}
}
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
TEST_F(SliceTest, Slice_1x4096_To_1x1024) {
Array2D<float> values(1, 4096);
std::iota(values.data(), values.data() + 4096, 0.0);
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D<float>(&builder, values);
Slice(original, {0, 3072}, {1, 4096}, {1, 1});
Array2D<float> expected(1, 1024);
std::iota(expected.data(), expected.data() + 1024, 3072.0);
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
TEST_F(SliceTest, Slice_16x4_To_16x2) {
Array2D<float> values(16, 4);
Array2D<float> expected(16, 2);
for (int row = 0; row < 16; ++row) {
for (int col = 0; col < 4; ++col) {
values(row, col) = (row << 10) | col;
if (col < 2) {
expected(row, col) = (row << 10) | col;
}
}
}
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D<float>(&builder, values);
Slice(original, {0, 0}, {16, 2}, {1, 1});
ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.000001));
}
TEST_F(SliceTest, SliceR4ThreeDimsMiddleMinor) {
Array4D<float> values(2, 2, 24, 256);
values.FillRandom(3.14f);
auto expected = ReferenceUtil::Slice4D(
values, {{1, 0, 8, 0}}, {{2, 2, 16, 128}}, {{1, 1, 1, 1}});
XlaBuilder builder(TestName());
auto original = ConstantR4FromArray4D(&builder, values);
Slice(original, {1, 0, 8, 0}, {2, 2, 16, 128}, {1, 1, 1, 1});
ComputeAndCompareR4(&builder, *expected, {}, ErrorSpec(0.000001));
}
TEST_F(SliceTest, SliceOfReshape) {
Array2D<int> values(2 * 3 * 24, 7);
values.FillIota(1);
XlaBuilder builder(TestName());
auto original = ConstantR2FromArray2D(&builder, values);
auto reshape = Reshape(original, {24, 3, 2, 7});
Slice(reshape, {0, 0, 0, 0}, {11, 3, 2, 7}, {1, 1, 1, 1});
ComputeAndCompare(&builder, {});
}
TEST_F(SliceTest, SliceOfCollapsingReshape) {
Array4D<int> values(2, 3, 5, 7);
values.FillIota(1);
XlaBuilder builder(TestName());
auto original = ConstantR4FromArray4D(&builder, values);
auto reshape = Reshape(original, {2 * 3 * 5, 7});
Slice(reshape, {0, 0}, {4, 7}, {1, 1});
ComputeAndCompare(&builder, {});
}
XLA_TEST_F(SliceTest, StridedSliceR4WithOutputLayout) {
Array4D<float> values(2, 4, 6, 8);
values.FillRandom(3.14f);
auto expected = ReferenceUtil::Slice4D(values, {{0, 0, 0, 0}}, {{2, 4, 6, 8}},
{{1, 1, 2, 1}});
auto expected_literal = LiteralUtil::CreateR4FromArray4DWithLayout(
*expected, LayoutUtil::MakeLayout({0, 1, 2, 3}));
XlaBuilder builder(TestName());
auto original = ConstantR4FromArray4D(&builder, values);
Slice(original, {0, 0, 0, 0}, {2, 4, 6, 8}, {1, 1, 2, 1});
ComputeAndCompareLiteral(&builder, expected_literal, {}, ErrorSpec(0.000001),
&expected_literal.shape());
}
struct R1Spec {
int64_t input_dim0;
int64_t slice_start;
int64_t slice_limit;
int64_t slice_stride;
};
class SliceR1Test : public ClientLibraryTestBase,
public ::testing::WithParamInterface<R1Spec> {
protected:
template <typename NativeT>
void Run(const R1Spec& spec) {
absl::InlinedVector<NativeT, 1> input(spec.input_dim0);
for (size_t i = 0; i < input.size(); ++i) {
input[i] = static_cast<NativeT>(i);
}
auto literal = LiteralUtil::CreateR1<NativeT>(input);
XlaBuilder builder(TestName());
auto original = Parameter(&builder, 0, literal.shape(), "p0");
Slice(original, {spec.slice_start}, {spec.slice_limit},
{spec.slice_stride});
absl::InlinedVector<NativeT, 1> expected;
for (int i = spec.slice_start; i < spec.slice_limit;
i += spec.slice_stride) {
expected.push_back(i);
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> arg,
client_->TransferToServer(literal));
ComputeAndCompareR1<NativeT>(&builder, expected, {arg.get()});
}
};
class SliceR1LargeTest : public SliceR1Test {};
std::string SliceR1TestDataToString(
const ::testing::TestParamInfo<R1Spec>& data) {
const R1Spec& spec = data.param;
return absl::StrFormat("%d_%d_%d_%d", spec.input_dim0, spec.slice_start,
spec.slice_limit, spec.slice_stride);
}
XLA_TEST_P(SliceR1Test, DoIt_F32) { Run<float>(GetParam()); }
XLA_TEST_P(SliceR1Test, DoIt_F64) { Run<double>(GetParam()); }
XLA_TEST_P(SliceR1Test, DoIt_U32) { Run<uint32_t>(GetParam()); }
XLA_TEST_P(SliceR1Test, DoIt_S32) { Run<int32_t>(GetParam()); }
XLA_TEST_P(SliceR1Test, DoIt_U64) { Run<uint64_t>(GetParam()); }
XLA_TEST_P(SliceR1Test, DoIt_S64) { Run<int64_t>(GetParam()); }
XLA_TEST_P(SliceR1LargeTest, DISABLED_ON_GPU(DoIt_F32)) {
Run<float>(GetParam());
}
XLA_TEST_P(SliceR1LargeTest, DISABLED_ON_GPU(DoIt_F64)) {
Run<double>(GetParam());
}
XLA_TEST_P(SliceR1LargeTest, DISABLED_ON_GPU(DoIt_U32)) {
Run<uint32_t>(GetParam());
}
XLA_TEST_P(SliceR1LargeTest, DISABLED_ON_GPU(DoIt_S32)) {
Run<int32_t>(GetParam());
}
XLA_TEST_P(SliceR1LargeTest, DISABLED_ON_GPU(DoIt_U64)) {
Run<uint64_t>(GetParam());
}
XLA_TEST_P(SliceR1LargeTest, DISABLED_ON_GPU(DoIt_S64)) {
Run<int64_t>(GetParam());
}
XLA_TEST_P(SliceR1Test, DoIt_PRED) { Run<bool>(GetParam()); }
INSTANTIATE_TEST_CASE_P(
SliceR1TestInstantiation,
SliceR1Test,
::testing::Values(
R1Spec{10, 0, 0, 1},
R1Spec{10, 7, 7, 1},
R1Spec{10, 0, 5, 1},
R1Spec{10, 3, 5, 1},
R1Spec{10, 0, 10, 1},
R1Spec{1024, 0, 5, 1},
R1Spec{1024, 3, 5, 1},
R1Spec{1024 + 17, 0, 5, 1},
R1Spec{1024 + 17, 3, 5, 1},
R1Spec{1024 + 17, 1024, 1024 + 6, 1},
R1Spec{1024 + 17, 1024 + 1, 1024 + 6, 1},
R1Spec{1024, 1024 - 4, 1024, 1},
R1Spec{4 * 1024, 7, 7 + 1024, 1},
R1Spec{4 * 1024, 0, 4 * 1024, 1},
R1Spec{4 * 1024, 1, 4 * 1024 - 1, 1},
R1Spec{4 * 1024, 1024, 3 * 1024, 1},
R1Spec{4 * 1024, 1024 + 1, 3 * 1024 - 1, 1},
R1Spec{16 * 1024, 0, 5, 1},
R1Spec{16 * 1024, 3, 5, 1},
R1Spec{16 * 1024 + 17, 0, 5, 1},
R1Spec{16 * 1024 + 17, 3, 5, 1},
R1Spec{16 * 1024 + 17, 16 * 1024, 16 * 1024 + 6, 1},
R1Spec{16 * 1024 + 17, 16 * 1024 + 1, 16 * 1024 + 6, 1},
R1Spec{16 * 1024, 4 * 1024 - 17, 8 * 1024 - 18, 1},
R1Spec{64 * 1024, 0, 64 * 1024, 1},
R1Spec{64 * 1024, 1, 64 * 1024 - 1, 1},
R1Spec{64 * 1024, 1024, 63 * 1024, 1},
R1Spec{64 * 1024, 1024 + 1, 63 * 1024 - 1, 1},
R1Spec{64 * 1024, 32 * 1024, 33 * 1024, 1},
R1Spec{64 * 1024, 32 * 1024 + 1, 33 * 1024 - 1, 1},
R1Spec{64 * 1024, 32 * 1024 - 17, 36 * 1024 - 18, 1}
),
SliceR1TestDataToString
);
INSTANTIATE_TEST_CASE_P(
SliceR1TestBigSlicesInstantiation,
SliceR1LargeTest,
::testing::Values(
R1Spec{
16 * 1024 * 1024, 4 * 1024 * 1024, 12 * 1024 * 1024, 1},
R1Spec{
16 * 1024 * 1024, 4 * 1024 * 1024 + 1, 12 * 1024 * 1024 - 1, 1},
R1Spec{
16 * 1024 * 1024, 4 * 1024 * 1024 - 1, 12 * 1024 * 1024 + 1, 1}
),
SliceR1TestDataToString
);
INSTANTIATE_TEST_CASE_P(
SliceStridedR1TestInstantiation,
SliceR1Test,
::testing::Values(
R1Spec{10, 2, 4, 2},
R1Spec{10, 0, 10, 2},
R1Spec{10, 0, 10, 3},
R1Spec{10, 0, 10, 4},
R1Spec{10, 0, 10, 5},
R1Spec{10, 0, 10, 10},
R1Spec{500, 200, 400, 7},
R1Spec{4096, 1, 4095, 3},
R1Spec{2047, 1024 - 24, 1024 + 160, 31},
R1Spec{2047, 1, 2046, 3 * 128},
R1Spec{4096, 1024 + 3, 4095, 500},
R1Spec{8192, 0, 8192, 1024 * 3 + 400},
R1Spec{1024 * 1024, 0, 1024 * 1024, 2},
R1Spec{1024 * 1024, 0, 1024 * 1024, 8},
R1Spec{1024 * 1024, 0, 1024 * 1024, 7},
R1Spec{1024 * 1024, 0, 1024 * 1024, 125},
R1Spec{1024 * 1024, 3, 1024 - 9, 2},
R1Spec{1024 * 1024, 3, 1024 - 9, 8},
R1Spec{1024 * 1024, 3, 1024 - 9, 7},
R1Spec{1024 * 1024, 3, 1024 - 9, 125},
R1Spec{1024 * 1024, 3, 1024 * 512 - 9, 2},
R1Spec{1024 * 1024, 3, 1024 * 512 - 9, 8},
R1Spec{1024 * 1024, 3, 1024 * 512 - 9, 7},
R1Spec{1024 * 1024, 3, 1024 * 512 - 9, 125},
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 2},
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 8},
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 7},
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 125},
R1Spec{16 * 1024 * 1024, 0, 16 * 1024 * 1024, 4097},
R1Spec{16 * 1024 * 1024, 0, 16 * 1024 * 1024, 4093},
R1Spec{16 * 1024 * 1024, 12 * 1024 + 17, 16 * 1024 * 1024 - 231, 4097},
R1Spec{16 * 1024 * 1024, 12 * 1024 + 17, 16 * 1024 * 1024 - 231, 4093}
),
SliceR1TestDataToString
);
struct R2Spec {
int64_t input_dim0;
int64_t input_dim1;
std::array<int64_t, 2> slice_starts;
std::array<int64_t, 2> slice_limits;
std::array<int64_t, 2> slice_strides;
std::array<int64_t, 2> layout;
};
class SliceR2Test : public ClientLibraryTestBase,
public ::testing::WithParamInterface<R2Spec> {};
XLA_TEST_P(SliceR2Test, DoIt) {
const R2Spec& spec = GetParam();
Array2D<int32_t> input(spec.input_dim0, spec.input_dim1);
input.FillUnique();
auto literal = LiteralUtil::CreateR2FromArray2DWithLayout(
input, LayoutUtil::MakeLayout(spec.layout));
XlaBuilder builder(TestName());
auto a = Parameter(&builder, 0, literal.shape(), "p0");
Slice(a, spec.slice_starts, spec.slice_limits, spec.slice_strides);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> arg,
client_->TransferToServer(literal));
std::unique_ptr<Array2D<int32_t>> expected = ReferenceUtil::Slice2D(
input, spec.slice_starts, spec.slice_limits, spec.slice_strides);
ComputeAndCompareR2<int32_t>(&builder, *expected, {arg.get()});
}
INSTANTIATE_TEST_CASE_P(
SliceR2TestInstantiation, SliceR2Test,
::testing::Values(
R2Spec{4, 12, {{0, 3}}, {{4, 6}}, {{1, 1}}, {{0, 1}}},
R2Spec{4, 12, {{0, 3}}, {{4, 6}}, {{1, 1}}, {{1, 0}}},
R2Spec{16, 4, {{0, 2}}, {{16, 4}}, {{1, 1}}, {{0, 1}}},
R2Spec{16, 4, {{0, 2}}, {{16, 4}}, {{1, 1}}, {{1, 0}}},
R2Spec{256, 400, {{0, 300}}, {{256, 400}}, {{1, 1}}, {{1, 0}}},
R2Spec{500, 400, {{111, 123}}, {{300, 257}}, {{1, 1}}, {{1, 0}}},
R2Spec{500, 400, {{111, 123}}, {{300, 400}}, {{1, 1}}, {{1, 0}}},
R2Spec{384, 512, {{128, 256}}, {{256, 384}}, {{1, 1}}, {{1, 0}}},
R2Spec{357, 512, {{111, 256}}, {{301, 384}}, {{1, 1}}, {{1, 0}}},
R2Spec{10, 10, {{0, 0}}, {{10, 10}}, {{1, 2}}, {{0, 1}}},
R2Spec{10, 10, {{0, 0}}, {{10, 10}}, {{1, 2}}, {{1, 0}}},
R2Spec{10, 10, {{0, 0}}, {{10, 10}}, {{2, 1}}, {{0, 1}}},
R2Spec{10, 10, {{0, 0}}, {{10, 10}}, {{2, 1}}, {{1, 0}}},
R2Spec{10, 10, {{0, 0}}, {{10, 10}}, {{2, 2}}, {{0, 1}}},
R2Spec{10, 10, {{0, 0}}, {{10, 10}}, {{2, 2}}, {{1, 0}}},
R2Spec{256, 400, {{100, 129}}, {{256, 400}}, {{3, 5}}, {{1, 0}}},
R2Spec{256, 400, {{100, 129}}, {{256, 400}}, {{3, 5}}, {{0, 1}}},
R2Spec{256, 400, {{100, 129}}, {{256, 400}}, {{5, 3}}, {{1, 0}}},
R2Spec{256, 400, {{100, 129}}, {{256, 400}}, {{5, 3}}, {{0, 1}}},
R2Spec{511, 513, {{129, 300}}, {{400, 500}}, {{7, 11}}, {{1, 0}}},
R2Spec{511, 513, {{129, 300}}, {{400, 500}}, {{7, 11}}, {{0, 1}}},
R2Spec{511, 513, {{129, 300}}, {{400, 500}}, {{11, 7}}, {{1, 0}}},
R2Spec{511, 513, {{129, 300}}, {{400, 500}}, {{11, 7}}, {{0, 1}}},
R2Spec{8672, 512, {{8, 0}}, {{8672, 512}}, {{542, 1}}, {{1, 0}}},
R2Spec{
511, 513, {{129, 300}}, {{400, 500}}, {{101, 129}}, {{1, 0}}},
R2Spec{
511, 513, {{129, 300}}, {{400, 500}}, {{101, 129}}, {{0, 1}}},
R2Spec{
511, 513, {{129, 300}}, {{400, 500}}, {{129, 101}}, {{1, 0}}},
R2Spec{
511, 513, {{129, 300}}, {{400, 500}}, {{129, 101}}, {{0, 1}}},
R2Spec{
511, 1023, {{129, 257}}, {{500, 1000}}, {{129, 255}}, {{1, 0}}},
R2Spec{
511, 1023, {{129, 257}}, {{500, 1000}}, {{129, 255}}, {{0, 1}}},
R2Spec{511,
513,
{{129, 255}},
{{511 - 129, 513 - 140}},
{{13, 19}},
{{1, 0}}},
R2Spec{511,
513,
{{129, 255}},
{{511 - 129, 513 - 140}},
{{13, 19}},
{{0, 1}}}
));
struct R4Spec {
std::array<int64_t, 4> input_dims;
std::array<int64_t, 4> input_layout;
std::array<int64_t, 4> slice_starts;
std::array<int64_t, 4> slice_limits;
std::array<int64_t, 4> slice_strides;
};
std::string R4SpecToString(const ::testing::TestParamInfo<R4Spec>& data) {
const R4Spec& spec = data.param;
return absl::StrCat("input_", absl::StrJoin(spec.input_dims, "x"),
"__layout_", absl::StrJoin(spec.input_layout, ""),
"__starts_", absl::StrJoin(spec.slice_starts, "x"),
"__limits_", absl::StrJoin(spec.slice_limits, "x"),
"__strides_", absl::StrJoin(spec.slice_strides, "x"));
}
class SliceR4Test : public ClientLibraryTestBase,
public ::testing::WithParamInterface<R4Spec> {
protected:
void Run(const R4Spec& spec) {
Array4D<float> values(spec.input_dims[0], spec.input_dims[1],
spec.input_dims[2], spec.input_dims[3]);
values.FillIota(3.14159);
auto expected = ReferenceUtil::Slice4D(
values, spec.slice_starts, spec.slice_limits, spec.slice_strides);
XlaBuilder builder(TestName());
auto literal = LiteralUtil::CreateR4FromArray4DWithLayout(
values, LayoutUtil::MakeLayout(spec.input_layout));
auto parameter = Parameter(&builder, 0, literal.shape(), "p0");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> arg,
client_->TransferToServer(literal));
Slice(parameter, spec.slice_starts, spec.slice_limits, spec.slice_strides);
ComputeAndCompareR4(&builder, *expected, {arg.get()}, ErrorSpec(0.000001));
}
};
XLA_TEST_P(SliceR4Test, DoIt) { Run(GetParam()); }
const R4Spec kR4SpecValues[] = {
R4Spec{{{2, 2, 2, 2}},
{{3, 2, 1, 0}},
{{0, 0, 0, 0}},
{{0, 0, 0, 0}},
{{1, 1, 1, 1}}},
R4Spec{{{3, 3, 4, 4}},
{{3, 2, 1, 0}},
{{0, 0, 0, 0}},
{{3, 3, 4, 4}},
{{1, 1, 2, 1}}},
R4Spec{{{2, 3, 16, 4}},
{{3, 2, 1, 0}},
{{0, 0, 0, 0}},
{{2, 3, 16, 4}},
{{1, 1, 3, 1}}},
R4Spec{{{4, 16, 3, 2}},
{{0, 1, 2, 3}},
{{1, 4, 1, 0}},
{{3, 12, 3, 2}},
{{1, 1, 3, 2}}},
R4Spec{{{2, 2, 257, 129}},
{{3, 2, 1, 0}},
{{1, 1, 62, 64}},
{{2, 2, 195, 129}},
{{1, 1, 3, 1}}},
R4Spec{{{3, 5, 257, 129}},
{{3, 2, 1, 0}},
{{1, 2, 61, 64}},
{{3, 5, 199, 129}},
{{1, 1, 3, 1}}},
R4Spec{{{5, 8, 257, 129}},
{{3, 2, 1, 0}},
{{2, 3, 60, 64}},
{{3, 5, 200, 68}},
{{1, 1, 1, 1}}},
R4Spec{{{8, 10, 256, 130}},
{{3, 2, 1, 0}},
{{1, 2, 60, 127}},
{{7, 9, 166, 129}},
{{4, 2, 3, 1}}},
R4Spec{{{2, 4, 8, 4}},
{{3, 2, 1, 0}},
{{1, 2, 0, 1}},
{{2, 4, 8, 3}},
{{1, 1, 7, 1}}},
R4Spec{{{10, 21, 256, 150}},
{{3, 2, 1, 0}},
{{1, 2, 9, 127}},
{{9, 16, 82, 133}},
{{3, 5, 7, 2}}},
R4Spec{{{15, 25, 256, 150}},
{{3, 2, 1, 0}},
{{4, 6, 19, 126}},
{{15, 25, 89, 135}},
{{5, 7, 7, 3}}},
R4Spec{{{2, 4, 256, 150}},
{{3, 2, 1, 0}},
{{1, 2, 29, 125}},
{{2, 4, 159, 145}},
{{1, 1, 7, 7}}},
R4Spec{{{2, 4, 256, 150}},
{{3, 2, 1, 0}},
{{1, 2, 39, 119}},
{{2, 4, 158, 145}},
{{1, 1, 7, 11}}},
R4Spec{{{1, 1, 5, 512}},
{{3, 2, 1, 0}},
{{0, 0, 0, 0}},
{{1, 1, 5, 512}},
{{1, 1, 4, 1}}},
R4Spec{{{1, 1, 513, 513}},
{{3, 2, 1, 0}},
{{0, 0, 0, 0}},
{{1, 1, 513, 513}},
{{1, 1, 512, 512}}},
R4Spec{{{1, 1, 1024, 4}},
{{3, 2, 1, 0}},
{{0, 0, 15, 0}},
{{1, 1, 1022, 4}},
{{1, 1, 23, 1}}},
R4Spec{{{1, 1, 1024, 4}},
{{3, 2, 1, 0}},
{{0, 0, 14, 0}},
{{1, 1, 1023, 4}},
{{1, 1, 101, 1}}},
R4Spec{{{1, 1, 4, 1024}},
{{3, 2, 1, 0}},
{{0, 0, 1, 20}},
{{1, 1, 4, 1023}},
{{1, 1, 1, 129}}},
R4Spec{{{5, 5, 512, 1024}},
{{3, 2, 1, 0}},
{{1, 1, 0, 0}},
{{4, 4, 512, 1024}},
{{2, 2, 2, 1}}},
R4Spec{{{5, 5, 512, 1024}},
{{3, 2, 1, 0}},
{{1, 1, 0, 0}},
{{4, 4, 512, 1024}},
{{2, 1, 1, 400}}},
R4Spec{{{32, 64, 128, 256}},
{{3, 2, 1, 0}},
{{10, 20, 30, 40}},
{{30, 60, 100, 200}},
{{11, 21, 31, 41}}},
R4Spec{{{1, 1, 14, 2048}},
{{3, 2, 1, 0}},
{{0, 0, 2, 0}},
{{1, 1, 14, 2}},
{{1, 1, 1, 1}}},
};
INSTANTIATE_TEST_CASE_P(SliceR4TestInstantiation, SliceR4Test,
::testing::ValuesIn(kR4SpecValues), R4SpecToString);
}
} |
926 | cpp | tensorflow/tensorflow | list_ops_util | tensorflow/lite/kernels/variants/list_ops_util.cc | tensorflow/lite/kernels/variants/list_ops_util_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_VARIANTS_LIST_OPS_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_VARIANTS_LIST_OPS_UTIL_H_
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
IntArrayUniquePtr TensorAsShape(const TfLiteTensor& shape);
IntArrayUniquePtr MergeShapesOrNull(IntArrayUniquePtr l, IntArrayUniquePtr r);
bool IsShapeFullyDefined(const TfLiteIntArray& shape);
TfLiteStatus GetShapeIfAllEqual(const TensorArray& arr,
IntArrayUniquePtr& result);
}
}
#endif
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
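// Reads a rank-1 integer tensor as a shape signature: element i of the tensor
// becomes dimension i of the returned TfLiteIntArray, while a scalar (rank-0)
// tensor maps to an empty array, treated elsewhere in this file as "unranked".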
IntArrayUniquePtr TensorAsShape(const TfLiteTensor& shape) {
if (shape.dims->size == 0) {
return BuildTfLiteArray({});
}
const int rank = shape.dims->data[0];
const int* begin = reinterpret_cast<const int*>(shape.data.data);
const int* end = begin + rank;
return BuildTfLiteArray(std::vector<int>(begin, end));
}
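// Merges two shape signatures, where -1 marks an unknown dimension and an
// empty array marks an unranked shape. Illustrative results, mirroring the
// unit tests that accompany this file:
//   MergeShapesOrNull({-1, 2, 5}, {1, -1, 5}) -> {1, 2, 5}
//   MergeShapesOrNull({2, 3}, {3, 3})         -> nullptr (conflicting dims)
//   MergeShapesOrNull({}, {2, 3})             -> {2, 3} (unranked defers)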
IntArrayUniquePtr MergeShapesOrNull(IntArrayUniquePtr l, IntArrayUniquePtr r) {
if (l == nullptr) {
return r;
}
if (r == nullptr) {
return l;
}
if (l->size == 0) {
return r;
}
if (r->size == 0) {
return l;
}
if (l->size != r->size) {
return nullptr;
}
for (int i = 0; i < r->size; ++i) {
if (l->data[i] == -1) {
l->data[i] = r->data[i];
} else if (r->data[i] != -1 && l->data[i] != r->data[i]) {
return nullptr;
}
}
return l;
}
bool IsShapeFullyDefined(const TfLiteIntArray& shape) {
for (int i = 0; i < shape.size; ++i) {
if (shape.data[i] < 0) {
return false;
}
}
return true;
}
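// Scans every populated element of the array and reports the shape they all
// share; unset (nullptr) slots are skipped, an empty array yields a nullptr
// result, and any disagreement between populated elements returns kTfLiteError.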
TfLiteStatus GetShapeIfAllEqual(const TensorArray& arr,
IntArrayUniquePtr& result) {
const TfLiteIntArray* common_shape = nullptr;
for (int i = 0; i < arr.NumElements(); ++i) {
const TfLiteTensor* cur_element = arr.At(i);
if (cur_element == nullptr) {
continue;
}
if (common_shape == nullptr) {
common_shape = cur_element->dims;
continue;
}
if (!TfLiteIntArrayEqual(common_shape, cur_element->dims)) {
return kTfLiteError;
}
}
result = common_shape != nullptr ? BuildTfLiteArray(*common_shape) : nullptr;
return kTfLiteOk;
}
}
} | #include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace {
TEST(TensorAsShape, ScalarTensor_ReturnsEmptyIntArray) {
TensorUniquePtr scalar_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({}), kTfLiteDynamic);
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*scalar_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({}));
}
TEST(TensorAsShape, SingleElementTensor_ReturnsSize1Shape) {
TensorUniquePtr single_el_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({1}), kTfLiteDynamic);
single_el_tensor->data.i32[0] = 10;
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*single_el_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({10}));
}
TEST(TensorAsShape, OneDMultipleElementShape_ReturnsHighRankedShape) {
TensorUniquePtr one_d_mul_el_tensor =
BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({3}), kTfLiteDynamic);
one_d_mul_el_tensor->data.i32[0] = 10;
one_d_mul_el_tensor->data.i32[1] = 9;
one_d_mul_el_tensor->data.i32[2] = 8;
IntArrayUniquePtr shape_from_tensor = TensorAsShape(*one_d_mul_el_tensor);
ASSERT_THAT(shape_from_tensor.get(), DimsAre({10, 9, 8}));
}
TEST(MergeShapesOrNull, IncompatibleSameRank_ReturnsNull) {
IntArrayUniquePtr l = BuildTfLiteArray({2, 3});
IntArrayUniquePtr r = BuildTfLiteArray({3, 3});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, NotSameRank_ReturnsNull) {
IntArrayUniquePtr l = BuildTfLiteArray({1});
IntArrayUniquePtr r = BuildTfLiteArray({1, 2});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, MergeShapesOrNullSameRankNENull) {
IntArrayUniquePtr l = BuildTfLiteArray({1});
IntArrayUniquePtr r = BuildTfLiteArray({2});
EXPECT_EQ(MergeShapesOrNull(std::move(l), std::move(r)).get(), nullptr);
}
TEST(MergeShapesOrNull, RankedUnknownLKnownR_ReturnsStatic) {
IntArrayUniquePtr l = BuildTfLiteArray({-1});
IntArrayUniquePtr r = BuildTfLiteArray({2});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2}));
}
TEST(MergeShapesOrNull, UnknownRKnownL_ReturnsStatic) {
IntArrayUniquePtr l = BuildTfLiteArray({2});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2}));
}
TEST(MergeShapesOrNull, UnknownBoth_ReturnsUnknown) {
IntArrayUniquePtr l = BuildTfLiteArray({-1});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({-1}));
}
TEST(MergeShapesOrNull, RankedUnknownDifferentDims_ConstrainsUnknownDims) {
IntArrayUniquePtr l = BuildTfLiteArray({-1, 2, 5});
IntArrayUniquePtr r = BuildTfLiteArray({1, -1, 5});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({1, 2, 5}));
}
TEST(MergeShapesOrNull, BothUnranked_ReturnsUnranked) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(), DimsAre({}));
}
TEST(MergeShapesOrNull, UnrankedAndStatic1D_ReturnsStatic1D) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({1}));
}
TEST(MergeShapesOrNull, UnrankedAndStaticND_ReturnsStaticND) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({2, 3});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({2, 3}));
}
TEST(MergeShapesOrNull, UnrankedAndRankedUnknown_ReturnsRankedUnknown) {
IntArrayUniquePtr l = BuildTfLiteArray({});
IntArrayUniquePtr r = BuildTfLiteArray({-1});
EXPECT_THAT(MergeShapesOrNull(std::move(l), std::move(r)).get(),
DimsAre({-1}));
}
TEST(MergeShapesOrNull, NullInput_ReturnsOther) {
EXPECT_THAT(MergeShapesOrNull(BuildTfLiteArray({3}), nullptr).get(),
DimsAre({3}));
EXPECT_THAT(MergeShapesOrNull(nullptr, BuildTfLiteArray({2})).get(),
DimsAre({2}));
EXPECT_EQ(MergeShapesOrNull(nullptr, nullptr).get(), nullptr);
}
TEST(MergeShapesOrNull, NullInput_ReturnsUnrankedOther) {
EXPECT_THAT(MergeShapesOrNull(BuildTfLiteArray({}), nullptr).get(),
DimsAre({}));
EXPECT_THAT(MergeShapesOrNull(nullptr, BuildTfLiteArray({})).get(),
DimsAre({}));
}
TEST(ElementsSameShape, NoElements_SucceedsWithNullptr) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(2);
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_EQ(res.get(), nullptr);
}
TEST(ElementsSameShape, ZeroSize_SucceedsWithNullptr) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_EQ(res.get(), nullptr);
}
TEST(ElementsSameShape, OneSize_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(1);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
ASSERT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_AllSet_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(2);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(1, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_SomeSet_SucceedsWithShape) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteOk);
EXPECT_THAT(res.get(), DimsAre({2}));
}
TEST(ElementsSameShape, MultipleElements_SomeSetNotSameRank_Fails) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 3}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteError);
}
TEST(ElementsSameShape, MultipleElements_SomeSetNotSameDim_Fails) {
TensorArray arr = {kTfLiteInt32, BuildTfLiteArray({})};
arr.Resize(3);
arr.Set(0, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 2}),
kTfLiteDynamic));
arr.Set(2, BuildTfLiteTensor(kTfLiteInt32, BuildTfLiteArray({2, 3}),
kTfLiteDynamic));
IntArrayUniquePtr res;
EXPECT_EQ(GetShapeIfAllEqual(arr, res), kTfLiteError);
}
}
}
} |
927 | cpp | tensorflow/tensorflow | tensor_array | tensorflow/lite/kernels/variants/tensor_array.cc | tensorflow/lite/kernels/variants/tensor_array_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_TENSOR_ARRAY_H_
#define TENSORFLOW_CORE_KERNELS_TENSOR_ARRAY_H_
#include <limits.h>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/aggregate_ops.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace tensor_array {
template <typename Device, typename T>
Status AddToTensor(OpKernelContext* ctx, Tensor* sum, const Tensor* current,
const Tensor* add) {
return errors::InvalidArgument(
"tensor_array::AddToTensor type not supported: ",
DataTypeString(DataTypeToEnum<T>::value));
}
#define TENSOR_ARRAY_WRITE_OR_ADD(Device, T) \
template <> \
Status AddToTensor<Device, T>(OpKernelContext * ctx, Tensor * sum, \
const Tensor* current, const Tensor* add);
#define TENSOR_ARRAY_WRITE_OR_ADD_CPU(T) TENSOR_ARRAY_WRITE_OR_ADD(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_CPU)
#undef TENSOR_ARRAY_WRITE_OR_ADD_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define TENSOR_ARRAY_WRITE_OR_ADD_GPU(T) TENSOR_ARRAY_WRITE_OR_ADD(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
TF_CALL_COMPLEX_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
#undef TENSOR_ARRAY_WRITE_OR_ADD_GPU
#endif
#undef TENSOR_ARRAY_WRITE_OR_ADD
template <typename Device, typename T>
Status TensorSetZero(OpKernelContext* ctx, Tensor* value) {
return errors::InvalidArgument(
"tensor_array::TensorSetZero type not supported: ",
DataTypeString(DataTypeToEnum<T>::value));
}
#define TENSOR_ARRAY_SET_ZERO(Device, T) \
template <> \
Status TensorSetZero<Device, T>(OpKernelContext * ctx, Tensor * value);
#define TENSOR_ARRAY_SET_ZERO_CPU(T) TENSOR_ARRAY_SET_ZERO(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_CPU);
TF_CALL_bool(TENSOR_ARRAY_SET_ZERO_CPU);
#undef TENSOR_ARRAY_SET_ZERO_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define TENSOR_ARRAY_SET_ZERO_GPU(T) TENSOR_ARRAY_SET_ZERO(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_GPU);
TF_CALL_COMPLEX_TYPES(TENSOR_ARRAY_SET_ZERO_GPU);
#undef TENSOR_ARRAY_SET_ZERO_GPU
#endif
#undef TENSOR_ARRAY_SET_ZERO
}
class TensorArray : public ResourceBase {
public:
static std::atomic<int64_t> tensor_array_counter;
TensorArray(const string& key, const DataType& dtype, const Tensor& handle,
int32_t N, const PartialTensorShape& element_shape,
bool identical_element_shapes, bool dynamic_size,
bool multiple_writes_aggregate, bool is_grad, int32_t marked_size,
bool clear_after_read)
: key_(key),
dtype_(dtype),
handle_(handle),
closed_(false),
dynamic_size_(dynamic_size),
multiple_writes_aggregate_(multiple_writes_aggregate),
gradients_disallowed_(false),
clear_after_read_(clear_after_read),
is_grad_(is_grad),
marked_size_(marked_size),
element_shape_(element_shape),
identical_element_shapes_(identical_element_shapes),
tensors_(N) {}
template <typename Device, typename T>
Status WriteOrAggregate(OpKernelContext* ctx, const int32_t index,
const Tensor* value) {
mutex_lock l(mu_);
return LockedWriteOrAggregate<Device, T>(ctx, index, value);
}
template <typename Device, typename T>
Status WriteOrAggregateMany(OpKernelContext* ctx,
const std::vector<int32>& indices,
std::vector<Tensor>* values) {
mutex_lock l(mu_);
int32_t i = 0;
for (const int32_t ix : indices) {
Status s = LockedWriteOrAggregate<Device, T>(ctx, ix, &(*values)[i]);
++i;
TF_RETURN_IF_ERROR(s);
}
return absl::OkStatus();
}
template <typename Device, typename T>
Status Read(OpKernelContext* ctx, const int32_t index, Tensor* value) {
mutex_lock l(mu_);
return LockedRead<Device, T>(ctx, index, value);
}
template <typename Device, typename T>
Status ReadMany(OpKernelContext* ctx, const std::vector<int32>& indices,
std::vector<Tensor>* values) {
mutex_lock l(mu_);
values->clear();
values->resize(indices.size());
int32_t i = 0;
for (const int32_t ix : indices) {
Status s = LockedRead<Device, T>(ctx, ix, &(*values)[i]);
++i;
if (!s.ok()) return s;
}
return absl::OkStatus();
}
DataType ElemType() const { return dtype_; }
PartialTensorShape ElemShape() {
mutex_lock l(mu_);
return element_shape_;
}
Status SetElemShape(const PartialTensorShape& candidate) {
mutex_lock l(mu_);
PartialTensorShape new_element_shape_;
Status s = element_shape_.MergeWith(candidate, &new_element_shape_);
if (!s.ok()) {
return s;
}
element_shape_ = new_element_shape_;
return absl::OkStatus();
}
string DebugString() const override {
mutex_lock l(mu_);
CHECK(!closed_);
return strings::StrCat("TensorArray[", tensors_.size(), "]");
}
bool IsClosed() {
mutex_lock l(mu_);
return closed_;
}
Status Size(int32* size) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
*size = tensors_.size();
return absl::OkStatus();
}
Status SetMarkedSize(int32_t size) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
if (!is_grad_) {
marked_size_ = size;
}
return absl::OkStatus();
}
Status MarkedSize(int32* size) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
*size = marked_size_;
return absl::OkStatus();
}
Status PackOrConcatSize(int32* size) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
*size = is_grad_ ? marked_size_ : tensors_.size();
return absl::OkStatus();
}
void DisableDynamicSize() {
mutex_lock l(mu_);
dynamic_size_ = false;
}
bool HasDynamicSize() {
mutex_lock l(mu_);
return dynamic_size_;
}
bool GradientsAllowed() {
mutex_lock l(mu_);
return !gradients_disallowed_;
}
bool HasIdenticalElementShapes() const { return identical_element_shapes_; }
Status CopyShapesFrom(TensorArray* rhs, const TensorShape* shape_to_prepend);
void ClearAndMarkClosed() {
mutex_lock l(mu_);
tensors_.clear();
closed_ = true;
}
mutex* mu() { return &mu_; }
Tensor* handle() { return &handle_; }
ResourceHandle resource_handle(OpKernelContext* ctx) {
return ctx->step_container()->MakeResourceHandle<TensorArray>(
key_, *ctx->device());
}
private:
Status LockedWrite(OpKernelContext* ctx, const int32_t index, Tensor* value)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
template <typename Device, typename T>
Status LockedWriteOrAggregate(OpKernelContext* ctx, const int32_t index,
const Tensor* value)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
template <typename Device, typename T>
Status LockedRead(OpKernelContext* ctx, const int32_t index, Tensor* value)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status LockedReturnIfClosed() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (closed_) {
return errors::InvalidArgument("TensorArray ", handle_.vec<tstring>()(1),
" has already been closed.");
}
return absl::OkStatus();
}
const string key_;
const DataType dtype_;
Tensor handle_;
mutable mutex mu_;
bool closed_ TF_GUARDED_BY(mu_);
bool dynamic_size_;
const bool multiple_writes_aggregate_;
bool gradients_disallowed_ TF_GUARDED_BY(mu_);
const bool clear_after_read_;
const bool is_grad_;
int32 marked_size_;
PartialTensorShape element_shape_ TF_GUARDED_BY(mu_);
const bool identical_element_shapes_;
struct TensorAndState {
TensorAndState()
: written(false), read(false), cleared(false), local_copy(false) {}
Tensor tensor;
TensorShape shape;
bool written;
bool read;
bool cleared;
bool local_copy;
};
std::vector<TensorAndState> tensors_ TF_GUARDED_BY(mu_);
};
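// Write path: grows the backing vector when dynamic_size_ allows it, validates
// dtype and element-shape compatibility, and either stores the value on a
// first write or element-wise adds it to the existing tensor when
// multiple_writes_aggregate_ is set; aggregation marks gradients as disallowed.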
template <typename Device, typename T>
Status TensorArray::LockedWriteOrAggregate(OpKernelContext* ctx,
const int32_t index,
const Tensor* value) {
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
size_t index_size = static_cast<size_t>(index);
if (index < 0 || (!dynamic_size_ && index_size >= tensors_.size())) {
return errors::InvalidArgument(
"TensorArray ", handle_.vec<tstring>()(1), ": Tried to write to index ",
index, " but array is not resizeable and size is: ", tensors_.size());
}
if (dynamic_size_) {
if (index_size >= tensors_.capacity()) {
tensors_.reserve(2 * (index_size + 1));
}
if (index_size >= tensors_.size()) {
tensors_.resize(index_size + 1);
}
}
TensorAndState& t = tensors_[index];
if (value->dtype() != dtype_) {
return errors::InvalidArgument(
"TensorArray ", handle_.vec<tstring>()(1),
": Could not write to TensorArray index ", index,
" because the value dtype is ", DataTypeString(value->dtype()),
" but TensorArray dtype is ", DataTypeString(dtype_), ".");
}
if (!element_shape_.IsCompatibleWith(value->shape())) {
return errors::InvalidArgument(
"TensorArray ", handle_.vec<tstring>()(1),
": Could not write to TensorArray index ", index,
" because the value shape is ", value->shape().DebugString(),
" which is incompatible with the TensorArray's inferred element "
"shape: ",
element_shape_.DebugString(), " (consider setting infer_shape=False).");
} else if (identical_element_shapes_ && !element_shape_.IsFullyDefined()) {
element_shape_ = PartialTensorShape(value->shape().dim_sizes());
}
if (t.read) {
return errors::InvalidArgument("TensorArray ", handle_.vec<tstring>()(1),
": Could not write to TensorArray index ",
index, " because it has already been read.");
}
if (!multiple_writes_aggregate_ && t.written) {
return errors::InvalidArgument("TensorArray ", handle_.vec<tstring>()(1),
": Could not write to TensorArray index ",
index,
" because it has already been written to.");
}
if (t.written) {
DCHECK(multiple_writes_aggregate_);
if (value->shape() != t.shape) {
return errors::InvalidArgument(
"TensorArray ", handle_.vec<tstring>()(1),
": Could not aggregate to TensorArray index ", index,
" because the existing shape is ", t.shape.DebugString(),
" but the new input shape is ", value->shape().DebugString(), ".");
}
if (!t.tensor.IsInitialized() || t.tensor.NumElements() == 0) {
t.tensor = *value;
return absl::OkStatus();
}
Tensor* existing_t = &t.tensor;
if (t.local_copy) {
Status s = tensor_array::AddToTensor<Device, T>(ctx, existing_t,
existing_t, value);
TF_RETURN_IF_ERROR(s);
} else {
Tensor local_tensor;
TF_RETURN_IF_ERROR(
ctx->allocate_temp(dtype_, existing_t->shape(), &local_tensor));
Status s = tensor_array::AddToTensor<Device, T>(ctx, &local_tensor,
existing_t, value);
TF_RETURN_IF_ERROR(s);
t.tensor = local_tensor;
t.local_copy = true;
}
gradients_disallowed_ = true;
} else {
t.tensor = *value;
t.shape = value->shape();
t.written = true;
}
return absl::OkStatus();
}
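// Read path: indices that were never written are materialized as all-zeros
// tensors of the inferred element shape (gradient reads rely on this), and
// when clear_after_read_ is set the stored tensor is dropped so a second read
// of the same index fails.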
template <typename Device, typename T>
Status TensorArray::LockedRead(OpKernelContext* ctx, const int32_t index,
Tensor* value) {
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
if ((index < 0) ||
(!is_grad_ && (static_cast<size_t>(index) >= tensors_.size()))) {
return errors::InvalidArgument("Tried to read from index ", index,
" but array size is: ", tensors_.size());
}
size_t index_t = static_cast<size_t>(index);
if ((is_grad_ && (index_t >= tensors_.size() || !tensors_[index].written)) ||
(!is_grad_ && (index_t < tensors_.size() && !tensors_[index].written))) {
TensorShape element_shape;
if (is_grad_ && index_t < tensors_.size() &&
tensors_[index].shape.dims() > 0) {
element_shape = tensors_[index].shape;
} else if (!element_shape_.IsFullyDefined()) {
return errors::InvalidArgument(
"TensorArray ", handle_.vec<tstring>()(1),
": Could not read from TensorArray index ", index,
". Furthermore, the element shape is not fully defined: ",
element_shape_.DebugString(),
". It is possible you are working with a resizeable TensorArray and "
"stop_gradients is not allowing the gradients to be written. If you "
"set the full "
"element_shape property on the forward TensorArray, the proper "
"all-zeros tensor "
"will be returned instead of incurring this error.");
} else {
element_shape_.AsTensorShape(&element_shape);
}
if (index_t >= tensors_.size()) {
size_t old_tensors_size = tensors_.size();
tensors_.resize(index + 1);
for (size_t i = old_tensors_size; i < index + 1; ++i) {
tensors_[i].shape = element_shape;
tensors_[i].written = true;
}
} else {
tensors_[index].shape = element_shape;
tensors_[index].written = true;
}
}
TensorAndState& t = tensors_[index];
if (t.cleared) {
return errors::InvalidArgument("TensorArray ", handle_.vec<tstring>()(1),
": Could not read index ", index,
" twice because it was cleared after a "
"previous read (perhaps try setting "
"clear_after_read = false?).");
}
if (!t.tensor.IsInitialized() || t.tensor.NumElements() == 0) {
TF_RETURN_IF_ERROR(ctx->allocate_temp(dtype_, t.shape, &t.tensor));
if (t.shape.num_elements() > 0) {
Status s = tensor_array::TensorSetZero<Device, T>(ctx, &t.tensor);
if (!s.ok()) return s;
}
}
*value = t.tensor;
if (clear_after_read_) {
t.tensor = Tensor();
t.cleared = true;
}
t.read = true;
return absl::OkStatus();
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/tensor_array.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/aggregate_ops_cpu.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace tensor_array {
#define TENSOR_ARRAY_WRITE_OR_ADD(Device, T) \
template <> \
Status AddToTensor<Device, T>(OpKernelContext * ctx, Tensor * sum, \
const Tensor* current, const Tensor* add) { \
functor::Add2Functor<Device, T> add_functor; \
add_functor(ctx->template eigen_device<Device>(), sum->flat<T>(), \
current->flat<T>(), add->flat<T>()); \
return OkStatus(); \
}
#define TENSOR_ARRAY_WRITE_OR_ADD_CPU(T) TENSOR_ARRAY_WRITE_OR_ADD(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_CPU)
#undef TENSOR_ARRAY_WRITE_OR_ADD_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define TENSOR_ARRAY_WRITE_OR_ADD_GPU(T) TENSOR_ARRAY_WRITE_OR_ADD(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
TF_CALL_COMPLEX_TYPES(TENSOR_ARRAY_WRITE_OR_ADD_GPU);
#undef TENSOR_ARRAY_WRITE_OR_ADD_GPU
#endif
#undef TENSOR_ARRAY_WRITE_OR_ADD
#define TENSOR_ARRAY_SET_ZERO(Device, T) \
template <> \
Status TensorSetZero<Device, T>(OpKernelContext * ctx, Tensor * value) { \
functor::SetZeroFunctor<Device, T> set_zero_functor; \
set_zero_functor(ctx->template eigen_device<Device>(), value->flat<T>()); \
return OkStatus(); \
}
#define TENSOR_ARRAY_SET_ZERO_CPU(T) TENSOR_ARRAY_SET_ZERO(CPUDevice, T)
TF_CALL_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_CPU);
TF_CALL_bool(TENSOR_ARRAY_SET_ZERO_CPU);
#undef TENSOR_ARRAY_SET_ZERO_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define TENSOR_ARRAY_SET_ZERO_GPU(T) TENSOR_ARRAY_SET_ZERO(GPUDevice, T)
TF_CALL_GPU_NUMBER_TYPES(TENSOR_ARRAY_SET_ZERO_GPU);
TF_CALL_COMPLEX_TYPES(TENSOR_ARRAY_SET_ZERO_GPU);
#undef TENSOR_ARRAY_SET_ZERO_GPU
#endif
#undef TENSOR_ARRAY_SET_ZERO
}
std::atomic<int64_t> TensorArray::tensor_array_counter{0};
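// Copies per-index shapes (not tensor values) from rhs, which must hold the
// same number of elements, optionally prepending shape_to_prepend to each
// copied shape.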
Status TensorArray::CopyShapesFrom(TensorArray* rhs,
const TensorShape* shape_to_prepend) {
mutex_lock l(mu_);
mutex_lock l_rhs(rhs->mu_);
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
TF_RETURN_IF_ERROR(rhs->LockedReturnIfClosed());
if (tensors_.size() != rhs->tensors_.size()) {
return errors::InvalidArgument(
"TensorArray sizes do not match during CopyShapesFrom: ",
handle_.vec<tstring>()(1), " has size ", tensors_.size(), " but rhs ",
rhs->handle_.vec<tstring>()(1), " has size ", rhs->tensors_.size());
}
for (std::size_t i = 0; i < tensors_.size(); ++i) {
if (!rhs->tensors_[i].written) continue;
if (shape_to_prepend) {
tensors_[i].shape = *shape_to_prepend;
tensors_[i].shape.AppendShape(rhs->tensors_[i].shape);
} else {
tensors_[i].shape = rhs->tensors_[i].shape;
}
tensors_[i].written = true;
}
return absl::OkStatus();
}
} | #include "tensorflow/lite/kernels/variants/tensor_array.h"
#include <memory>
#include <numeric>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace {
template <typename T>
TensorUniquePtr MakeTensorWithData(std::vector<int> dims,
const std::vector<T>& data) {
TensorUniquePtr tensor =
BuildTfLiteTensor(typeToTfLiteType<T>(), dims, kTfLiteDynamic);
const int num_elements =
std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>());
T* data_start = (T*)tensor->data.data;
for (int i = 0; i < num_elements; ++i) {
data_start[i] = data[i];
}
return tensor;
}
TensorArray MakeTensorArrayForTest(const std::vector<int>& dims) {
return TensorArray(kTfLiteInt32, BuildTfLiteArray(dims));
}
TEST(TensorArrayTest, InsertSingleElement) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
ASSERT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
const TfLiteTensor* added_tensor = arr.At(0);
ASSERT_TRUE(added_tensor != nullptr);
ASSERT_THAT(added_tensor, DimsAre({2}));
EXPECT_EQ(added_tensor->data.i32[0], 3);
EXPECT_EQ(added_tensor->data.i32[1], 4);
}
TEST(TensorArrayTest, ResizeToZero) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
EXPECT_EQ(arr.NumElements(), 2);
arr.Resize(0);
EXPECT_EQ(arr.NumElements(), 0);
}
TEST(TensorArrayTest, InsertOOB) {
auto arr = MakeTensorArrayForTest({});
TensorUniquePtr tensor = MakeTensorWithData<int>({2}, {3, 4});
arr.Resize(1);
ASSERT_FALSE(arr.Set(-1, std::move(tensor)));
EXPECT_FALSE(arr.At(0));
}
TEST(TensorArrayTest, InsertMultipleElements) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
EXPECT_TRUE(arr.Set(1, MakeTensorWithData<int>({3}, {3, 4, 5})));
EXPECT_THAT(arr.At(0), DimsAre({2}));
EXPECT_THAT(arr.At(1), DimsAre({3}));
}
TEST(TensorArrayTest, InsertSameIndexTwiceDeletes) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 2})));
EXPECT_TRUE(arr.Set(0, MakeTensorWithData<int>({3}, {3, 4, 5})));
EXPECT_THAT(arr.At(0), DimsAre({3}));
}
TEST(TensorArrayTest, ResizeUpWithElements) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(1);
ASSERT_TRUE(arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
arr.Resize(2);
EXPECT_THAT(arr.At(0), DimsAre({2}));
EXPECT_FALSE(arr.At(1));
EXPECT_EQ(arr.NumElements(), 2);
}
TEST(TensorArrayTest, ResizeDownDeletesElements) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
ASSERT_TRUE(arr.Set(1, MakeTensorWithData<int>({2}, {3, 4})));
arr.Resize(1);
EXPECT_EQ(arr.NumElements(), 1);
EXPECT_FALSE(arr.At(0));
}
TEST(TensorArrayTest, CopyListWithZeroLength) {
auto arr = MakeTensorArrayForTest({});
TensorArray arr2{arr};
EXPECT_EQ(arr.NumElements(), arr2.NumElements());
EXPECT_EQ(arr.NumElements(), 0);
}
TEST(TensorArrayTest, CopyAssignListWithZeroLength) {
auto arr = MakeTensorArrayForTest({});
arr = MakeTensorArrayForTest({2, 2});
EXPECT_EQ(arr.NumElements(), 0);
EXPECT_THAT(arr.ElementShape(), DimsAre({2, 2}));
}
TEST(TensorArrayTest, CopyEmptyList) {
auto arr = MakeTensorArrayForTest({});
arr.Resize(2);
TensorArray arr2{arr};
EXPECT_EQ(arr.NumElements(), arr2.NumElements());
EXPECT_EQ(arr.NumElements(), 2);
}
TEST(TensorArrayTest, CopyAssignToEmptyList) {
auto arr = MakeTensorArrayForTest({});
auto target_arr = MakeTensorArrayForTest({2, 2});
target_arr.Resize(2);
target_arr = arr;
EXPECT_EQ(target_arr.NumElements(), 0);
EXPECT_THAT(target_arr.ElementShape(), DimsAre({}));
}
TEST(TensorArrayTest, CopyListWithItem) {
std::optional<TensorArray> arr = TensorArray(kTfLiteInt32, {});
arr->Resize(1);
ASSERT_TRUE(arr->Set(0, MakeTensorWithData<int>({2}, {3, 4})));
TensorArray arr2{*arr};
EXPECT_EQ(arr->NumElements(), arr2.NumElements());
EXPECT_EQ(arr->At(0), arr2.At(0));
arr.reset();
EXPECT_THAT(arr2.At(0), DimsAre({2}));
}
TEST(TensorArrayTest, CopyAssignToListWithItem) {
auto target_arr = MakeTensorArrayForTest({});
target_arr.Resize(2);
ASSERT_TRUE(target_arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
auto src_arr = MakeTensorArrayForTest({2, 2});
src_arr.Resize(1);
target_arr = src_arr;
EXPECT_EQ(target_arr.NumElements(), src_arr.NumElements());
EXPECT_EQ(target_arr.At(0), nullptr);
}
TEST(TensorArrayTest, CopyAssignFromListWithItem) {
auto target_arr = MakeTensorArrayForTest({2, 2});
target_arr.Resize(1);
auto src_arr = MakeTensorArrayForTest({});
src_arr.Resize(2);
ASSERT_TRUE(src_arr.Set(0, MakeTensorWithData<int>({2}, {3, 4})));
target_arr = src_arr;
EXPECT_EQ(target_arr.NumElements(), src_arr.NumElements());
EXPECT_EQ(src_arr.At(0), target_arr.At(0));
}
TEST(TensorArrayTest, DeleteEmptyTensorArray) {
TensorArray* arr = new TensorArray{kTfLiteInt32, {}};
delete arr;
}
TEST(TensorArrayTest, DeleteResizedEmptyTensorArray) {
TensorArray* arr = new TensorArray{kTfLiteInt32, {}};
arr->Resize(2);
delete arr;
}
TEST(OpaqueVariantTensorArrayDataTest, CastThroughVoidAndCopy) {
TensorArray* arr = new TensorArray{kTfLiteFloat32, {}};
arr->Resize(2);
ASSERT_TRUE(arr->Set(0, MakeTensorWithData<int>({2}, {3, 4})));
void* erased = static_cast<VariantData*>(arr);
VariantData* d = static_cast<VariantData*>(erased);
VariantData* copied_d = d->CloneTo(nullptr);
auto* copied_arr = static_cast<TensorArray*>(copied_d);
ASSERT_THAT(copied_arr->At(0), DimsAre({2}));
ASSERT_THAT(arr->At(0), DimsAre({2}));
ASSERT_EQ(arr->At(0), arr->At(0));
delete d;
delete copied_d;
}
}
}
} |
928 | cpp | tensorflow/tensorflow | bcast_grad_args | tensorflow/lite/kernels/gradient/bcast_grad_args.cc | tensorflow/lite/kernels/gradient/bcast_grad_args_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_GRADIENT_BCAST_GRAD_ARGS_H_
#define TENSORFLOW_LITE_KERNELS_GRADIENT_BCAST_GRAD_ARGS_H_
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_BROADCAST_GRADIENT_ARGS();
}
}
}
#endif
#include <algorithm>
#include <array>
#include <cmath>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
static const int kInputOneTensor = 0;
static const int kInputTwoTensor = 1;
static const int kOutputOneTensor = 0;
static const int kOutputTwoTensor = 1;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* input1 = GetInput(context, node, kInputOneTensor);
TF_LITE_ENSURE(context, input1 != nullptr);
const RuntimeShape input1_shape = GetTensorShape(input1);
TF_LITE_ENSURE(context,
input1->type == kTfLiteInt32 || input1->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, input1_shape.DimensionsCount(), 1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTwoTensor);
TF_LITE_ENSURE(context, input2 != nullptr);
const RuntimeShape input2_shape = GetTensorShape(input2);
TF_LITE_ENSURE_TYPES_EQ(context, input2->type, input1->type);
TF_LITE_ENSURE_EQ(context, input2_shape.DimensionsCount(), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output1 = GetOutput(context, node, kOutputOneTensor);
TF_LITE_ENSURE(context, output1 != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, output1->type, input1->type);
TfLiteTensor* output2 = GetOutput(context, node, kOutputTwoTensor);
TF_LITE_ENSURE(context, output2 != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, output2->type, input1->type);
SetTensorToDynamic(output1);
SetTensorToDynamic(output2);
return kTfLiteOk;
}
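// Computes, for each input shape, the axes over which a gradient must be
// reduced to undo broadcasting. For example, matching the unit tests below,
// input1 = {3, 4, 1, 3} and input2 = {3, 4, 2, 3} produce {2} for the first
// output and an empty vector for the second; identical shapes produce two
// empty outputs.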
TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputOneTensor);
TF_LITE_ENSURE(context, input1 != nullptr);
const RuntimeShape input1_shape = GetTensorShape(input1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTwoTensor);
TF_LITE_ENSURE(context, input2 != nullptr);
const RuntimeShape input2_shape = GetTensorShape(input2);
TfLiteTensor* output1 = GetOutput(context, node, kOutputOneTensor);
TF_LITE_ENSURE(context, output1 != nullptr);
TfLiteTensor* output2 = GetOutput(context, node, kOutputTwoTensor);
TF_LITE_ENSURE(context, output2 != nullptr);
std::vector<int64_t> input1_vec;
std::vector<int64_t> input2_vec;
if (input1->type == kTfLiteInt32) {
input1_vec = std::vector<int64_t>(input1->data.i32,
input1->data.i32 + input1_shape.Dims(0));
} else {
input1_vec = std::vector<int64_t>(input1->data.i64,
input1->data.i64 + input1_shape.Dims(0));
}
if (input2->type == kTfLiteInt32) {
input2_vec = std::vector<int64_t>(input2->data.i32,
input2->data.i32 + input2_shape.Dims(0));
} else {
input2_vec = std::vector<int64_t>(input2->data.i64,
input2->data.i64 + input2_shape.Dims(0));
}
if (input1_vec == input2_vec) {
TfLiteIntArray* output1_shape = TfLiteIntArrayCreate(1);
output1_shape->data[0] = 0;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output1, output1_shape));
TfLiteIntArray* output2_shape = TfLiteIntArrayCreate(1);
output2_shape->data[0] = 0;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output2, output2_shape));
return kTfLiteOk;
}
size_t largest_rank = std::max(input1_vec.size(), input2_vec.size());
std::vector<int64_t> copy[2];
copy[0] = std::vector<int64_t>(input1_vec.rbegin(), input1_vec.rend());
copy[1] = std::vector<int64_t>(input2_vec.rbegin(), input2_vec.rend());
for (int i = 0; i < 2; ++i) {
if (copy[i].size() < largest_rank) {
copy[i].resize(largest_rank, 1);
}
}
std::array<bool, 2> prev_is_one = {false, false};
std::array<bool, 2> current_is_one = {false, false};
bool set_one = false;
std::vector<int64_t> grad_reduce_idx[2];
for (int j = 0; j < largest_rank; ++j) {
int output_dim = -1;
bool output_dim_set = false;
bool none_is_one = true;
for (int i = 0; i < 2; ++i) {
if (copy[i][j] == 1) {
current_is_one[i] = true;
none_is_one = false;
} else {
current_is_one[i] = false;
if (!output_dim_set || copy[i][j] == output_dim) {
output_dim = copy[i][j];
output_dim_set = true;
} else {
return kTfLiteError;
}
}
}
if (!output_dim_set) {
for (int i = 0; i < 2; ++i) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
continue;
} else if (current_is_one == prev_is_one && set_one) {
for (int i = 0; i < 2; ++i) {
if (current_is_one[i] && !none_is_one) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
}
} else {
for (int i = 0; i < 2; ++i) {
if (current_is_one[i] && !none_is_one) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
}
}
set_one = true;
for (int i = 0; i < 2; ++i) {
prev_is_one[i] = current_is_one[i];
}
}
for (int i = 0; i < 2; ++i) {
std::reverse(grad_reduce_idx[i].begin(), grad_reduce_idx[i].end());
}
TfLiteIntArray* output1_shape = TfLiteIntArrayCreate(1);
output1_shape->data[0] = grad_reduce_idx[0].size();
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output1, output1_shape));
if (output1->type == kTfLiteInt32) {
for (int i = 0; i < grad_reduce_idx[0].size(); ++i) {
output1->data.i32[i] = grad_reduce_idx[0][i];
}
} else if (output1->type == kTfLiteInt64) {
for (int i = 0; i < grad_reduce_idx[0].size(); ++i) {
output1->data.i64[i] = grad_reduce_idx[0][i];
}
}
TfLiteIntArray* output2_shape = TfLiteIntArrayCreate(1);
output2_shape->data[0] = grad_reduce_idx[1].size();
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output2, output2_shape));
if (output2->type == kTfLiteInt32) {
for (int i = 0; i < grad_reduce_idx[1].size(); ++i) {
output2->data.i32[i] = grad_reduce_idx[1][i];
}
} else if (output2->type == kTfLiteInt64) {
for (int i = 0; i < grad_reduce_idx[1].size(); ++i) {
output2->data.i64[i] = grad_reduce_idx[1][i];
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_GRADIENT_ARGS() {
static TfLiteRegistration reg = {nullptr,
nullptr,
Prepare,
Invoke};
return &reg;
}
}
}
} | #include "tensorflow/lite/kernels/gradient/bcast_grad_args.h"
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class BcastGradArgsInt32OpModel : public SingleOpModel {
public:
BcastGradArgsInt32OpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output1,
const TensorData& output2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
std::vector<uint8_t> custom_option;
SetCustomOp("BroadcastGradientArgs", custom_option,
Register_BROADCAST_GRADIENT_ARGS);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(const std::vector<int>& data) {
PopulateTensor(input1_, data);
}
void SetInput2(const std::vector<int>& data) {
PopulateTensor(input2_, data);
}
std::vector<int> GetOutput1() { return ExtractVector<int>(output1_); }
std::vector<int> GetOutput1Shape() { return GetTensorShape(output1_); }
std::vector<int> GetOutput2() { return ExtractVector<int>(output2_); }
std::vector<int> GetOutput2Shape() { return GetTensorShape(output2_); }
protected:
int input1_;
int input2_;
int output1_;
int output2_;
};
TEST(BcastGradArgsInt32OpModel, AllEqualsInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 1, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput1Int32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 1, 3});
model.SetInput2({3, 4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput2Int32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({1}));
}
TEST(BcastGradArgsInt32OpModel, DifferentInputSizesInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {3}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({0}));
}
TEST(BcastGradArgsInt32OpModel, NonBroadcastableDimsInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({9, 9, 9, 9});
EXPECT_THAT(model.Invoke(), kTfLiteError);
}
class BcastGradArgsInt64OpModel : public SingleOpModel {
public:
BcastGradArgsInt64OpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output1,
const TensorData& output2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
std::vector<uint8_t> custom_option;
SetCustomOp("BroadcastGradientArgs", custom_option,
Register_BROADCAST_GRADIENT_ARGS);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(const std::vector<int64_t>& data) {
PopulateTensor(input1_, data);
}
void SetInput2(const std::vector<int64_t>& data) {
PopulateTensor(input2_, data);
}
std::vector<int64_t> GetOutput1() { return ExtractVector<int64_t>(output1_); }
std::vector<int> GetOutput1Shape() { return GetTensorShape(output1_); }
std::vector<int64_t> GetOutput2() { return ExtractVector<int64_t>(output2_); }
std::vector<int> GetOutput2Shape() { return GetTensorShape(output2_); }
protected:
int input1_;
int input2_;
int output1_;
int output2_;
};
TEST(BcastGradArgsInt32OpModel, AllEqualsInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 1, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput1Int64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 1, 3});
model.SetInput2({3, 4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput2Int64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({1}));
}
TEST(BcastGradArgsInt32OpModel, DifferentInputSizesInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {3}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({0}));
}
TEST(BcastGradArgsInt32OpModel, NonBroadcastableDimsInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({9, 9, 9, 9});
EXPECT_THAT(model.Invoke(), kTfLiteError);
}
}
}
}
} |
929 | cpp | tensorflow/tensorflow | tf_tensor_view | tensorflow/lite/kernels/shim/tf_tensor_view.cc | tensorflow/lite/kernels/shim/tf_tensor_view_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TF_TENSOR_VIEW_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TF_TENSOR_VIEW_H_
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/lite/kernels/shim/tensor_view.h"
namespace tflite {
namespace shim {
class TfTensorView : public TensorView {
public:
TfTensorView(TfTensorView &&o) noexcept;
TfTensorView(const TfTensorView &o);
TfTensorView &operator=(TfTensorView &&o) noexcept;
TfTensorView &operator=(const TfTensorView &);
protected:
template <typename DType>
TfTensorView(const ::tensorflow::Tensor *wrapped_tensor, const DType &dtype);
template <typename TfTensorType>
friend absl::StatusOr<
typename MatchConstNess<TfTensorType, TfTensorView>::Type>
TfTensorViewTemplatizedNew(TfTensorType *wrapped_tensor);
std::vector<int> shape_data_;
};
template <>
struct TensorViewSubType<::tensorflow::Tensor> {
using Type = TfTensorView;
};
template <>
struct TensorViewSubType<const ::tensorflow::Tensor> {
using Type = const TfTensorView;
};
template <>
absl::StatusOr<TfTensorView> TensorView::New<::tensorflow::Tensor>(
::tensorflow::Tensor *wrapped_tensor);
template <>
absl::StatusOr<const TfTensorView> TensorView::New<const ::tensorflow::Tensor>(
const ::tensorflow::Tensor *wrapped_tensor);
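// Wraps the tensorflow::Tensor's buffer without copying it; only the dimension
// sizes are copied into shape_data_ so that the view owns its own shape span.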
template <typename DType>
TfTensorView::TfTensorView(const ::tensorflow::Tensor *wrapped_tensor,
const DType &dtype)
: TensorView({}, wrapped_tensor->data(),
wrapped_tensor->tensor_data().size(), dtype) {
shape_data_.resize(wrapped_tensor->shape().dims());
for (int dim = 0; dim < wrapped_tensor->shape().dims(); ++dim) {
shape_data_[dim] = wrapped_tensor->shape().dim_size(dim);
}
shape_ = absl::Span<int>(shape_data_);
}
}
}
#endif
#include "tensorflow/lite/kernels/shim/tf_tensor_view.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/types.pb.h"
#define CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TF_DTYPE, CPP_DTYPE) \
case TF_DTYPE: { \
using DType = typename CPP_DTYPE; \
return TfTensorView(wrapped_tensor, DType()); \
}
#define CASE_FOR_DTYPE(TF_DTYPE) \
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TF_DTYPE, \
::tensorflow::EnumToDataType<TF_DTYPE>::Type)
namespace tflite {
namespace shim {
TfTensorView::TfTensorView(TfTensorView &&o) noexcept
: TensorView(std::move(o)), shape_data_(std::move(o.shape_data_)) {
shape_ = absl::Span<int>(shape_data_);
}
TfTensorView::TfTensorView(const TfTensorView &o)
: TensorView(o), shape_data_(o.shape_data_) {
shape_ = absl::Span<int>(shape_data_);
}
TfTensorView &TfTensorView::operator=(TfTensorView &&o) noexcept {
shape_data_ = std::move(o.shape_data_);
TensorView::operator=(std::move(o));
shape_ = absl::Span<int>(shape_data_);
return *this;
}
TfTensorView &TfTensorView::operator=(const TfTensorView &o) {
if (&o == this) return *this;
TensorView::operator=(o);
shape_data_ = o.shape_data_;
shape_ = absl::Span<int>(shape_data_);
return *this;
}
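// Dispatches on the wrapped tensor's dtype through the CASE_FOR_DTYPE macros
// above, constructing a TfTensorView tagged with the matching C++ type;
// dtypes without a case fall through to absl::UnimplementedError.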
template <typename TfTensorType>
absl::StatusOr<typename MatchConstNess<TfTensorType, TfTensorView>::Type>
TfTensorViewTemplatizedNew(TfTensorType *wrapped_tensor) {
switch (wrapped_tensor->dtype()) {
CASE_FOR_DTYPE(::tensorflow::DT_BOOL);
CASE_FOR_DTYPE(::tensorflow::DT_UINT8);
CASE_FOR_DTYPE(::tensorflow::DT_UINT64);
CASE_FOR_DTYPE(::tensorflow::DT_INT8);
CASE_FOR_DTYPE(::tensorflow::DT_INT16);
CASE_FOR_DTYPE(::tensorflow::DT_INT32);
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(::tensorflow::DT_INT64, std::int64_t);
CASE_FOR_DTYPE(::tensorflow::DT_FLOAT);
CASE_FOR_DTYPE(::tensorflow::DT_DOUBLE);
CASE_FOR_DTYPE(::tensorflow::DT_STRING);
default: {
return absl::UnimplementedError(
absl::StrCat("Unsupported data type: ", wrapped_tensor->dtype()));
}
}
}
template <>
absl::StatusOr<TfTensorView> TensorView::New<::tensorflow::Tensor>(
::tensorflow::Tensor *wrapped_tensor) {
return TfTensorViewTemplatizedNew(wrapped_tensor);
}
template <>
absl::StatusOr<const TfTensorView> TensorView::New<const ::tensorflow::Tensor>(
const ::tensorflow::Tensor *wrapped_tensor) {
return TfTensorViewTemplatizedNew(wrapped_tensor);
}
}
} | #include "tensorflow/lite/kernels/shim/tf_tensor_view.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::protobuf::TextFormat;
TEST(TfTensorView, Bool) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_BOOL
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
bool_val: [ false, false, false, false, false, false ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_premove_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto tensor_data_as_vector = t.Data<bool>();
for (int i = 0; i < 3 * 2; ++i) tensor_data_as_vector[i] = i % 5 == 0;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[1 0]\n [0 0]\n [0 1]]"));
}
TEST(TfTensorView, Int32) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_INT32
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
int_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_premove_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto tensor_data_as_vector = t.Data<int32_t>();
for (int i = 0; i < 3 * 2; ++i) tensor_data_as_vector[i] = i;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 1]\n [2 3]\n [4 5]]"));
}
TEST(TfTensorView, Int64) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_INT64
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
int_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<int64_t>();
for (int i = 0; i < 3 * 2; ++i) tensor_data_as_vector[i] = i;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 1]\n [2 3]\n [4 5]]"));
}
TEST(TfTensorView, Float) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_FLOAT
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
float_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<float>();
for (int i = 0; i < 3 * 2; ++i)
tensor_data_as_vector[i] = static_cast<float>(i) / 2.0;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 0.5]\n [1 1.5]\n [2 2.5]]"));
}
TEST(TfTensorView, Double) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_DOUBLE
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
double_val: [ 0, 0, 0, 0, 0, 0 ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<double>();
for (int i = 0; i < 3 * 2; ++i)
tensor_data_as_vector[i] = static_cast<double>(i) / 2.0;
ASSERT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq("[[0 0.5]\n [1 1.5]\n [2 2.5]]"));
}
TEST(TfTensorView, Str) {
::tensorflow::TensorProto tf_tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_STRING
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
string_val: [ "", "", "", "", "", "" ]
)pb",
&tf_tensor_pb));
::tensorflow::Tensor tf_tensor;
ASSERT_TRUE(tf_tensor.FromProto(tf_tensor_pb));
auto t_or = TensorView::New(&tf_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto tensor_data_as_vector = t.Data<::tensorflow::tstring>();
tensor_data_as_vector[0] = "a";
tensor_data_as_vector[1] = "bc";
tensor_data_as_vector[2] = "def";
tensor_data_as_vector[3] = "g";
tensor_data_as_vector[4] = "hi";
tensor_data_as_vector[5] = "";
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "hi", ""));
const auto& const_tf_tensor = tf_tensor;
const auto const_t_or = TensorView::New(&const_tf_tensor);
ASSERT_TRUE(const_t_or.ok()) << const_t_or.status();
const auto& const_t = const_t_or.value();
EXPECT_THAT(const_t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "hi", ""));
const char expectation[] = R"(
[["a" "bc"]
["def" "g"]
["hi" ""]])";
EXPECT_THAT(tf_tensor.SummarizeValue(10, true),
::testing::Eq(absl::string_view(expectation).substr(1)));
}
}
}
} |
930 | cpp | tensorflow/tensorflow | tflite_tensor_view | tensorflow/lite/kernels/shim/tflite_tensor_view.cc | tensorflow/lite/kernels/shim/tflite_tensor_view_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TFLITE_TENSOR_VIEW_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TFLITE_TENSOR_VIEW_H_
#include <cstring>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/shim/tensor_view.h"
namespace tflite {
namespace shim {
class TfLiteTensorView : public TensorView {
public:
TfLiteTensorView(TfLiteTensorView &&o) noexcept;
TfLiteTensorView(const TfLiteTensorView &o);
TfLiteTensorView &operator=(TfLiteTensorView &&o) noexcept;
TfLiteTensorView &operator=(const TfLiteTensorView &);
protected:
template <typename DType>
TfLiteTensorView(::TfLiteTensor *wrapped_tensor, const DType &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
wrapped_tensor->data.raw, wrapped_tensor->bytes, dtype),
wrapped_tensor_(wrapped_tensor),
const_wrapped_tensor_(wrapped_tensor) {}
TfLiteTensorView(::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype);
template <typename DType>
TfLiteTensorView(const ::TfLiteTensor *wrapped_tensor, const DType &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
wrapped_tensor->data.raw, wrapped_tensor->bytes, dtype),
const_wrapped_tensor_(wrapped_tensor) {}
TfLiteTensorView(const ::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype);
template <typename TfLiteTensorType>
friend absl::StatusOr<
typename MatchConstNess<TfLiteTensorType, TfLiteTensorView>::Type>
TfLiteTensorViewTemplatizedNew(TfLiteTensorType *wrapped_tensor);
struct StringBuffer {
explicit StringBuffer(TfLiteTensorView *t_view);
~StringBuffer();
std::vector<::tensorflow::tstring> buffer;
::TfLiteTensor *wrapped_tensor = nullptr;
};
void InitForStringDType();
::TfLiteTensor *wrapped_tensor_ = nullptr;
const ::TfLiteTensor *const_wrapped_tensor_ = nullptr;
std::shared_ptr<StringBuffer> str_vec_ = nullptr;
};
template <>
struct TensorViewSubType<::TfLiteTensor> {
using Type = TfLiteTensorView;
};
template <>
struct TensorViewSubType<const ::TfLiteTensor> {
using Type = const TfLiteTensorView;
};
template <>
absl::StatusOr<TfLiteTensorView> TensorView::New<::TfLiteTensor>(
::TfLiteTensor *wrapped_tensor);
template <>
absl::StatusOr<const TfLiteTensorView> TensorView::New<const ::TfLiteTensor>(
const ::TfLiteTensor *wrapped_tensor);
}
}
#endif
#include "tensorflow/lite/kernels/shim/tflite_tensor_view.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/shim/tensor_view.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/type_to_tflitetype.h"
#define CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TFLITE_DTYPE, CPP_DTYPE) \
case TFLITE_DTYPE: { \
using DType = typename CPP_DTYPE; \
return TfLiteTensorView(wrapped_tensor, DType()); \
}
#define CASE_FOR_DTYPE(TFLITE_DTYPE) \
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE( \
TFLITE_DTYPE, ::tflite::TfLiteTypeToType<TFLITE_DTYPE>::Type)
namespace tflite {
namespace shim {
TfLiteTensorView::TfLiteTensorView(::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
nullptr, 0, dtype),
wrapped_tensor_(wrapped_tensor),
const_wrapped_tensor_(wrapped_tensor) {
InitForStringDType();
}
TfLiteTensorView::TfLiteTensorView(const ::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
nullptr, 0, dtype),
const_wrapped_tensor_(wrapped_tensor) {
InitForStringDType();
}
TfLiteTensorView::TfLiteTensorView(TfLiteTensorView &&o) noexcept
: TensorView(std::move(o)),
wrapped_tensor_(o.wrapped_tensor_),
const_wrapped_tensor_(o.const_wrapped_tensor_),
str_vec_(std::move(o.str_vec_)) {
}
TfLiteTensorView::TfLiteTensorView(const TfLiteTensorView &o)
: TensorView(o),
wrapped_tensor_(o.wrapped_tensor_),
const_wrapped_tensor_(o.const_wrapped_tensor_),
str_vec_(o.str_vec_) {
}
TfLiteTensorView &TfLiteTensorView::operator=(TfLiteTensorView &&o) noexcept {
wrapped_tensor_ = o.wrapped_tensor_;
const_wrapped_tensor_ = o.const_wrapped_tensor_;
str_vec_ = std::move(o.str_vec_);
TensorView::operator=(std::move(o));
return *this;
}
TfLiteTensorView &TfLiteTensorView::operator=(const TfLiteTensorView &o) {
if (&o == this) return *this;
TensorView::operator=(o);
wrapped_tensor_ = o.wrapped_tensor_;
const_wrapped_tensor_ = o.const_wrapped_tensor_;
str_vec_ = o.str_vec_;
return *this;
}
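// TFLite stores string tensors in a packed byte buffer rather than as an
// array of string objects, so string views are backed by a shared
// StringBuffer of tstrings instead of pointing at the tensor's raw data.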
void TfLiteTensorView::InitForStringDType() {
if (str_vec_ == nullptr) {
str_vec_ = std::make_shared<StringBuffer>(this);
}
data_ = absl::Span<::tensorflow::tstring>(str_vec_->buffer);
}
TfLiteTensorView::StringBuffer::StringBuffer(TfLiteTensorView *t_view)
: wrapped_tensor(t_view->wrapped_tensor_) {
buffer.resize(NumElements(t_view->shape_));
const auto const_wrapped_tensor = t_view->const_wrapped_tensor_;
std::size_t str_count;
if (const_wrapped_tensor->data.raw == nullptr)
str_count = 0;
else
str_count = ::tflite::GetStringCount(const_wrapped_tensor);
for (int i = 0; i < str_count; ++i) {
const auto str_ref = ::tflite::GetString(const_wrapped_tensor, i);
buffer[i].assign_as_view(str_ref.str, str_ref.len);
}
}
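// On destruction the staged strings are serialized back into the wrapped
// (mutable) tensor via DynamicBuffer; const views carry a null
// wrapped_tensor and skip the write-back.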
TfLiteTensorView::StringBuffer::~StringBuffer() {
if (wrapped_tensor == nullptr) return;
tflite::DynamicBuffer buf;
for (const auto &s : buffer) buf.AddString(s.data(), s.length());
buf.WriteToTensor(wrapped_tensor, nullptr);
}
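// Factory shared by the TensorView::New specializations below; dispatches on
// the wrapped TfLite tensor's type field.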
template <typename TfLiteTensorType>
absl::StatusOr<
typename MatchConstNess<TfLiteTensorType, TfLiteTensorView>::Type>
TfLiteTensorViewTemplatizedNew(TfLiteTensorType *wrapped_tensor) {
switch (wrapped_tensor->type) {
CASE_FOR_DTYPE(kTfLiteBool);
CASE_FOR_DTYPE(kTfLiteUInt8);
CASE_FOR_DTYPE(kTfLiteUInt64);
CASE_FOR_DTYPE(kTfLiteInt8);
CASE_FOR_DTYPE(kTfLiteInt16);
CASE_FOR_DTYPE(kTfLiteInt32);
CASE_FOR_DTYPE(kTfLiteInt64);
CASE_FOR_DTYPE(kTfLiteFloat32);
CASE_FOR_DTYPE(kTfLiteFloat64);
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(kTfLiteString, ::tensorflow::tstring);
default: {
return absl::UnimplementedError(
absl::StrCat("Unsupported dtype: ", wrapped_tensor->type));
}
}
}
template <>
absl::StatusOr<TfLiteTensorView> TensorView::New<::TfLiteTensor>(
::TfLiteTensor *wrapped_tensor) {
return TfLiteTensorViewTemplatizedNew(wrapped_tensor);
}
template <>
absl::StatusOr<const TfLiteTensorView> TensorView::New<const ::TfLiteTensor>(
const ::TfLiteTensor *wrapped_tensor) {
return TfLiteTensorViewTemplatizedNew(wrapped_tensor);
}
}
} | #include "tensorflow/lite/kernels/shim/tflite_tensor_view.h"
#include <cstdint>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/shim/test_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace shim {
namespace {
using ::testing::Eq;
TEST(TfLiteTensorW, Bool) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<bool>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_bool";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_premove_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto data = t.Data<bool>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = (i % 5 == 0);
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[1, 0], [0, 0], [0, 1]]"));
}
template <typename IntType>
void IntTest() {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<IntType>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_int";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_premove_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto data = t.Data<IntType>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = i;
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[0, 1], [2, 3], [4, 5]]"));
}
TEST(TfLiteTensorW, Int8) { IntTest<int8_t>(); }
TEST(TfLiteTensorW, UInt8) { IntTest<uint8_t>(); }
TEST(TfLiteTensorW, Int16) { IntTest<int16_t>(); }
TEST(TfLiteTensorW, Int32) { IntTest<int32_t>(); }
TEST(TfLiteTensorW, Int64) { IntTest<int64_t>(); }
template <typename FloatType>
void FloatTest() {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<FloatType>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_float";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto data = t.Data<FloatType>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = static_cast<FloatType>(i) / 2.;
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[0, 0.5], [1, 1.5], [2, 2.5]]"));
}
TEST(TfLiteTensorW, Float) { FloatTest<float>(); }
TEST(TfLiteTensorW, Double) { FloatTest<double>(); }
TEST(TfLiteTensorW, Str) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<std::string>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_str";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto t_mat = t.As<::tensorflow::tstring, 2>();
t.Data<::tensorflow::tstring>()[0] = "a";
t.Data<::tensorflow::tstring>()[1] = "bc";
t_mat(1, 0) = "def";
t.Data<::tensorflow::tstring>()[3] = "g";
t.Data<::tensorflow::tstring>()[4] = "";
t_mat(2, 1) = "hi";
}
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "", "hi"));
}
const auto const_tflite_tensor = tflite_tensor;
{
const auto t_or = TensorView::New(const_tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
const auto& t = t_or.value();
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "", "hi"));
}
EXPECT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[a, bc], [def, g], [, hi]]"));
}
TEST(TfLiteTensorW, EmptyStr) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<std::string>({0}, tflite_tensor);
tflite_tensor->name = "test_str";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
}
EXPECT_THAT(GetStringCount(tflite_tensor), Eq(0));
}
}
}
} |
931 | cpp | tensorflow/tensorflow | tmpl_tflite_op | tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.cc | tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TFLITE_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TFLITE_OP_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
namespace ops {
namespace custom {
void AddTmplOp(MutableOpResolver* resolver);
TfLiteRegistration* Register_TMPL_OP();
const char* OpName_TMPL_OP();
}
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/shim/test_op/tmpl_op.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
#include "tensorflow/lite/kernels/shim/tflite_op_wrapper.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
const char a_type[]("AType"), b_type[]("BType");
}
using ::tflite::shim::op_wrapper::Attr;
using ::tflite::shim::op_wrapper::AttrName;
using ::tflite::shim::op_wrapper::OpWrapper;
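// OpWrapper instantiates TmplOp over the listed "AType" and "BType" attribute
// types and selects the matching instantiation at run time from the op's
// attribute values.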
template <shim::Runtime Rt>
using Op = OpWrapper<Rt, shim::TmplOp, Attr<AttrName<a_type>, int32_t, float>,
Attr<AttrName<b_type>, int32_t, int64_t, bool>>;
using OpKernel = ::tflite::shim::TfLiteOpKernel<Op>;
void AddTmplOp(MutableOpResolver* resolver) { OpKernel::Add(resolver); }
TfLiteRegistration* Register_TMPL_OP() {
return OpKernel::GetTfLiteRegistration();
}
const char* OpName_TMPL_OP() { return OpKernel::OpName(); }
}
}
} | #include "tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace shim {
namespace {
template <typename AType, typename BType>
class TmplOpModel : public SingleOpModel {
public:
TmplOpModel(const std::vector<uint8_t>& op_options,
const std::vector<tflite::TensorType>& input_types,
const std::vector<std::vector<int>>& input_shapes,
const std::vector<AType>& input0,
const std::vector<BType>& input1,
const std::vector<tflite::TensorType>& output_types) {
std::vector<int> input_idx;
for (const auto input_type : input_types) {
input_idx.push_back(AddInput(input_type));
}
for (const auto output_type : output_types) {
output_idx_.push_back(AddOutput(output_type));
}
SetCustomOp(ops::custom::OpName_TMPL_OP(), op_options,
ops::custom::Register_TMPL_OP);
BuildInterpreter(input_shapes);
PopulateTensor(input_idx[0], input0);
PopulateTensor(input_idx[1], input1);
}
template <typename T>
std::vector<T> GetOutput(const int i) {
return ExtractVector<T>(output_idx_[i]);
}
std::vector<int> GetOutputShape(const int i) {
return GetTensorShape(output_idx_[i]);
}
protected:
std::vector<int> output_idx_;
};
TEST(TmplOpModel, float_int32) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteFloat32);
builder.Int("BType", kTfLiteInt32);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_FLOAT32,
tflite::TensorType_INT32};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<float> input0 = {5.6f};
const std::vector<int32_t> input1 = {3};
TmplOpModel<float, int32_t> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(8.6f));
}
TEST(TmplOpModel, int32_int64) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteInt32);
builder.Int("BType", kTfLiteInt64);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_INT32,
tflite::TensorType_INT64};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<int32_t> input0 = {12};
const std::vector<int64_t> input1 = {33l};
TmplOpModel<int32_t, int64_t> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(45.0f));
}
TEST(TmplOpModel, int32_bool) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteInt32);
builder.Int("BType", kTfLiteBool);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_INT32,
tflite::TensorType_BOOL};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<int32_t> input0 = {12};
const std::vector<bool> input1 = {true};
TmplOpModel<int32_t, bool> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(13.0f));
}
}
}
} |
932 | cpp | tensorflow/tensorflow | simple_tf_op | tensorflow/lite/kernels/shim/test_op/simple_tf_op.cc | tensorflow/lite/kernels/shim/test_op/simple_tf_op_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_SIMPLE_TF_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_SIMPLE_TF_OP_H_
#include "tensorflow/lite/kernels/shim/test_op/simple_op.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
class SimpleOpKernel : public TfOpKernel<SimpleOp> {
public:
using TfOpKernel::TfOpKernel;
};
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/simple_tf_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
namespace tflite {
namespace shim {
REGISTER_TF_OP_SHIM(SimpleOpKernel);
REGISTER_KERNEL_BUILDER(
Name(SimpleOpKernel::OpName()).Device(::tensorflow::DEVICE_CPU),
SimpleOpKernel);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/tstring.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_INT64;
using ::tensorflow::DT_STRING;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::tstring;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class SimpleOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(SimpleOpTfTest, Output1Size_5_N_2) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 5)
.Attr("output2_suffix", "foo")
.Attr("N", 2)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(2, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abc"});
AddInputFromArray<int64_t>(TensorShape({}), {123});
AddInputFromArray<int64_t>(TensorShape({2}), {456, 789});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(
*GetOutput(1), AsTensor<float>({0, 0.5, 1., 1.5, 2.}, {5}));
ExpectTensorEqual<tstring>(
*GetOutput(2), AsTensor<tstring>({"0", "1", "2", "foo"}, {4}));
ExpectTensorEqual<int64_t>(*GetOutput(3),
AsTensor<int64_t>({124}, {}));
ExpectTensorEqual<int64_t>(*GetOutput(4),
AsTensor<int64_t>({457, 790}, {2}));
}
TEST_F(SimpleOpTfTest, Output1Size_3_N_0) {
TF_ASSERT_OK(NodeDefBuilder("simple_op", "SimpleOperation")
.Attr("output1_size", 3)
.Attr("output2_suffix", "foo")
.Attr("N", 0)
.Input(FakeInput(DT_STRING))
.Input(FakeInput(0, DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {"abcde"});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<int>(*GetOutput(0),
AsTensor<int>({0, 1, 2, 3, 4}, {5}));
ExpectTensorEqual<float>(*GetOutput(1),
AsTensor<float>({0, 0.5, 1.}, {3}));
ExpectTensorEqual<tstring>(
*GetOutput(2),
AsTensor<tstring>({"0", "1", "2", "3", "4", "foo"}, {6}));
}
}
}
} |
933 | cpp | tensorflow/tensorflow | tmpl_tf_op | tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.cc | tensorflow/lite/kernels/shim/test_op/tmpl_tf_op_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TF_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TF_OP_H_
#include "tensorflow/lite/kernels/shim/test_op/tmpl_op.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
template <typename AType, typename BType>
class TmplOpKernel : public TfOpKernel<TmplOp, AType, BType> {
public:
using TfOpKernel<TmplOp, AType, BType>::TfOpKernel;
};
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.h"
#include <cstdint>
#include "tensorflow/core/framework/types.h"
namespace tflite {
namespace shim {
using TmplOpKernelInstance = TmplOpKernel<float, int32_t>;
REGISTER_TF_OP_SHIM(TmplOpKernelInstance);
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
.Device(::tensorflow::DEVICE_CPU)
.TypeConstraint<float>("AType")
.TypeConstraint<int32_t>("BType"),
TmplOpKernel<float, int32_t>);
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
.Device(::tensorflow::DEVICE_CPU)
.TypeConstraint<int32_t>("AType")
.TypeConstraint<int64_t>("BType"),
TmplOpKernel<int32_t, int64_t>);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_FLOAT;
using ::tensorflow::DT_INT32;
using ::tensorflow::DT_INT64;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
class TmplOpTfTest : public ::tensorflow::OpsTestBase {};
TEST_F(TmplOpTfTest, float_int32) {
TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
.Attr("AType", DT_FLOAT)
.Attr("BType", DT_INT32)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {10.5});
AddInputFromArray<int32_t>(TensorShape({}), {20});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<float>(*GetOutput(0),
AsTensor<float>({30.5}, {}));
}
TEST_F(TmplOpTfTest, int32_int64) {
TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
.Attr("AType", DT_INT32)
.Attr("BType", DT_INT64)
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT64))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<int32_t>(TensorShape({}), {10});
AddInputFromArray<int64_t>(TensorShape({}), {20});
TF_ASSERT_OK(RunOpKernel());
ExpectTensorEqual<float>(*GetOutput(0), AsTensor<float>({30}, {}));
}
}
}
} |
934 | cpp | tensorflow/tensorflow | simple_tflite_op | tensorflow/lite/kernels/shim/test_op/simple_tflite_op.cc | tensorflow/lite/kernels/shim/test_op/simple_tflite_op_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_SIMPLE_TFLITE_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_SIMPLE_TFLITE_OP_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
namespace ops {
namespace custom {
void AddSimpleOp(MutableOpResolver* resolver);
TfLiteRegistration* Register_SIMPLE_OP();
const char* OpName_SIMPLE_OP();
}
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/simple_tflite_op.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/shim/test_op/simple_op.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
namespace tflite {
namespace ops {
namespace custom {
using OpKernel = ::tflite::shim::TfLiteOpKernel<tflite::shim::SimpleOp>;
void AddSimpleOp(MutableOpResolver* resolver) { OpKernel::Add(resolver); }
TfLiteRegistration* Register_SIMPLE_OP() {
return OpKernel::GetTfLiteRegistration();
}
const char* OpName_SIMPLE_OP() { return OpKernel::OpName(); }
}
}
} | #include "tensorflow/lite/kernels/shim/test_op/simple_tflite_op.h"
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
class SimpleOpModel : public SingleOpModel {
public:
SimpleOpModel(const std::vector<uint8_t>& op_options,
const std::vector<tflite::TensorType>& input_types,
const std::vector<std::vector<int>>& input_shapes,
const std::string& input0,
const std::vector<std::vector<int64_t>>& input1,
const std::vector<tflite::TensorType>& output_types) {
std::vector<int> input_idx;
for (const auto input_type : input_types) {
input_idx.push_back(AddInput(input_type));
}
for (const auto output_type : output_types) {
output_idx_.push_back(AddOutput(output_type));
}
SetCustomOp(OpName_SIMPLE_OP(), op_options, Register_SIMPLE_OP);
BuildInterpreter(input_shapes);
PopulateStringTensor(input_idx[0], {input0});
for (int i = 0; i < input1.size(); ++i) {
PopulateTensor(input_idx[1 + i], input1[i]);
}
}
template <typename T>
std::vector<T> GetOutput(const int i) {
return ExtractVector<T>(output_idx_[i]);
}
std::vector<int> GetOutputShape(const int i) {
return GetTensorShape(output_idx_[i]);
}
protected:
std::vector<int> output_idx_;
};
TEST(SimpleOpModel, OutputSize_5_N_2) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("output1_size", 5);
builder.String("output2_suffix", "foo");
builder.Int("N", 2);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}, {2}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_STRING,
tflite::TensorType_INT64,
tflite::TensorType_INT64};
std::vector<tflite::TensorType> output_types = {
tflite::TensorType_INT32, tflite::TensorType_FLOAT32,
tflite::TensorType_STRING, tflite::TensorType_INT64,
tflite::TensorType_INT64};
const std::string input0 = "abc";
const std::vector<std::vector<int64_t>> input1 = {{123}, {456, 789}};
SimpleOpModel m(builder.GetBuffer(), input_types, input_shapes,
input0, input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int>(0), testing::ElementsAre(0, 1, 2, 3, 4));
EXPECT_THAT(m.GetOutput<float>(1),
testing::ElementsAre(0, 0.5, 1.0, 1.5, 2.0));
EXPECT_THAT(m.GetOutput<std::string>(2),
testing::ElementsAre("0", "1", "2", "foo"));
EXPECT_THAT(m.GetOutput<int64_t>(3), testing::ElementsAre(124));
EXPECT_THAT(m.GetOutputShape(3), testing::ElementsAre());
EXPECT_THAT(m.GetOutput<int64_t>(4), testing::ElementsAre(457, 790));
EXPECT_THAT(m.GetOutputShape(4), testing::ElementsAre(2));
}
TEST(SimpleOpModel, OutputSize_3_N_0) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("output1_size", 3);
builder.String("output2_suffix", "foo");
builder.Int("N", 0);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_STRING};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_INT32,
tflite::TensorType_FLOAT32,
tflite::TensorType_STRING};
const std::string input0 = "abcde";
const std::vector<std::vector<int64_t>> input1;
SimpleOpModel m(builder.GetBuffer(), input_types, input_shapes,
input0, input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int>(0), testing::ElementsAre(0, 1, 2, 3, 4));
EXPECT_THAT(m.GetOutput<float>(1), testing::ElementsAre(0, 0.5, 1.0));
EXPECT_THAT(m.GetOutput<std::string>(2),
testing::ElementsAre("0", "1", "2", "3", "4", "foo"));
}
}
}
}
} |
935 | cpp | tensorflow/tensorflow | tensor_utils | tensorflow/lite/core/api/tensor_utils.cc | tensorflow/lite/kernels/internal/tensor_utils_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_UNIFORM_QUANT_OPS_TENSOR_UTILS_H_
#define TENSORFLOW_CORE_KERNELS_UNIFORM_QUANT_OPS_TENSOR_UTILS_H_
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
namespace tensorflow {
template <typename T>
bool AllElementsPositive(const Tensor& tensor) {
Eigen::Tensor<bool, 0, Eigen::RowMajor> positive =
(tensor.flat<T>() > 0).all();
return positive();
}
Status QuantizationAxisAndShapeValid(const TensorShape& data_shape,
const TensorShape& scales_shape,
const TensorShape& zero_points_shape,
int quantization_axis);
TensorShape TransposedShape(const TensorShape& in_shape,
const gtl::ArraySlice<int32_t> perm);
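// Permutes the dimensions of `in` into the preallocated `out` according to
// `perm`: each output linear index is decomposed into per-dimension
// coordinates via the output strides, then remapped through `perm` onto the
// input strides.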
template <typename T>
void Transpose(const Tensor& in, const gtl::ArraySlice<int32_t> perm,
Tensor& out) {
gtl::InlinedVector<int64_t, 8> in_strides =
ComputeStride<int64_t>(in.shape());
gtl::InlinedVector<int64_t, 8> out_strides =
ComputeStride<int64_t>(out.shape());
const T* in_data = in.flat<T>().data();
T* out_data = out.flat<T>().data();
for (int64_t out_idx = 0; out_idx < out.NumElements(); ++out_idx) {
int64_t in_idx = 0;
int64_t remain_out_idx = out_idx;
for (int dim = 0; dim < out.dims(); ++dim) {
const int64_t ratio = remain_out_idx / out_strides[dim];
remain_out_idx -= ratio * out_strides[dim];
in_idx += ratio * in_strides[perm[dim]];
}
out_data[out_idx] = in_data[in_idx];
}
}
}
#endif
#include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
namespace tensorflow {
using tensorflow::errors::InvalidArgument;
Status QuantizationAxisAndShapeValid(const TensorShape& data_shape,
const TensorShape& scales_shape,
const TensorShape& zero_points_shape,
int quantization_axis) {
if (!scales_shape.IsSameSize(zero_points_shape)) {
return InvalidArgument(
"scales and zero_points shape must be same, but given scales shape ",
scales_shape.DebugString(), " and zero_points shape ",
zero_points_shape.DebugString());
}
if (quantization_axis < -1 || quantization_axis >= data_shape.dims()) {
return InvalidArgument(
"quantization_axis must be -1 or in range [0, input.rank), but given ",
quantization_axis);
}
if (quantization_axis == -1) {
if (scales_shape.dims() != 0) {
return InvalidArgument(
"If quantization_axis is -1, scales and zero_points must be scalar "
"tensors, but given scales shape ",
scales_shape.DebugString(), " and zero_points shape ",
zero_points_shape.DebugString());
}
} else {
if (!(scales_shape.dims() == 1 &&
scales_shape.dim_size(0) == data_shape.dim_size(quantization_axis))) {
return InvalidArgument(
"If quantization_axis is not -1, scales and zero_points must be a "
"tensor of rank 1 and the size must be equal to the "
"input.dim_size(quantization_axis), but given quantization_axis ",
quantization_axis, ", scales shape ", scales_shape.DebugString(),
" and zero_points shape ", zero_points_shape.DebugString());
}
}
return absl::OkStatus();
}
TensorShape TransposedShape(const TensorShape& in_shape,
const absl::Span<const int32_t> perm) {
TensorShape out_shape = in_shape;
for (int i = 0; i < out_shape.dims(); ++i) {
out_shape.set_dim(i, in_shape.dim_size(perm[i]));
}
return out_shape;
}
} | #include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
TEST(TensorUtilsTest, AllElementsPositive) {
EXPECT_TRUE(AllElementsPositive<int32_t>(
test::AsTensor<int32_t>({1, 2, 3, 4, 5}, {5})));
EXPECT_FALSE(AllElementsPositive<int32_t>(
test::AsTensor<int32_t>({1, 2, 0, 4, 5}, {5})));
EXPECT_FALSE(AllElementsPositive<int32_t>(
test::AsTensor<int32_t>({1, 2, -2, 4, 5}, {5})));
}
TEST(TensorUtilsTest, QuantizationAxisAndShapeValid) {
TF_EXPECT_OK(QuantizationAxisAndShapeValid({2, 3, 4},
{3},
{3},
1));
TF_EXPECT_OK(QuantizationAxisAndShapeValid({2, 3, 4},
{},
{},
-1));
EXPECT_TRUE(absl::IsInvalidArgument(
QuantizationAxisAndShapeValid({2, 3, 4},
{3},
{2},
1)));
EXPECT_TRUE(absl::IsInvalidArgument(
QuantizationAxisAndShapeValid({2, 3, 4},
{3},
{3},
3)));
EXPECT_TRUE(absl::IsInvalidArgument(
QuantizationAxisAndShapeValid({2, 3, 4},
{3},
{3},
-1)));
EXPECT_TRUE(absl::IsInvalidArgument(
QuantizationAxisAndShapeValid({2, 3, 4},
{5},
{5},
1)));
}
TEST(TensorUtilsTest, TransposedShape) {
EXPECT_EQ(TransposedShape({2, 3, 4, 5}, {1, 2, 3, 0}),
TensorShape({3, 4, 5, 2}));
}
TEST(TensorUtilsTest, Transpose) {
const std::vector<int32_t> perm = {1, 2, 0};
const TensorShape shape({2, 3, 4});
const TensorShape transposed_shape = TransposedShape(shape, perm);
Tensor transposed_tensor = test::AsTensor<int32_t>(
std::vector<int32_t>(2 * 3 * 4, 0), transposed_shape);
Transpose<int32_t>(
test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
shape),
perm, transposed_tensor);
test::ExpectTensorEqual<int32_t>(
transposed_tensor,
test::AsTensor<int32_t>({0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17,
6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23},
transposed_shape));
}
} |
936 | cpp | tensorflow/tensorflow | runtime_shape | tensorflow/lite/kernels/internal/runtime_shape.cc | tensorflow/lite/kernels/internal/runtime_shape_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <memory>
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
template <int N>
struct Dims {
int sizes[N];
int strides[N];
};
class RuntimeShape {
public:
static constexpr int kMaxSmallSize = 6;
RuntimeShape& operator=(RuntimeShape const&) = delete;
RuntimeShape() : size_(0) {}
explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) {
if (dimensions_count > kMaxSmallSize) {
dims_pointer_ = new int32_t[dimensions_count];
}
}
RuntimeShape(int shape_size, int32_t value) : size_(0) {
Resize(shape_size);
for (int i = 0; i < shape_size; ++i) {
SetDim(i, value);
}
}
RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) {
ReplaceWith(dimensions_count, dims_data);
}
RuntimeShape(const std::initializer_list<int> init_list) : size_(0) {
BuildFrom(init_list);
}
RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) {
if (size_ > kMaxSmallSize) {
dims_pointer_ = new int32_t[size_];
}
std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_);
}
bool operator==(const RuntimeShape& comp) const {
return this->size_ == comp.size_ &&
std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) ==
0;
}
~RuntimeShape();
inline int32_t DimensionsCount() const { return size_; }
int32_t Dims(int i) const;
inline void SetDim(int i, int32_t val) {
TFLITE_DCHECK_GE(i, 0);
TFLITE_DCHECK_LT(i, size_);
if (size_ > kMaxSmallSize) {
dims_pointer_[i] = val;
} else {
dims_[i] = val;
}
}
inline int32_t* DimsData() {
return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
}
inline const int32_t* DimsData() const {
return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
}
inline const int32_t* DimsDataUpTo5D() const { return dims_; }
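  // Resizes in place, preserving the leading dimensions that still fit and
  // switching between the inline array and heap storage as the size crosses
  // kMaxSmallSize.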
inline void Resize(int dimensions_count) {
const int32_t old_size = size_;
size_ = dimensions_count;
if (old_size <= kMaxSmallSize) {
if (dimensions_count <= kMaxSmallSize) {
return;
} else {
int32_t* new_big_data = new int32_t[dimensions_count];
memcpy(new_big_data, dims_, sizeof(int32_t) * old_size);
dims_pointer_ = new_big_data;
}
} else {
if (dimensions_count > kMaxSmallSize && dimensions_count <= old_size) {
return;
}
std::unique_ptr<int32_t[]> old_data(dims_pointer_);
if (dimensions_count <= old_size) {
memcpy(dims_, old_data.get(), sizeof(int32_t) * dimensions_count);
} else {
dims_pointer_ = new int32_t[dimensions_count];
memcpy(dims_pointer_, old_data.get(), sizeof(int32_t) * old_size);
}
}
}
void ReplaceWith(int dimensions_count, const int32_t* dims_data);
template <typename T>
inline void BuildFrom(const T& src_iterable) {
const int dimensions_count =
std::distance(src_iterable.begin(), src_iterable.end());
Resize(dimensions_count);
int32_t* data = DimsData();
for (auto it : src_iterable) {
*data = it;
++data;
}
}
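  // Returns a copy of `shape` extended to `new_shape_size` dimensions by
  // padding the leading dimensions with 1.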
inline static RuntimeShape ExtendedShape(int new_shape_size,
const RuntimeShape& shape) {
return RuntimeShape(new_shape_size, shape, 1);
}
inline void BuildFrom(const std::initializer_list<int> init_list) {
BuildFrom<const std::initializer_list<int>>(init_list);
}
int FlatSize() const;
bool operator!=(const RuntimeShape& comp) const { return !((*this) == comp); }
private:
RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value)
: size_(0) {
TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount());
Resize(new_shape_size);
const int size_increase = new_shape_size - shape.DimensionsCount();
for (int i = 0; i < size_increase; ++i) {
SetDim(i, pad_value);
}
std::memcpy(DimsData() + size_increase, shape.DimsData(),
sizeof(int32_t) * shape.DimensionsCount());
}
int32_t size_;
union {
int32_t dims_[kMaxSmallSize];
int32_t* dims_pointer_;
};
};
inline tflite::Dims<4> ToRuntimeDims(const tflite::RuntimeShape& array_shape) {
tflite::Dims<4> result;
const int dimensions_count = array_shape.DimensionsCount();
TFLITE_CHECK_LE(dimensions_count, 4);
int cum_prod = 1;
for (int i = 0; i < 4; i++) {
const int new_dim =
(i < dimensions_count) ? array_shape.Dims(dimensions_count - 1 - i) : 1;
result.sizes[i] = new_dim;
result.strides[i] = cum_prod;
cum_prod *= new_dim;
}
return result;
}
inline RuntimeShape DimsToShape(const tflite::Dims<4>& dims) {
return RuntimeShape(
{dims.sizes[3], dims.sizes[2], dims.sizes[1], dims.sizes[0]});
}
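// Flat row-major offset of element (i0, i1, i2, i3) within a 4-D shape.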
inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) {
TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4);
const int* dims_data = reinterpret_cast<const int*>(shape.DimsDataUpTo5D());
TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
(i0 >= 0 && i0 < dims_data[0]));
TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
(i1 >= 0 && i1 < dims_data[1]));
TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
(i2 >= 0 && i2 < dims_data[2]));
TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
(i3 >= 0 && i3 < dims_data[3]));
return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3;
}
inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3,
int i4) {
TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5);
const int* dims_data = reinterpret_cast<const int*>(shape.DimsDataUpTo5D());
TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
(i0 >= 0 && i0 < dims_data[0]));
TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
(i1 >= 0 && i1 < dims_data[1]));
TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
(i2 >= 0 && i2 < dims_data[2]));
TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
(i3 >= 0 && i3 < dims_data[3]));
TFLITE_DCHECK((dims_data[4] == 0 && i4 == 0) ||
(i4 >= 0 && i4 < dims_data[4]));
return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) *
dims_data[4] +
i4;
}
inline int Offset(const RuntimeShape& shape, int* index) {
return Offset(shape, index[0], index[1], index[2], index[3]);
}
}
#endif
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include <cstring>
namespace tflite {
RuntimeShape::~RuntimeShape() {
if (size_ > kMaxSmallSize) {
delete[] dims_pointer_;
}
}
int32_t RuntimeShape::Dims(int i) const {
TFLITE_DCHECK_GE(i, 0);
TFLITE_DCHECK_LT(i, size_);
return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i];
}
void RuntimeShape::ReplaceWith(int dimensions_count, const int32_t* dims_data) {
Resize(dimensions_count);
int32_t* dst_dims = DimsData();
std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t));
}
int RuntimeShape::FlatSize() const {
int buffer_size = 1;
const int* dims_data = reinterpret_cast<const int*>(DimsData());
for (int i = 0; i < size_; i++) {
buffer_size *= dims_data[i];
}
return buffer_size;
}
} | #include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
using testing::Each;
using testing::ElementsAreArray;
namespace tflite {
namespace {
constexpr int kSmallSize = RuntimeShape::kMaxSmallSize;
constexpr int kBigSize = RuntimeShape::kMaxSmallSize + 1;
std::vector<int32_t> IotaVector(int size, int start = 0) {
std::vector<int32_t> vec(size);
absl::c_iota(vec, start);
return vec;
}
absl::Span<const int32_t> AsSpan(const RuntimeShape& shape) {
return absl::Span<const int32_t>(shape.DimsData(), shape.DimensionsCount());
}
class RuntimeShapeTest : public testing::TestWithParam<int> {};
TEST(RuntimeShapeTest, TestDefaultConstructor) {
const RuntimeShape shape;
EXPECT_EQ(shape.DimensionsCount(), 0);
}
TEST_P(RuntimeShapeTest, TestConstructorWithSize) {
const int size = GetParam();
const RuntimeShape shape(size);
EXPECT_EQ(shape.DimensionsCount(), size);
}
TEST_P(RuntimeShapeTest, TestConstructorWithSizeAndDefaultValue) {
const int size = GetParam();
const RuntimeShape shape(size, 34);
EXPECT_EQ(shape.DimensionsCount(), size);
EXPECT_THAT(AsSpan(shape), Each(34));
}
TEST_P(RuntimeShapeTest, TestConstructorFromCArray) {
const int size = GetParam();
const std::vector<int32_t> src = IotaVector(size);
const RuntimeShape shape(size, src.data());
EXPECT_EQ(shape.DimensionsCount(), size);
EXPECT_THAT(AsSpan(shape), ElementsAreArray(src));
}
TEST(RuntimeShapeTest, TestConstructorFromSmallInitList) {
std::initializer_list<int> init{1, 2, 3};
ASSERT_LE(init.size(), RuntimeShape::kMaxSmallSize);
const RuntimeShape shape(init);
EXPECT_EQ(shape.DimensionsCount(), init.size());
EXPECT_THAT(AsSpan(shape), ElementsAreArray(init));
}
TEST(RuntimeShapeTest, TestConstructorFromBigInitList) {
std::initializer_list<int> init{1, 2, 3, 4, 5, 6, 7, 8, 9};
ASSERT_GT(init.size(), RuntimeShape::kMaxSmallSize);
const RuntimeShape shape(init);
EXPECT_EQ(shape.DimensionsCount(), init.size());
EXPECT_THAT(AsSpan(shape), ElementsAreArray(init));
}
TEST_P(RuntimeShapeTest, TestCopyConstructorFromShape) {
const int size = GetParam();
const RuntimeShape src(size, 34);
const RuntimeShape dst(src);
EXPECT_EQ(dst.DimensionsCount(), src.DimensionsCount());
EXPECT_THAT(AsSpan(dst), ElementsAreArray(AsSpan(src)));
}
TEST_P(RuntimeShapeTest, TestEqualityOperator) {
const int size = GetParam();
const RuntimeShape shape1(size, 34);
const RuntimeShape shape2(size, 34);
EXPECT_TRUE(shape1 == shape2);
EXPECT_FALSE(shape1 != shape2);
}
TEST_P(RuntimeShapeTest, TestEqualityOperatorDifferentSizes) {
const int size = GetParam();
const RuntimeShape shape1(size, 34);
const RuntimeShape shape2(size + 1, 34);
EXPECT_FALSE(shape1 == shape2);
EXPECT_TRUE(shape1 != shape2);
}
TEST_P(RuntimeShapeTest, TestEqualityOperatorDifferentValues) {
const int size = GetParam();
const RuntimeShape shape1(size, 34);
const RuntimeShape shape2(size, 43);
EXPECT_FALSE(shape1 == shape2);
EXPECT_TRUE(shape1 != shape2);
}
TEST_P(RuntimeShapeTest, TestSetterGetter) {
const int size = GetParam();
RuntimeShape shape(size);
for (int i = 0; i < size; ++i) {
shape.SetDim(i, i);
EXPECT_EQ(shape.Dims(i), i);
}
EXPECT_THAT(AsSpan(shape), ElementsAreArray(IotaVector(size)));
}
TEST(RuntimeShapeTest, TestResizeSmallSmall) {
ASSERT_GE(kSmallSize, 1);
RuntimeShape shape(kSmallSize - 1, 23);
shape.Resize(kSmallSize);
EXPECT_EQ(shape.DimensionsCount(), kSmallSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kSmallSize - 1),
Each(23));
}
TEST(RuntimeShapeTest, TestResizeSmallBig) {
RuntimeShape shape(kSmallSize, 23);
shape.Resize(kBigSize);
EXPECT_EQ(shape.DimensionsCount(), kBigSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kSmallSize),
Each(23));
}
TEST(RuntimeShapeTest, TestResizeBigSmall) {
RuntimeShape shape(kBigSize, 23);
shape.Resize(kSmallSize);
EXPECT_EQ(shape.DimensionsCount(), kSmallSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kSmallSize),
Each(23));
}
TEST(RuntimeShapeTest, TestResizeDownBigBig) {
RuntimeShape shape(kBigSize + 3, 23);
shape.Resize(kBigSize);
EXPECT_EQ(shape.DimensionsCount(), kBigSize);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kBigSize), Each(23));
}
TEST(RuntimeShapeTest, TestResizeUpBigBig) {
RuntimeShape shape(kBigSize, 23);
shape.Resize(kBigSize + 1);
EXPECT_EQ(shape.DimensionsCount(), kBigSize + 1);
EXPECT_THAT(absl::Span<const int32_t>(shape.DimsData(), kBigSize), Each(23));
}
TEST_P(RuntimeShapeTest, TestReplaceWith) {
static_assert(
RuntimeShape::kMaxSmallSize > 2,
"kMaxSmallSize should be greater than 2 for this test to work.");
const int size = GetParam();
for (const int offset : {-2, 2}) {
const std::vector<int32_t> src =
IotaVector(offset + RuntimeShape::kMaxSmallSize);
RuntimeShape shape(size);
shape.ReplaceWith(src.size(), src.data());
EXPECT_EQ(shape.DimensionsCount(), src.size());
EXPECT_THAT(AsSpan(shape), testing::ElementsAreArray(src));
}
}
TEST_P(RuntimeShapeTest, TestBuildFrom) {
const int size = GetParam();
const std::vector<int32_t> src = IotaVector(size);
RuntimeShape shape;
shape.BuildFrom(src);
EXPECT_EQ(shape.DimensionsCount(), src.size());
EXPECT_THAT(AsSpan(shape), testing::ElementsAreArray(src));
}
TEST(RuntimeShapeTest, TestExtendedShapeSmall) {
ASSERT_GE(kSmallSize, 2);
const std::vector<int32_t> dims = IotaVector(kSmallSize - 2);
const RuntimeShape src(dims.size(), dims.data());
const RuntimeShape extended = RuntimeShape::ExtendedShape(kSmallSize, src);
EXPECT_EQ(extended.DimensionsCount(), kSmallSize);
EXPECT_EQ(extended.Dims(0), 1);
EXPECT_EQ(extended.Dims(1), 1);
EXPECT_THAT(absl::Span<const int32_t>(extended.DimsData() + 2, dims.size()),
ElementsAreArray(dims));
}
TEST(RuntimeShapeTest, TestExtendedShapeBig) {
ASSERT_GE(kSmallSize, 2);
const std::vector<int32_t> dims = IotaVector(kBigSize);
const RuntimeShape src(dims.size(), dims.data());
const RuntimeShape extended = RuntimeShape::ExtendedShape(kBigSize + 2, src);
EXPECT_EQ(extended.DimensionsCount(), kBigSize + 2);
EXPECT_EQ(extended.Dims(0), 1);
EXPECT_EQ(extended.Dims(1), 1);
EXPECT_THAT(absl::Span<const int32_t>(extended.DimsData() + 2, dims.size()),
ElementsAreArray(dims));
}
TEST(RuntimeShapeTest, TestExtendedShapeSmallToBig) {
const std::vector<int32_t> dims = IotaVector(kSmallSize);
const RuntimeShape src(dims.size(), dims.data());
const RuntimeShape extended = RuntimeShape::ExtendedShape(kBigSize, src);
EXPECT_EQ(extended.DimensionsCount(), kBigSize);
EXPECT_THAT(
absl::Span<const int32_t>(extended.DimsData(), kBigSize - kSmallSize),
Each(1));
EXPECT_THAT(absl::Span<const int32_t>(
extended.DimsData() + kBigSize - kSmallSize, dims.size()),
ElementsAreArray(dims));
}
TEST_P(RuntimeShapeTest, TestFlatSize) {
const std::vector<int32_t> src = IotaVector(kSmallSize);
const RuntimeShape shape(src.size(), src.data());
EXPECT_EQ(shape.FlatSize(),
std::reduce(src.begin(), src.end(), 1, std::multiplies<int>{}));
}
INSTANTIATE_TEST_SUITE_P(BigSmall, RuntimeShapeTest,
testing::Values(kSmallSize, kBigSize),
[](const testing::TestParamInfo<int>& info) {
return info.param == kSmallSize ? "Small" : "Big";
});
}
} |
937 | cpp | tensorflow/tensorflow | mfcc_dct | tensorflow/lite/kernels/internal/mfcc_dct.cc | tensorflow/core/kernels/mfcc_dct_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MFCC_DCT_H_
#define TENSORFLOW_CORE_KERNELS_MFCC_DCT_H_
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
class MfccDct {
public:
MfccDct();
bool Initialize(int input_length, int coefficient_count);
void Compute(const std::vector<double>& input,
std::vector<double>* output) const;
private:
bool initialized_;
int coefficient_count_;
int input_length_;
std::vector<std::vector<double> > cosines_;
MfccDct(const MfccDct&) = delete;
void operator=(const MfccDct&) = delete;
};
}
#endif
#include "tensorflow/core/kernels/mfcc_dct.h"
#include <math.h>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
MfccDct::MfccDct() : initialized_(false) {}
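// Precomputes the DCT-II cosine basis scaled by sqrt(2 / input_length) so
// that Compute() reduces to a matrix-vector product.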
bool MfccDct::Initialize(int input_length, int coefficient_count) {
coefficient_count_ = coefficient_count;
input_length_ = input_length;
if (coefficient_count_ < 1) {
LOG(ERROR) << "Coefficient count must be positive.";
return false;
}
if (input_length < 1) {
LOG(ERROR) << "Input length must be positive.";
return false;
}
if (coefficient_count_ > input_length_) {
LOG(ERROR) << "Coefficient count must be less than or equal to "
<< "input length.";
return false;
}
cosines_.resize(coefficient_count_);
double fnorm = sqrt(2.0 / input_length_);
const double pi = std::atan(1) * 4;
double arg = pi / input_length_;
for (int i = 0; i < coefficient_count_; ++i) {
cosines_[i].resize(input_length_);
for (int j = 0; j < input_length_; ++j) {
cosines_[i][j] = fnorm * cos(i * arg * (j + 0.5));
}
}
initialized_ = true;
return true;
}
void MfccDct::Compute(const std::vector<double> &input,
std::vector<double> *output) const {
if (!initialized_) {
LOG(ERROR) << "DCT not initialized.";
return;
}
output->resize(coefficient_count_);
int length = input.size();
if (length > input_length_) {
length = input_length_;
}
for (int i = 0; i < coefficient_count_; ++i) {
double sum = 0.0;
for (int j = 0; j < length; ++j) {
sum += cosines_[i][j] * input[j];
}
(*output)[i] = sum;
}
}
} | #include "tensorflow/core/kernels/mfcc_dct.h"
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MfccDctTest, AgreesWithMatlab) {
MfccDct dct;
std::vector<double> input = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
const int kCoefficientCount = 6;
ASSERT_TRUE(dct.Initialize(input.size(), kCoefficientCount));
std::vector<double> output;
dct.Compute(input, &output);
std::vector<double> expected = {12.1243556530, -4.1625617959, 0.0,
-0.4082482905, 0.0, -0.0800788912};
ASSERT_EQ(output.size(), kCoefficientCount);
for (int i = 0; i < kCoefficientCount; ++i) {
EXPECT_NEAR(output[i], expected[i], 1e-10);
}
}
TEST(MfccDctTest, InitializeFailsOnInvalidInput) {
MfccDct dct1;
EXPECT_FALSE(dct1.Initialize(-50, 1));
EXPECT_FALSE(dct1.Initialize(10, -4));
EXPECT_FALSE(dct1.Initialize(-1, -1));
EXPECT_FALSE(dct1.Initialize(20, 21));
}
} |
938 | cpp | tensorflow/tensorflow | mfcc_mel_filterbank | tensorflow/lite/kernels/internal/mfcc_mel_filterbank.cc | tensorflow/core/kernels/mfcc_mel_filterbank_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MFCC_MEL_FILTERBANK_H_
#define TENSORFLOW_CORE_KERNELS_MFCC_MEL_FILTERBANK_H_
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
class MfccMelFilterbank {
public:
MfccMelFilterbank();
bool Initialize(int input_length,
double input_sample_rate, int output_channel_count,
double lower_frequency_limit, double upper_frequency_limit);
void Compute(const std::vector<double>& input,
std::vector<double>* output) const;
private:
double FreqToMel(double freq) const;
bool initialized_;
int num_channels_;
double sample_rate_;
int input_length_;
std::vector<double> center_frequencies_;
std::vector<double> weights_;
std::vector<int> band_mapper_;
int start_index_;
int end_index_;
MfccMelFilterbank(const MfccMelFilterbank&) = delete;
void operator=(const MfccMelFilterbank&) = delete;
};
}
#endif
#include "tensorflow/core/kernels/mfcc_mel_filterbank.h"
#include <math.h>
#include <limits>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
MfccMelFilterbank::MfccMelFilterbank() : initialized_(false) {}
bool MfccMelFilterbank::Initialize(int input_length, double input_sample_rate,
int output_channel_count,
double lower_frequency_limit,
double upper_frequency_limit) {
num_channels_ = output_channel_count;
sample_rate_ = input_sample_rate;
input_length_ = input_length;
if (num_channels_ < 1) {
LOG(ERROR) << "Number of filterbank channels must be positive.";
return false;
}
if (sample_rate_ <= 0) {
LOG(ERROR) << "Sample rate must be positive.";
return false;
}
if (input_length < 2) {
LOG(ERROR) << "Input length must greater than 1.";
return false;
}
if (lower_frequency_limit < 0) {
LOG(ERROR) << "Lower frequency limit must be nonnegative.";
return false;
}
if (upper_frequency_limit <= lower_frequency_limit) {
LOG(ERROR) << "Upper frequency limit must be greater than "
<< "lower frequency limit.";
return false;
}
std::size_t center_frequencies_size = std::size_t(num_channels_) + 1;
if (center_frequencies_size >= std::numeric_limits<int>::max() ||
center_frequencies_size > center_frequencies_.max_size()) {
LOG(ERROR) << "Number of filterbank channels must be less than "
<< std::numeric_limits<int>::max()
<< " and less than or equal to "
<< center_frequencies_.max_size();
return false;
}
center_frequencies_.resize(center_frequencies_size);
const double mel_low = FreqToMel(lower_frequency_limit);
const double mel_hi = FreqToMel(upper_frequency_limit);
const double mel_span = mel_hi - mel_low;
const double mel_spacing = mel_span / static_cast<double>(num_channels_ + 1);
for (int i = 0; i < num_channels_ + 1; ++i) {
center_frequencies_[i] = mel_low + (mel_spacing * (i + 1));
}
const double hz_per_sbin =
0.5 * sample_rate_ / static_cast<double>(input_length_ - 1);
start_index_ = static_cast<int>(1.5 + (lower_frequency_limit / hz_per_sbin));
end_index_ = static_cast<int>(upper_frequency_limit / hz_per_sbin);
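  // Map each FFT bin to the mel channel just below it. Bins outside
  // [start_index_, end_index_] are marked -2 and ignored; bins below the
  // first channel center map to -1.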
band_mapper_.resize(input_length_);
int channel = 0;
for (int i = 0; i < input_length_; ++i) {
double melf = FreqToMel(i * hz_per_sbin);
if ((i < start_index_) || (i > end_index_)) {
band_mapper_[i] = -2;
} else {
while ((channel < num_channels_) &&
(center_frequencies_[channel] < melf)) {
++channel;
}
band_mapper_[i] = channel - 1;
}
}
weights_.resize(input_length_);
for (int i = 0; i < input_length_; ++i) {
channel = band_mapper_[i];
if ((i < start_index_) || (i > end_index_)) {
weights_[i] = 0.0;
} else {
if (channel >= 0) {
weights_[i] =
(center_frequencies_[channel + 1] - FreqToMel(i * hz_per_sbin)) /
(center_frequencies_[channel + 1] - center_frequencies_[channel]);
} else {
weights_[i] = (center_frequencies_[0] - FreqToMel(i * hz_per_sbin)) /
(center_frequencies_[0] - mel_low);
}
}
}
std::vector<int> bad_channels;
for (int c = 0; c < num_channels_; ++c) {
float band_weights_sum = 0.0;
for (int i = 0; i < input_length_; ++i) {
if (band_mapper_[i] == c - 1) {
band_weights_sum += (1.0 - weights_[i]);
} else if (band_mapper_[i] == c) {
band_weights_sum += weights_[i];
}
}
if (band_weights_sum < 0.5) {
bad_channels.push_back(c);
}
}
if (!bad_channels.empty()) {
LOG(ERROR) << "Missing " << bad_channels.size() << " bands "
<< " starting at " << bad_channels[0]
<< " in mel-frequency design. "
<< "Perhaps too many channels or "
<< "not enough frequency resolution in spectrum. ("
<< "input_length: " << input_length
<< " input_sample_rate: " << input_sample_rate
<< " output_channel_count: " << output_channel_count
<< " lower_frequency_limit: " << lower_frequency_limit
<< " upper_frequency_limit: " << upper_frequency_limit;
}
initialized_ = true;
return true;
}
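// Applies the triangular mel filterbank to a squared-magnitude spectrum:
// each bin contributes sqrt(input[i]) * weights_[i] to channel
// band_mapper_[i] (when nonnegative) and the remainder to the next channel.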
void MfccMelFilterbank::Compute(const std::vector<double> &input,
std::vector<double> *output) const {
if (!initialized_) {
LOG(ERROR) << "Mel Filterbank not initialized.";
return;
}
if (input.size() <= end_index_) {
LOG(ERROR) << "Input too short to compute filterbank";
return;
}
output->assign(num_channels_, 0.0);
for (int i = start_index_; i <= end_index_; i++) {
double spec_val = sqrt(input[i]);
double weighted = spec_val * weights_[i];
int channel = band_mapper_[i];
if (channel >= 0)
(*output)[channel] += weighted;
channel++;
if (channel < num_channels_)
(*output)[channel] += spec_val - weighted;
}
}
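// Converts a frequency in Hz to the mel scale: mel = 1127 * ln(1 + f / 700).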
double MfccMelFilterbank::FreqToMel(double freq) const {
return 1127.0 * log1p(freq / 700.0);
}
} | #include "tensorflow/core/kernels/mfcc_mel_filterbank.h"
#include <limits>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MfccMelFilterbankTest, AgreesWithPythonGoldenValues) {
MfccMelFilterbank filterbank;
std::vector<double> input;
const int kSampleCount = 513;
input.reserve(kSampleCount);
for (int i = 0; i < kSampleCount; ++i) {
input.push_back(i + 1);
}
const int kChannelCount = 20;
filterbank.Initialize(
input.size(), 22050, kChannelCount,
20.0, 4000.0);
std::vector<double> output;
filterbank.Compute(input, &output);
std::vector<double> expected = {
7.38894574, 10.30330648, 13.72703292, 17.24158686, 21.35253118,
25.77781089, 31.30624108, 37.05877236, 43.9436536, 51.80306637,
60.79867148, 71.14363376, 82.90910141, 96.50069158, 112.08428368,
129.96721968, 150.4277597, 173.74997634, 200.86037462, 231.59802942};
ASSERT_EQ(output.size(), kChannelCount);
for (int i = 0; i < kChannelCount; ++i) {
EXPECT_NEAR(output[i], expected[i], 1e-04);
}
}
TEST(MfccMelFilterbankTest, IgnoresExistingContentOfOutputVector) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::vector<double> input;
std::vector<double> output;
filterbank.Initialize(kSampleCount, 22050,
20, 20.0,
4000.0);
input.assign(kSampleCount, 1.0);
filterbank.Compute(input, &output);
for (const double value : output) {
EXPECT_LE(0.0, value);
}
input.assign(kSampleCount, 0.0);
filterbank.Compute(input, &output);
for (const double value : output) {
EXPECT_EQ(0.0, value);
}
}
TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxIntValue) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::size_t num_channels = std::numeric_limits<int>::max();
bool initialized = filterbank.Initialize(
kSampleCount, 2, num_channels,
1.0, 5.0);
EXPECT_FALSE(initialized);
}
TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxSize) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::size_t num_channels = std::vector<double>().max_size() + 1;
bool initialized = filterbank.Initialize(
kSampleCount, 2, num_channels,
1.0, 5.0);
EXPECT_FALSE(initialized);
}
} |
939 | cpp | tensorflow/tensorflow | spectrogram | tensorflow/lite/kernels/internal/spectrogram.cc | tensorflow/core/kernels/spectrogram_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SPECTROGRAM_H_
#define TENSORFLOW_CORE_KERNELS_SPECTROGRAM_H_
#include <complex>
#include <deque>
#include <vector>
#include "third_party/fft2d/fft.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
class Spectrogram {
public:
Spectrogram() : initialized_(false) {}
~Spectrogram() {}
bool Initialize(int window_length, int step_length);
bool Initialize(const std::vector<double>& window, int step_length);
bool Reset();
template <class InputSample, class OutputSample>
bool ComputeComplexSpectrogram(
const std::vector<InputSample>& input,
std::vector<std::vector<std::complex<OutputSample>>>* output);
template <class InputSample, class OutputSample>
bool ComputeSquaredMagnitudeSpectrogram(
const std::vector<InputSample>& input,
std::vector<std::vector<OutputSample>>* output);
const std::vector<double>& GetWindow() const { return window_; }
int output_frequency_channels() const { return output_frequency_channels_; }
private:
template <class InputSample>
bool GetNextWindowOfSamples(const std::vector<InputSample>& input,
int* input_start);
void ProcessCoreFFT();
int fft_length_;
int output_frequency_channels_;
int window_length_;
int step_length_;
bool initialized_;
int samples_to_next_step_;
std::vector<double> window_;
std::vector<double> fft_input_output_;
std::deque<double> input_queue_;
std::vector<int> fft_integer_working_area_;
std::vector<double> fft_double_working_area_;
Spectrogram(const Spectrogram&) = delete;
void operator=(const Spectrogram&) = delete;
};
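// Illustrative usage sketch (hypothetical values and variable names, not part
// of the original file):
//   Spectrogram sgram;
//   sgram.Initialize(400, 200);
//   std::vector<std::vector<std::complex<double>>> stft;
//   sgram.ComputeComplexSpectrogram(samples, &stft);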
}
#endif
#include "tensorflow/core/kernels/spectrogram.h"
#include <math.h>
#include "third_party/fft2d/fft.h"
#include "tensorflow/core/lib/core/bits.h"
namespace tensorflow {
using std::complex;
namespace {
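// Fills `window` with a periodic Hann window of the given length:
// w[i] = 0.5 - 0.5 * cos(2 * pi * i / window_length).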
void GetPeriodicHann(int window_length, std::vector<double>* window) {
const double pi = std::atan(1) * 4;
window->resize(window_length);
for (int i = 0; i < window_length; ++i) {
(*window)[i] = 0.5 - 0.5 * cos((2 * pi * i) / window_length);
}
}
}
bool Spectrogram::Initialize(int window_length, int step_length) {
std::vector<double> window;
GetPeriodicHann(window_length, &window);
return Initialize(window, step_length);
}
bool Spectrogram::Initialize(const std::vector<double>& window,
int step_length) {
window_length_ = window.size();
window_ = window;
if (window_length_ < 2) {
LOG(ERROR) << "Window length too short.";
initialized_ = false;
return false;
}
step_length_ = step_length;
if (step_length_ < 1) {
LOG(ERROR) << "Step length must be positive.";
initialized_ = false;
return false;
}
fft_length_ = NextPowerOfTwo(window_length_);
CHECK(fft_length_ >= window_length_);
output_frequency_channels_ = 1 + fft_length_ / 2;
fft_input_output_.resize(fft_length_ + 2);
int half_fft_length = fft_length_ / 2;
fft_double_working_area_.resize(half_fft_length);
fft_integer_working_area_.resize(2 + static_cast<int>(sqrt(half_fft_length)));
initialized_ = true;
if (!Reset()) {
LOG(ERROR) << "Failed to Reset()";
return false;
}
return true;
}
bool Spectrogram::Reset() {
if (!initialized_) {
LOG(ERROR) << "Initialize() has to be called, before Reset().";
return false;
}
std::fill(fft_double_working_area_.begin(), fft_double_working_area_.end(),
0.0);
std::fill(fft_integer_working_area_.begin(), fft_integer_working_area_.end(),
0);
fft_integer_working_area_[0] = 0;
input_queue_.clear();
samples_to_next_step_ = window_length_;
return true;
}
template <class InputSample, class OutputSample>
bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<InputSample>& input,
std::vector<std::vector<complex<OutputSample>>>* output) {
if (!initialized_) {
LOG(ERROR) << "ComputeComplexSpectrogram() called before successful call "
<< "to Initialize().";
return false;
}
CHECK(output);
output->clear();
int input_start = 0;
while (GetNextWindowOfSamples(input, &input_start)) {
DCHECK_EQ(input_queue_.size(), window_length_);
ProcessCoreFFT();
output->resize(output->size() + 1);
auto& spectrogram_slice = output->back();
spectrogram_slice.resize(output_frequency_channels_);
for (int i = 0; i < output_frequency_channels_; ++i) {
spectrogram_slice[i] = complex<OutputSample>(
fft_input_output_[2 * i], fft_input_output_[2 * i + 1]);
}
}
return true;
}
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<float>& input, std::vector<std::vector<complex<float>>>*);
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<double>& input,
std::vector<std::vector<complex<float>>>*);
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<float>& input,
std::vector<std::vector<complex<double>>>*);
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<double>& input,
std::vector<std::vector<complex<double>>>*);
template <class InputSample, class OutputSample>
bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<InputSample>& input,
std::vector<std::vector<OutputSample>>* output) {
if (!initialized_) {
LOG(ERROR) << "ComputeSquaredMagnitudeSpectrogram() called before "
<< "successful call to Initialize().";
return false;
}
CHECK(output);
output->clear();
int input_start = 0;
while (GetNextWindowOfSamples(input, &input_start)) {
DCHECK_EQ(input_queue_.size(), window_length_);
ProcessCoreFFT();
output->resize(output->size() + 1);
auto& spectrogram_slice = output->back();
spectrogram_slice.resize(output_frequency_channels_);
for (int i = 0; i < output_frequency_channels_; ++i) {
const double re = fft_input_output_[2 * i];
const double im = fft_input_output_[2 * i + 1];
spectrogram_slice[i] = re * re + im * im;
}
}
return true;
}
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<float>& input, std::vector<std::vector<float>>*);
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<double>& input, std::vector<std::vector<float>>*);
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<float>& input, std::vector<std::vector<double>>*);
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<double>& input, std::vector<std::vector<double>>*);
template <class InputSample>
bool Spectrogram::GetNextWindowOfSamples(const std::vector<InputSample>& input,
int* input_start) {
auto input_it = input.begin() + *input_start;
int input_remaining = input.end() - input_it;
if (samples_to_next_step_ > input_remaining) {
input_queue_.insert(input_queue_.end(), input_it, input.end());
*input_start += input_remaining;
samples_to_next_step_ -= input_remaining;
return false;
} else {
input_queue_.insert(input_queue_.end(), input_it,
input_it + samples_to_next_step_);
*input_start += samples_to_next_step_;
input_queue_.erase(
input_queue_.begin(),
input_queue_.begin() + input_queue_.size() - window_length_);
DCHECK_EQ(window_length_, input_queue_.size());
samples_to_next_step_ = step_length_;
return true;
}
}
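// Applies the analysis window, zero-pads to fft_length_, runs an in-place
// real FFT (rdft), and moves the Nyquist-bin value from slot 1 to the end so
// the buffer holds output_frequency_channels_ interleaved (re, im) pairs.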
void Spectrogram::ProcessCoreFFT() {
for (int j = 0; j < window_length_; ++j) {
fft_input_output_[j] = input_queue_[j] * window_[j];
}
for (int j = window_length_; j < fft_length_; ++j) {
fft_input_output_[j] = 0.0;
}
const int kForwardFFT = 1;
rdft(fft_length_, kForwardFFT, &fft_input_output_[0],
&fft_integer_working_area_[0], &fft_double_working_area_[0]);
fft_input_output_[fft_length_] = fft_input_output_[1];
fft_input_output_[fft_length_ + 1] = 0;
fft_input_output_[1] = 0;
}
} | #include "tensorflow/core/kernels/spectrogram.h"
#include <complex>
#include <vector>
#include "tensorflow/core/kernels/spectrogram_test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using ::std::complex;
string InputFilename() {
return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data",
"short_test_segment.wav");
}
string ExpectedFilename() {
return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data",
"short_test_segment_spectrogram.csv.bin");
}
const int kDataVectorLength = 257;
const int kNumberOfFramesInTestData = 178;
string ExpectedNonPowerOfTwoFilename() {
return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data",
"short_test_segment_spectrogram_400_200.csv.bin");
}
const int kNonPowerOfTwoDataVectorLength = 257;
const int kNumberOfFramesInNonPowerOfTwoTestData = 228;
TEST(SpectrogramTest, TooLittleDataYieldsNoFrames) {
Spectrogram sgram;
sgram.Initialize(400, 200);
std::vector<double> input;
SineWave(44100, 1000.0, 0.001, &input);
EXPECT_EQ(44, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(0, output.size());
}
TEST(SpectrogramTest, StepSizeSmallerThanWindow) {
Spectrogram sgram;
EXPECT_TRUE(sgram.Initialize(400, 200));
std::vector<double> input;
SineWave(44100, 1000.0, 0.015, &input);
EXPECT_EQ(661, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(2, output.size());
}
TEST(SpectrogramTest, StepSizeBiggerThanWindow) {
Spectrogram sgram;
EXPECT_TRUE(sgram.Initialize(200, 400));
std::vector<double> input;
SineWave(44100, 1000.0, 0.02, &input);
EXPECT_EQ(882, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(2, output.size());
}
TEST(SpectrogramTest, StepSizeBiggerThanWindow2) {
Spectrogram sgram;
EXPECT_TRUE(sgram.Initialize(200, 400));
std::vector<double> input;
SineWave(44100, 1000.0, 0.016, &input);
EXPECT_GT(input.size(), 600);
EXPECT_LT(input.size(), 800);
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(2, output.size());
}
TEST(SpectrogramTest,
MultipleCallsToComputeComplexSpectrogramMayYieldDifferentNumbersOfFrames) {
Spectrogram sgram;
sgram.Initialize(200, 400);
std::vector<double> input;
SineWave(44100, 1000.0, 0.02, &input);
EXPECT_EQ(882, input.size());
std::vector<std::vector<complex<double>>> output;
const std::vector<int> expected_output_sizes = {
2,
2,
3,
};
for (int expected_output_size : expected_output_sizes) {
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(expected_output_size, output.size());
}
}
TEST(SpectrogramTest, CumulatingExcessInputsForOverlappingFrames) {
Spectrogram sgram;
sgram.Initialize(400, 200);
std::vector<double> input;
SineWave(44100, 1000.0, 0.02, &input);
EXPECT_EQ(882, input.size());
std::vector<std::vector<complex<double>>> output;
const std::vector<int> expected_output_sizes = {
3,
4,
5,
};
for (int expected_output_size : expected_output_sizes) {
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(expected_output_size, output.size());
}
}
TEST(SpectrogramTest, StepSizeEqualToWindowWorks) {
Spectrogram sgram;
sgram.Initialize(200, 200);
std::vector<double> input;
SineWave(44100, 1000.0, 0.05, &input);
EXPECT_EQ(2205, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(11, output.size());
}
template <class ExpectedSample, class ActualSample>
void CompareComplexData(
const std::vector<std::vector<complex<ExpectedSample>>>& expected,
const std::vector<std::vector<complex<ActualSample>>>& actual,
double tolerance) {
ASSERT_EQ(actual.size(), expected.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i].size(), actual[i].size());
for (int j = 0; j < expected[i].size(); ++j) {
ASSERT_NEAR(real(expected[i][j]), real(actual[i][j]), tolerance)
<< ": where i=" << i << " and j=" << j << ".";
ASSERT_NEAR(imag(expected[i][j]), imag(actual[i][j]), tolerance)
<< ": where i=" << i << " and j=" << j << ".";
}
}
}
template <class Sample>
double GetMaximumAbsolute(const std::vector<std::vector<Sample>>& spectrogram) {
double max_absolute = 0.0;
for (int i = 0; i < spectrogram.size(); ++i) {
for (int j = 0; j < spectrogram[i].size(); ++j) {
double absolute_value = std::abs(spectrogram[i][j]);
if (absolute_value > max_absolute) {
max_absolute = absolute_value;
}
}
}
return max_absolute;
}
template <class ExpectedSample, class ActualSample>
void CompareMagnitudeData(
const std::vector<std::vector<complex<ExpectedSample>>>&
expected_complex_output,
const std::vector<std::vector<ActualSample>>& actual_squared_magnitude,
double tolerance) {
ASSERT_EQ(actual_squared_magnitude.size(), expected_complex_output.size());
for (int i = 0; i < expected_complex_output.size(); ++i) {
ASSERT_EQ(expected_complex_output[i].size(),
actual_squared_magnitude[i].size());
for (int j = 0; j < expected_complex_output[i].size(); ++j) {
ASSERT_NEAR(norm(expected_complex_output[i][j]),
actual_squared_magnitude[i][j], tolerance)
<< ": where i=" << i << " and j=" << j << ".";
}
}
}
TEST(SpectrogramTest, ReInitializationWorks) {
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
std::vector<std::vector<complex<double>>> first_output;
std::vector<std::vector<complex<double>>> second_output;
sgram.Initialize(512, 256);
sgram.ComputeComplexSpectrogram(input, &first_output);
sgram.Initialize(512, 256);
sgram.ComputeComplexSpectrogram(input, &second_output);
ASSERT_EQ(first_output.size(), second_output.size());
int slice_size = first_output[0].size();
for (int i = 0; i < first_output.size(); ++i) {
ASSERT_EQ(slice_size, first_output[i].size());
ASSERT_EQ(slice_size, second_output[i].size());
for (int j = 0; j < slice_size; ++j) {
ASSERT_EQ(first_output[i][j], second_output[i][j]);
}
}
}
TEST(SpectrogramTest, ComputedComplexDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
CompareComplexData(expected_output, output, 1e-5);
}
TEST(SpectrogramTest, ComputedFloatComplexDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> double_input;
CHECK(ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()),
&double_input));
std::vector<float> input;
input.assign(double_input.begin(), double_input.end());
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<complex<float>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
CompareComplexData(expected_output, output, 1e-4);
}
TEST(SpectrogramTest, ComputedSquaredMagnitudeDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<double>> output;
sgram.ComputeSquaredMagnitudeSpectrogram(input, &output);
CompareMagnitudeData(expected_output, output, 1e-3);
}
TEST(SpectrogramTest, ComputedFloatSquaredMagnitudeDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> double_input;
CHECK(ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()),
&double_input));
EXPECT_EQ(kInputDataLength, double_input.size());
std::vector<float> input;
input.assign(double_input.begin(), double_input.end());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<float>> output;
sgram.ComputeSquaredMagnitudeSpectrogram(input, &output);
double max_absolute = GetMaximumAbsolute(output);
EXPECT_GT(max_absolute, 2300.0);
CompareMagnitudeData(expected_output, output, 2e-4);
}
TEST(SpectrogramTest, ComputedNonPowerOfTwoComplexDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(400, 200);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedNonPowerOfTwoFilename()),
kNonPowerOfTwoDataVectorLength, &expected_output));
EXPECT_EQ(kNumberOfFramesInNonPowerOfTwoTestData, expected_output.size());
EXPECT_EQ(kNonPowerOfTwoDataVectorLength, expected_output[0].size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
CompareComplexData(expected_output, output, 1e-5);
}
} |
940 | cpp | tensorflow/tensorflow | common | tensorflow/lite/core/c/common.cc | tensorflow/lite/core/c/common_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_CLIENT_COMMON_H_
#define TENSORFLOW_CORE_DATA_SERVICE_CLIENT_COMMON_H_
#include <cstdint>
#include <optional>
#include <string>
#include "absl/time/time.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
struct DataServiceParams final {
std::string dataset_id;
ProcessingModeDef processing_mode;
std::string address;
std::string protocol;
std::string data_transfer_protocol;
std::string job_name;
int64_t repetition = 0;
std::optional<int64_t> num_consumers;
std::optional<int64_t> consumer_index;
int64_t max_outstanding_requests = 0;
absl::Duration task_refresh_interval;
TargetWorkers target_workers = TargetWorkers::TARGET_WORKERS_UNSPECIFIED;
DataServiceMetadata metadata;
std::optional<CrossTrainerCacheOptions> cross_trainer_cache_options;
};
}
}
#endif
#include "tensorflow/core/data/service/common.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kAuto[] = "AUTO";
constexpr const char kAny[] = "ANY";
constexpr const char kLocal[] = "LOCAL";
constexpr const char kColocated[] = "COLOCATED";
constexpr const char kRemote[] = "REMOTE";
constexpr const char kHybrid[] = "HYBRID";
}
bool IsNoShard(const ProcessingModeDef& processing_mode) {
return processing_mode.sharding_policy() == ProcessingModeDef::OFF;
}
bool IsDynamicShard(const ProcessingModeDef& processing_mode) {
return processing_mode.sharding_policy() == ProcessingModeDef::DYNAMIC;
}
bool IsStaticShard(const ProcessingModeDef& processing_mode) {
return processing_mode.sharding_policy() == ProcessingModeDef::FILE ||
processing_mode.sharding_policy() == ProcessingModeDef::DATA ||
processing_mode.sharding_policy() == ProcessingModeDef::FILE_OR_DATA ||
processing_mode.sharding_policy() == ProcessingModeDef::HINT;
}
Status ValidateProcessingMode(const ProcessingModeDef& processing_mode) {
if (!IsNoShard(processing_mode) && !IsDynamicShard(processing_mode) &&
!IsStaticShard(processing_mode)) {
return errors::Internal(
"ProcessingMode ", processing_mode.ShortDebugString(),
" does not "
"specify a valid sharding policy. Please add the policy to either "
"`IsDynamicShard` or `IsStaticShard` (i.e., auto-shard).");
}
return absl::OkStatus();
}
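// Maps a tf.data service sharding policy onto the corresponding
// AutoShardPolicy; DYNAMIC and OFF both translate to AutoShardPolicy::OFF.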
absl::StatusOr<AutoShardPolicy> ToAutoShardPolicy(
const ProcessingModeDef::ShardingPolicy sharding_policy) {
switch (sharding_policy) {
case ProcessingModeDef::FILE:
return AutoShardPolicy::FILE;
case ProcessingModeDef::DATA:
return AutoShardPolicy::DATA;
case ProcessingModeDef::FILE_OR_DATA:
return AutoShardPolicy::AUTO;
case ProcessingModeDef::HINT:
return AutoShardPolicy::HINT;
case ProcessingModeDef::DYNAMIC:
case ProcessingModeDef::OFF:
return AutoShardPolicy::OFF;
default:
return errors::Internal(
"tf.data service sharding policy ",
ProcessingModeDef::ShardingPolicy_Name(sharding_policy),
" is not convertible to a valid auto-shard policy. If you're "
"defining a new sharding policy, please update the policy mapping.");
}
}
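// Parses a case-insensitive target-workers string; an empty string or "AUTO"
// maps to TARGET_WORKERS_AUTO, anything unrecognized is an invalid argument.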
absl::StatusOr<TargetWorkers> ParseTargetWorkers(absl::string_view s) {
std::string str_upper = absl::AsciiStrToUpper(s);
if (str_upper.empty() || str_upper == kAuto) {
return TARGET_WORKERS_AUTO;
}
if (str_upper == kAny) {
return TARGET_WORKERS_ANY;
}
if (str_upper == kLocal) {
return TARGET_WORKERS_LOCAL;
}
return errors::InvalidArgument("Unrecognized target workers: ", s);
}
std::string TargetWorkersToString(TargetWorkers target_workers) {
switch (target_workers) {
case TARGET_WORKERS_AUTO:
return kAuto;
case TARGET_WORKERS_ANY:
return kAny;
case TARGET_WORKERS_LOCAL:
return kLocal;
default:
DCHECK(false);
return "UNKNOWN";
}
}
absl::StatusOr<DeploymentMode> ParseDeploymentMode(absl::string_view s) {
std::string str_upper = absl::AsciiStrToUpper(s);
if (str_upper == kColocated) {
return DEPLOYMENT_MODE_COLOCATED;
}
if (str_upper == kRemote) {
return DEPLOYMENT_MODE_REMOTE;
}
if (str_upper == kHybrid) {
return DEPLOYMENT_MODE_HYBRID;
}
return errors::InvalidArgument("Invalid tf.data service deployment mode: ", s,
". Supported modes are "
"COLOCATED, REMOTE, and HYBRID.");
}
bool IsPreemptedError(const Status& status) {
return errors::IsAborted(status) || errors::IsCancelled(status) ||
errors::IsUnavailable(status);
}
}
} | #include "tensorflow/core/data/service/common.h"
#include <vector>
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
std::vector<ProcessingModeDef::ShardingPolicy> EnumerateShardingPolicies() {
std::vector<ProcessingModeDef::ShardingPolicy> result;
const ::tensorflow::protobuf::EnumDescriptor* enum_descriptor =
::tensorflow::protobuf::GetEnumDescriptor<
ProcessingModeDef::ShardingPolicy>();
for (int i = 0; i < enum_descriptor->value_count(); ++i) {
result.push_back(static_cast<ProcessingModeDef::ShardingPolicy>(
enum_descriptor->value(i)->number()));
}
return result;
}
TEST(CommonTest, NoShard) {
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
EXPECT_TRUE(IsNoShard(processing_mode));
EXPECT_FALSE(IsDynamicShard(processing_mode));
EXPECT_FALSE(IsStaticShard(processing_mode));
}
TEST(CommonTest, DynamicShard) {
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::DYNAMIC);
EXPECT_FALSE(IsNoShard(processing_mode));
EXPECT_TRUE(IsDynamicShard(processing_mode));
EXPECT_FALSE(IsStaticShard(processing_mode));
}
TEST(CommonTest, StaticShard) {
ProcessingModeDef processing_mode;
std::vector<ProcessingModeDef::ShardingPolicy> policies = {
ProcessingModeDef::FILE, ProcessingModeDef::DATA,
ProcessingModeDef::FILE_OR_DATA, ProcessingModeDef::HINT};
for (const ProcessingModeDef::ShardingPolicy policy : policies) {
processing_mode.set_sharding_policy(policy);
EXPECT_FALSE(IsNoShard(processing_mode));
EXPECT_FALSE(IsDynamicShard(processing_mode));
EXPECT_TRUE(IsStaticShard(processing_mode));
}
}
TEST(CommonTest, DefaultShardingPolicyIsNoShard) {
ProcessingModeDef processing_mode;
EXPECT_TRUE(IsNoShard(processing_mode));
EXPECT_FALSE(IsDynamicShard(processing_mode));
EXPECT_FALSE(IsStaticShard(processing_mode));
}
TEST(CommonTest, ToAutoShardPolicy) {
EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::FILE_OR_DATA),
IsOkAndHolds(AutoShardPolicy::AUTO));
EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::HINT),
IsOkAndHolds(AutoShardPolicy::HINT));
EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::OFF),
IsOkAndHolds(AutoShardPolicy::OFF));
EXPECT_THAT(ToAutoShardPolicy(ProcessingModeDef::DYNAMIC),
IsOkAndHolds(AutoShardPolicy::OFF));
}
TEST(CommonTest, ConvertValidShardingPolicyToAutoShardPolicy) {
for (const ProcessingModeDef::ShardingPolicy sharding_policy :
EnumerateShardingPolicies()) {
TF_EXPECT_OK(ToAutoShardPolicy(sharding_policy).status());
}
}
TEST(CommonTest, ConvertInvalidShardingPolicyToAutoShardPolicy) {
const ProcessingModeDef::ShardingPolicy sharding_policy =
static_cast<ProcessingModeDef::ShardingPolicy>(-100);
EXPECT_THAT(ToAutoShardPolicy(sharding_policy),
StatusIs(error::INTERNAL,
HasSubstr("please update the policy mapping.")));
}
TEST(CommonTest, ValidateProcessingMode) {
for (const ProcessingModeDef::ShardingPolicy policy :
EnumerateShardingPolicies()) {
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(policy);
TF_EXPECT_OK(ValidateProcessingMode(processing_mode));
}
}
TEST(CommonTest, InvalidProcessingMode) {
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(
static_cast<ProcessingModeDef::ShardingPolicy>(100));
EXPECT_THAT(ValidateProcessingMode(processing_mode),
StatusIs(error::INTERNAL,
HasSubstr("does not specify a valid sharding policy.")));
}
TEST(CommonTest, ParseTargetWorkers) {
EXPECT_THAT(ParseTargetWorkers("AUTO"), IsOkAndHolds(TARGET_WORKERS_AUTO));
EXPECT_THAT(ParseTargetWorkers("Auto"), IsOkAndHolds(TARGET_WORKERS_AUTO));
EXPECT_THAT(ParseTargetWorkers("ANY"), IsOkAndHolds(TARGET_WORKERS_ANY));
EXPECT_THAT(ParseTargetWorkers("any"), IsOkAndHolds(TARGET_WORKERS_ANY));
EXPECT_THAT(ParseTargetWorkers("LOCAL"), IsOkAndHolds(TARGET_WORKERS_LOCAL));
EXPECT_THAT(ParseTargetWorkers("local"), IsOkAndHolds(TARGET_WORKERS_LOCAL));
EXPECT_THAT(ParseTargetWorkers(""), IsOkAndHolds(TARGET_WORKERS_AUTO));
}
TEST(CommonTest, ParseInvalidTargetWorkers) {
EXPECT_THAT(ParseTargetWorkers("TARGET_WORKERS_UNSPECIFIED"),
testing::StatusIs(error::INVALID_ARGUMENT));
EXPECT_THAT(ParseTargetWorkers("UNSET"),
testing::StatusIs(error::INVALID_ARGUMENT));
}
TEST(CommonTest, TargetWorkersToString) {
EXPECT_EQ(TargetWorkersToString(TARGET_WORKERS_AUTO), "AUTO");
EXPECT_EQ(TargetWorkersToString(TARGET_WORKERS_ANY), "ANY");
EXPECT_EQ(TargetWorkersToString(TARGET_WORKERS_LOCAL), "LOCAL");
}
TEST(CommonTest, ParseDeploymentMode) {
EXPECT_THAT(ParseDeploymentMode("COLOCATED"),
IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_COLOCATED));
EXPECT_THAT(ParseDeploymentMode("Colocated"),
IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_COLOCATED));
EXPECT_THAT(ParseDeploymentMode("REMOTE"),
IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_REMOTE));
EXPECT_THAT(ParseDeploymentMode("remote"),
IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_REMOTE));
EXPECT_THAT(ParseDeploymentMode("HYBRID"),
IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_HYBRID));
EXPECT_THAT(ParseDeploymentMode("hybrid"),
IsOkAndHolds(DeploymentMode::DEPLOYMENT_MODE_HYBRID));
}
TEST(CommonTest, ParseInvalidDeploymentMode) {
EXPECT_THAT(ParseDeploymentMode("DEPLOYMENT_MODE_UNSPECIFIED"),
testing::StatusIs(error::INVALID_ARGUMENT));
}
TEST(CommonTest, IsPreemptedError) {
EXPECT_TRUE(IsPreemptedError(errors::Aborted("Aborted")));
EXPECT_TRUE(IsPreemptedError(errors::Cancelled("Cancelled")));
EXPECT_TRUE(IsPreemptedError(errors::Unavailable("Unavailable")));
EXPECT_FALSE(IsPreemptedError(absl::OkStatus()));
}
TEST(CommonTest, IsPermanentError) {
EXPECT_FALSE(
IsPreemptedError(errors::FailedPrecondition("Failed precondition")));
EXPECT_FALSE(IsPreemptedError(errors::Internal("Internal")));
EXPECT_FALSE(IsPreemptedError(errors::InvalidArgument("Invalid argument")));
EXPECT_FALSE(IsPreemptedError(errors::NotFound("Not found")));
EXPECT_FALSE(IsPreemptedError(errors::OutOfRange("Out of range")));
EXPECT_FALSE(IsPreemptedError(errors::Unknown("Unknown")));
}
}
}
} |
941 | cpp | tensorflow/tensorflow | transpose_utils | tensorflow/lite/kernels/internal/transpose_utils.cc | tensorflow/lite/kernels/internal/transpose_utils_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TRANSPOSE_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_TRANSPOSE_UTILS_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace transpose_utils {
bool IsTranspose2DApplicable(const TransposeParams& params,
const RuntimeShape& input_shape, int* dim0,
int* dim1);
void RemoveOneSizeDimensions(RuntimeShape* input_shape,
RuntimeShape* output_shape,
TransposeParams* params);
size_t Flatten(const RuntimeShape& input_shape,
const RuntimeShape& output_shape, const TransposeParams& params,
RuntimeShape* non_flatten_input_shape,
RuntimeShape* non_flatten_output_shape,
TransposeParams* non_flatten_params);
}
}
#endif
#include "tensorflow/lite/kernels/internal/transpose_utils.h"
namespace tflite {
namespace transpose_utils {
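// Returns true when the permutation is a single cyclic rotation, so the
// transpose is equivalent to a plain 2D transpose of a [dim0, dim1] matrix;
// the collapsed dimensions are reported through dim0/dim1.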
bool IsTranspose2DApplicable(const TransposeParams& params,
const RuntimeShape& input_shape, int* dim0,
int* dim1) {
const int dims_cnt = input_shape.DimensionsCount();
if (dims_cnt == 2) {
*dim0 = input_shape.Dims(0);
*dim1 = input_shape.Dims(1);
return true;
}
const int first_perm = params.perm[0];
for (int i = 1; i < dims_cnt; ++i) {
int rebased = params.perm[i] - first_perm;
if (rebased < 0) {
rebased += dims_cnt;
}
if (rebased != i) {
return false;
}
}
*dim0 = 1;
*dim1 = 1;
for (int i = 0; i < dims_cnt; ++i) {
if (i < first_perm) {
*dim0 *= input_shape.Dims(i);
} else {
*dim1 *= input_shape.Dims(i);
}
}
return true;
}
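// Drops size-1 dimensions from the input/output shapes and remaps the
// permutation accordingly; if every dimension is 1, the shapes collapse to a
// single-element 1D shape with an identity permutation.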
void RemoveOneSizeDimensions(RuntimeShape* input_shape,
RuntimeShape* output_shape,
TransposeParams* params) {
const int dims_cnt = input_shape->DimensionsCount();
TFLITE_DCHECK_EQ(params->perm_count, dims_cnt);
bool foundOneSizeDim = false;
for (int i = 0; i < dims_cnt; ++i) {
if (input_shape->Dims(i) == 1) {
foundOneSizeDim = true;
break;
}
}
if (!foundOneSizeDim) return;
if (input_shape->FlatSize() == 1) {
input_shape->Resize(1);
input_shape->SetDim(0, 1);
output_shape->Resize(1);
output_shape->SetDim(0, 1);
params->perm_count = 1;
params->perm[0] = 0;
return;
}
int new_dims_cnt = 0;
for (int i = 0; i < dims_cnt; ++i) {
if (input_shape->Dims(i) == 1) {
continue;
}
input_shape->SetDim(new_dims_cnt, input_shape->Dims(i));
++new_dims_cnt;
}
input_shape->Resize(new_dims_cnt);
TransposeParams new_params;
new_dims_cnt = 0;
for (int i = 0; i < dims_cnt; ++i) {
if (output_shape->Dims(i) == 1) {
continue;
}
new_params.perm[new_dims_cnt] = params->perm[i];
output_shape->SetDim(new_dims_cnt, output_shape->Dims(i));
++new_dims_cnt;
}
output_shape->Resize(new_dims_cnt);
new_params.perm_count = new_dims_cnt;
for (int i = 0; i < new_dims_cnt; ++i) {
int min_val_idx = -1;
for (int j = 0; j < new_dims_cnt; ++j) {
if (new_params.perm[j] >= i &&
(min_val_idx == -1 ||
new_params.perm[min_val_idx] > new_params.perm[j])) {
min_val_idx = j;
}
}
new_params.perm[min_val_idx] = i;
}
*params = new_params;
}
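// Strips the leading dimensions that are already in place (perm[i] == i),
// fills the non_flatten_* outputs with the remaining shapes and rebased
// permutation, and returns the flat size of the remaining sub-tensor.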
size_t Flatten(const RuntimeShape& input_shape,
const RuntimeShape& output_shape, const TransposeParams& params,
RuntimeShape* non_flatten_input_shape,
RuntimeShape* non_flatten_output_shape,
TransposeParams* non_flatten_params) {
int skip_dims_cnt = 0;
size_t flat_size = input_shape.FlatSize();
for (int i = 0; i < params.perm_count; ++i) {
if (params.perm[i] == i) {
flat_size /= input_shape.Dims(i);
++skip_dims_cnt;
} else {
break;
}
}
const int new_dims_cnt = params.perm_count - skip_dims_cnt;
non_flatten_input_shape->Resize(new_dims_cnt);
non_flatten_output_shape->Resize(new_dims_cnt);
non_flatten_params->perm_count = new_dims_cnt;
for (int i = skip_dims_cnt; i < params.perm_count; ++i) {
non_flatten_input_shape->SetDim(i - skip_dims_cnt, input_shape.Dims(i));
non_flatten_output_shape->SetDim(i - skip_dims_cnt, output_shape.Dims(i));
non_flatten_params->perm[i - skip_dims_cnt] =
params.perm[i] - skip_dims_cnt;
}
return flat_size;
}
}
} | #include "tensorflow/lite/kernels/internal/transpose_utils.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_1DNoChanges) {
RuntimeShape input_shape({9});
RuntimeShape output_shape({9});
TransposeParams params;
params.perm_count = 1;
params.perm[0] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9}));
EXPECT_EQ(output_shape, RuntimeShape({9}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_2DNoChanges) {
RuntimeShape input_shape({9, 3});
RuntimeShape output_shape({3, 9});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9}));
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_2DShrinking) {
RuntimeShape input_shape({9, 1});
RuntimeShape output_shape({1, 9});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9}));
EXPECT_EQ(output_shape, RuntimeShape({9}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DNoChanges) {
RuntimeShape input_shape({4, 3, 8});
RuntimeShape output_shape({8, 4, 3});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4, 3, 8}));
EXPECT_EQ(output_shape, RuntimeShape({8, 4, 3}));
EXPECT_EQ(params.perm_count, 3);
EXPECT_EQ(params.perm[0], 2);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 1);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DShrinkingOnce) {
RuntimeShape input_shape({4, 1, 8});
RuntimeShape output_shape({8, 4, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4, 8}));
EXPECT_EQ(output_shape, RuntimeShape({8, 4}));
EXPECT_EQ(output_shape.Dims(1), 4);
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DShrinkingTwice) {
RuntimeShape input_shape({4, 1, 1});
RuntimeShape output_shape({1, 4, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4}));
EXPECT_EQ(output_shape, RuntimeShape({4}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DAllOnes) {
RuntimeShape input_shape({1, 1, 1});
RuntimeShape output_shape({1, 1, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({1}));
EXPECT_EQ(output_shape, RuntimeShape({1}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DNoChanges) {
RuntimeShape input_shape({9, 3, 2, 4});
RuntimeShape output_shape({3, 9, 4, 2});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 0;
params.perm[2] = 3;
params.perm[3] = 2;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3, 2, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9, 4, 2}));
EXPECT_EQ(params.perm_count, 4);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 3);
EXPECT_EQ(params.perm[3], 2);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingOnce) {
RuntimeShape input_shape({9, 3, 1, 4});
RuntimeShape output_shape({3, 9, 4, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 0;
params.perm[2] = 3;
params.perm[3] = 2;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9, 4}));
EXPECT_EQ(params.perm_count, 3);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 2);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingTwice) {
RuntimeShape input_shape({1, 3, 1, 4});
RuntimeShape output_shape({3, 1, 4, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 3;
params.perm[3] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({3, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 4}));
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 0);
EXPECT_EQ(params.perm[1], 1);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingThirdTimes) {
RuntimeShape input_shape({1, 1, 7, 1});
RuntimeShape output_shape({1, 7, 1, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({7}));
EXPECT_EQ(output_shape, RuntimeShape({7}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DAllOnes) {
RuntimeShape input_shape({1, 1, 1, 1});
RuntimeShape output_shape({1, 1, 1, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({1}));
EXPECT_EQ(output_shape, RuntimeShape({1}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, Flatten3D) {
RuntimeShape input_shape({3, 5, 7});
RuntimeShape output_shape({3, 7, 5});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({5, 7}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({7, 5}));
EXPECT_EQ(non_flatten_size, 5 * 7);
EXPECT_EQ(non_flatten_params.perm_count, 2);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
}
TEST(TransposeUtilsTest, Flatten4DFlattenOnce) {
RuntimeShape input_shape({3, 5, 7, 9});
RuntimeShape output_shape({3, 7, 5, 9});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({5, 7, 9}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({7, 5, 9}));
EXPECT_EQ(non_flatten_size, 5 * 7 * 9);
EXPECT_EQ(non_flatten_params.perm_count, 3);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
EXPECT_EQ(non_flatten_params.perm[2], 2);
}
TEST(TransposeUtilsTest, Flatten4DFlattenTwice) {
RuntimeShape input_shape({3, 5, 7, 9});
RuntimeShape output_shape({3, 5, 9, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 1;
params.perm[2] = 3;
params.perm[3] = 2;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({7, 9}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({9, 7}));
EXPECT_EQ(non_flatten_size, 7 * 9);
EXPECT_EQ(non_flatten_params.perm_count, 2);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable2D) {
RuntimeShape input_shape({4, 5});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 5);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DOne) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 30);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DTwo) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 20);
EXPECT_EQ(dim1, 6);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DNotApplicable) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 1;
params.perm[2] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_FALSE(applicable);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DOne) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 3;
params.perm[3] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 210);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DTwo) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 2;
params.perm[1] = 3;
params.perm[2] = 0;
params.perm[3] = 1;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 20);
EXPECT_EQ(dim1, 42);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DThird) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 3;
params.perm[1] = 0;
params.perm[2] = 1;
params.perm[3] = 2;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 120);
EXPECT_EQ(dim1, 7);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DNotApplicable) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 3;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_FALSE(applicable);
}
}
} |
942 | cpp | tensorflow/tensorflow | sparsity_format_converter | tensorflow/lite/kernels/internal/utils/sparsity_format_converter.cc | tensorflow/lite/kernels/internal/utils/sparsity_format_converter_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_UTILS_SPARSITY_FORMAT_CONVERTER_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_UTILS_SPARSITY_FORMAT_CONVERTER_H_
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace internal {
namespace sparsity {
template <typename T>
class FormatConverter {
public:
FormatConverter(const std::vector<int>& shape,
const std::vector<int>& traversal_order,
const std::vector<TfLiteDimensionType>& format,
const std::vector<int>& block_size = {},
const std::vector<int>& block_map = {});
FormatConverter(const std::vector<int>& shape,
const std::vector<int>& traversal_order,
const std::vector<TfLiteDimensionType>& format,
const std::vector<int>& dense_size,
const std::vector<std::vector<int>>& segments,
const std::vector<std::vector<int>>& indices,
const std::vector<int>& block_map = {});
FormatConverter(const std::vector<int>& shape,
const TfLiteSparsity& sparsity);
const std::vector<T>& GetData() { return data_; }
const std::vector<std::vector<int>>& GetDimMetadata() {
return dim_metadata_;
}
TfLiteStatus DenseToSparse(const T* src_data);
TfLiteStatus SparseToDense(const T* src_data);
TfLiteStatus SparseToDense(const T* src_data, const size_t dest_size,
T* dest_data, TfLiteContext* context = nullptr);
private:
void InitSparseToDenseConverter(std::vector<int> shape,
std::vector<int> traversal_order,
std::vector<TfLiteDimensionType> format,
std::vector<int> dense_size,
std::vector<std::vector<int>> segments,
std::vector<std::vector<int>> indices,
std::vector<int> block_map);
void Populate(const T* src_data, std::vector<int> indices, int level,
int prev_idx, int* src_data_ptr, T* dest_data);
bool IsZero(const T val);
std::vector<int> dense_shape_;
std::vector<int> blocked_shape_;
size_t dense_size_;
std::vector<int> traversal_order_;
std::vector<TfLiteDimensionType> format_;
std::vector<int> block_size_;
std::vector<int> block_map_;
std::vector<std::vector<int>> dim_metadata_;
std::vector<T> data_;
};
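// Illustrative usage sketch (hypothetical shapes and buffer name, not part of
// the original file):
//   FormatConverter<float> converter(
//       {4, 4}, {0, 1}, {kTfLiteDimDense, kTfLiteDimSparseCSR});
//   converter.DenseToSparse(dense_buffer);
//   const std::vector<float>& packed_values = converter.GetData();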
extern template class FormatConverter<int32_t>;
extern template class FormatConverter<int8_t>;
extern template class FormatConverter<float>;
extern template class FormatConverter<Eigen::half>;
}
}
}
#endif
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
namespace tflite {
namespace internal {
namespace sparsity {
namespace {
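// Computes the row-major flattened offset of `indices` within `shape`.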
uint64_t GetFlattenedIndex(const std::vector<int>& indices,
const std::vector<int>& shape) {
uint64_t index = 0;
int sub_elements = 1;
for (int i = shape.size() - 1; i >= 0; i--) {
index += indices[i] * sub_elements;
sub_elements *= shape[i];
}
return index;
}
std::vector<int> TfLiteIntArrayToVector(const TfLiteIntArray* int_array) {
std::vector<int> values;
if (!int_array) {
return values;
}
values.resize(int_array->size);
for (size_t i = 0; i < int_array->size; i++) {
values[i] = int_array->data[i];
}
return values;
}
}
template <typename T>
FormatConverter<T>::FormatConverter(
const std::vector<int>& shape, const std::vector<int>& traversal_order,
const std::vector<TfLiteDimensionType>& format,
const std::vector<int>& block_size, const std::vector<int>& block_map)
: dense_shape_(shape),
traversal_order_(traversal_order),
block_size_(block_size),
block_map_(block_map) {
dense_size_ = 1;
int block_dim = 0;
blocked_shape_.resize(shape.size());
format_.resize(shape.size() + block_map.size());
for (int i = 0; i < shape.size(); i++) {
format_[i] = format[traversal_order[i]];
dense_size_ *= shape[i];
if (block_dim < block_map.size() && block_map[block_dim] == i) {
blocked_shape_[i] = shape[i] / block_size[block_dim];
block_dim++;
} else {
blocked_shape_[i] = shape[i];
}
}
for (int i = 0; i < block_map.size(); i++) {
format_[i + shape.size()] = kTfLiteDimDense;
}
}
template <typename T>
TfLiteStatus FormatConverter<T>::DenseToSparse(const T* src_data) {
int num_original_dims = dense_shape_.size();
int num_block_dims = block_map_.size();
int num_expanded_dims = num_original_dims + num_block_dims;
std::vector<int> expanded_shape(num_expanded_dims);
for (int i = 0; i < num_expanded_dims; i++) {
if (i < num_original_dims) {
expanded_shape[i] = blocked_shape_[i];
} else {
expanded_shape[i] = block_size_[i - num_original_dims];
}
}
std::vector<int> shape_offset(num_original_dims);
shape_offset[shape_offset.size() - 1] = 1;
for (int i = num_original_dims - 1; i > 0; --i) {
shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
}
std::vector<int> expanded_shape_offset(num_expanded_dims);
for (int i = 0; i < num_original_dims; ++i) {
expanded_shape_offset[i] = shape_offset[i];
}
for (int i = 0; i < num_block_dims; ++i) {
int mapped_dim = block_map_[i];
expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
expanded_shape_offset[mapped_dim] *= block_size_[i];
}
std::vector<int> dst_ordered_offset(num_expanded_dims);
for (int i = 0; i < num_expanded_dims; ++i) {
dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
}
std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(), false);
std::vector<int> inner_compressed_dim(num_expanded_dims);
int most_recent_compressed_dim = -1;
std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
int segment_count = 1;
for (int i = num_expanded_dims - 1; i >= 0; --i) {
inner_compressed_dim[i] = most_recent_compressed_dim;
if (format_[i] == kTfLiteDimSparseCSR) {
most_recent_compressed_dim = i;
num_segments_of_next_compressed_dim[i] = segment_count;
segment_count = 1;
} else {
num_segments_of_next_compressed_dim[i] = -1;
segment_count *= expanded_shape[traversal_order_[i]];
}
}
dim_metadata_.resize(num_expanded_dims * 2);
std::vector<int> dst_sparse_dims;
dst_sparse_dims.reserve(num_expanded_dims);
for (int i = 0; i < num_expanded_dims; ++i) {
dim_metadata_[i * 2].clear();
dim_metadata_[i * 2 + 1].clear();
if (format_[i] == kTfLiteDimDense) {
dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
} else {
dim_metadata_[i * 2].push_back(0);
dst_sparse_dims.push_back(i);
}
}
int dst_dim_idx = num_expanded_dims;
std::vector<int> coordinate(num_expanded_dims, 0);
int dense_tensor_idx = 0;
while (dst_dim_idx >= 0) {
if (dst_dim_idx == num_expanded_dims) {
if (!IsZero(src_data[dense_tensor_idx])) {
data_.push_back(src_data[dense_tensor_idx]);
for (auto dst_dim : dst_sparse_dims) {
if (!dst_dim_has_nonzeroes[dst_dim]) {
dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
dst_dim_has_nonzeroes[dst_dim] = true;
}
}
} else if (format_[num_expanded_dims - 1] == kTfLiteDimDense) {
data_.push_back(src_data[dense_tensor_idx]);
}
--dst_dim_idx;
} else {
int original_dim_idx = traversal_order_[dst_dim_idx];
int dim_size = expanded_shape[original_dim_idx];
if (dst_dim_has_nonzeroes[dst_dim_idx]) {
dst_dim_has_nonzeroes[dst_dim_idx] = false;
} else if (format_[dst_dim_idx] == kTfLiteDimSparseCSR) {
int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
num_segments_of_next_compressed_dim[dst_dim_idx];
if (next_compressed_dim >= 0) {
auto& segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
segments.erase(segments.begin() + 1 + erase_offset, segments.end());
} else {
data_.erase(data_.begin() + erase_offset, data_.end());
}
}
if (++coordinate[dst_dim_idx] < dim_size) {
dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
++dst_dim_idx;
} else {
if (format_[dst_dim_idx] == kTfLiteDimSparseCSR) {
dim_metadata_[2 * dst_dim_idx].push_back(
dim_metadata_[2 * dst_dim_idx + 1].size());
}
coordinate[dst_dim_idx] = -1;
dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;
--dst_dim_idx;
}
}
}
return kTfLiteOk;
}
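// Constructor for the sparse-to-dense direction: the sparse layout (segments
// and indices per dimension) is supplied explicitly by the caller.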
template <typename T>
FormatConverter<T>::FormatConverter(
const std::vector<int>& shape, const std::vector<int>& traversal_order,
const std::vector<TfLiteDimensionType>& format,
const std::vector<int>& dense_size,
const std::vector<std::vector<int>>& segments,
const std::vector<std::vector<int>>& indices,
const std::vector<int>& block_map) {
InitSparseToDenseConverter(shape, traversal_order, format, dense_size,
segments, indices, block_map);
}
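// Builds a sparse-to-dense converter directly from a TfLiteSparsity parameter
// by unpacking its dimension metadata into vectors.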
template <typename T>
FormatConverter<T>::FormatConverter(const std::vector<int>& shape,
const TfLiteSparsity& sparsity) {
auto traversal_order = TfLiteIntArrayToVector(sparsity.traversal_order);
auto block_map = TfLiteIntArrayToVector(sparsity.block_map);
std::vector<TfLiteDimensionType> format(sparsity.dim_metadata_size);
std::vector<int> dense_size(sparsity.dim_metadata_size);
std::vector<std::vector<int>> segments(sparsity.dim_metadata_size);
std::vector<std::vector<int>> indices(sparsity.dim_metadata_size);
for (int i = 0; i < sparsity.dim_metadata_size; i++) {
format[i] = sparsity.dim_metadata[i].format;
dense_size[i] = sparsity.dim_metadata[i].dense_size;
segments[i] =
TfLiteIntArrayToVector(sparsity.dim_metadata[i].array_segments);
indices[i] = TfLiteIntArrayToVector(sparsity.dim_metadata[i].array_indices);
}
InitSparseToDenseConverter(shape, std::move(traversal_order),
std::move(format), std::move(dense_size),
std::move(segments), std::move(indices),
std::move(block_map));
}
template <typename T>
void FormatConverter<T>::InitSparseToDenseConverter(
std::vector<int> shape, std::vector<int> traversal_order,
std::vector<TfLiteDimensionType> format, std::vector<int> dense_size,
std::vector<std::vector<int>> segments,
std::vector<std::vector<int>> indices, std::vector<int> block_map) {
dense_shape_ = std::move(shape);
traversal_order_ = std::move(traversal_order);
block_map_ = std::move(block_map);
format_ = std::move(format);
dense_size_ = 1;
for (int i = 0; i < dense_shape_.size(); i++) {
dense_size_ *= dense_shape_[i];
}
dim_metadata_.resize(2 * format_.size());
for (int i = 0; i < format_.size(); i++) {
if (format_[i] == kTfLiteDimDense) {
dim_metadata_[2 * i] = {dense_size[i]};
} else {
dim_metadata_[2 * i] = std::move(segments[i]);
dim_metadata_[2 * i + 1] = std::move(indices[i]);
}
}
int original_rank = dense_shape_.size();
int block_dim = 0;
blocked_shape_.resize(original_rank);
block_size_.resize(block_map_.size());
for (int i = 0; i < original_rank; i++) {
if (block_dim < block_map_.size() && block_map_[block_dim] == i) {
if (original_rank + block_dim < traversal_order_.size()) {
int orig_dim = traversal_order_[original_rank + block_dim];
block_size_[block_dim] = dense_size[orig_dim];
blocked_shape_[i] = dense_shape_[i] / dense_size[orig_dim];
block_dim++;
}
} else {
blocked_shape_[i] = dense_shape_[i];
}
}
}
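// Recursive densification helper. `level` indexes traversal_order_ and
// `prev_idx` is the position inside the parent segment; at the leaf level the
// blocked coordinates are folded back into original-dimension indices and a
// single value is written to the dense buffer.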
template <typename T>
void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices,
int level, int prev_idx, int* src_data_ptr,
T* dest_data) {
if (level == indices.size()) {
int orig_rank = dense_shape_.size();
std::vector<int> orig_idx;
orig_idx.resize(orig_rank);
int i = 0;
for (; i < orig_idx.size(); i++) {
int orig_dim = traversal_order_[i];
orig_idx[orig_dim] = indices[i];
}
for (; i < indices.size(); i++) {
const int block_idx = traversal_order_[i] - orig_rank;
const int orig_dim = block_map_[block_idx];
orig_idx[orig_dim] =
orig_idx[orig_dim] * block_size_[block_idx] + indices[i];
}
dest_data[GetFlattenedIndex(orig_idx, dense_shape_)] =
src_data[*src_data_ptr];
*src_data_ptr = *src_data_ptr + 1;
return;
}
const int metadata_idx = 2 * level;
const int shape_of_level = dim_metadata_[metadata_idx][0];
if (format_[level] == kTfLiteDimDense) {
for (int i = 0; i < shape_of_level; i++) {
indices[level] = i;
Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i,
src_data_ptr, dest_data);
}
} else if (prev_idx + 1 < dim_metadata_[metadata_idx].size()) {
const auto& array_segments = dim_metadata_[metadata_idx];
const auto& array_indices = dim_metadata_[metadata_idx + 1];
for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1];
i++) {
if (i < array_indices.size() && level < indices.size()) {
indices[level] = array_indices[i];
Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data);
}
}
}
}
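// Densifies into the converter-owned buffer data_, which is zero-filled first.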
template <typename T>
TfLiteStatus FormatConverter<T>::SparseToDense(const T* src_data) {
data_.resize(dense_size_);
std::fill(data_.begin(), data_.end(), T(0));
int total_rank = traversal_order_.size();
int src_data_ptr = 0;
std::vector<int> indices(total_rank);
Populate(src_data, indices, 0, 0, &src_data_ptr, data_.data());
return kTfLiteOk;
}
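// Densifies into a caller-provided buffer; dest_size must match the dense
// element count or an error is reported through the context.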
template <typename T>
TfLiteStatus FormatConverter<T>::SparseToDense(const T* src_data,
const size_t dest_size,
T* dest_data,
TfLiteContext* context) {
if (dest_size != dense_size_) {
TF_LITE_MAYBE_KERNEL_LOG(
context, "unexpected buffer size for densified data, expected %zu.\n",
dense_size_);
return kTfLiteError;
}
for (auto i = 0; i < dest_size; i++) {
dest_data[i] = T(0);
}
const int total_rank = traversal_order_.size();
int src_data_ptr = 0;
std::vector<int> indices(total_rank);
Populate(src_data, indices, 0, 0, &src_data_ptr, dest_data);
return kTfLiteOk;
}
template <typename T>
bool FormatConverter<T>::IsZero(const T val) {
return (val == static_cast<T>(0));
}
template class FormatConverter<int32_t>;
template class FormatConverter<int8_t>;
template class FormatConverter<float>;
template class FormatConverter<Eigen::half>;
}
}
} |
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/model.h"
namespace tflite {
namespace internal {
namespace sparsity {
namespace {
TEST(FormatConverterTest, SimpleTestD0D1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {3};
const std::vector<int> dm1 = {4};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS0D1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1 = {4};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 9, 8, 5, 0, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestD0S1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {3};
const std::vector<int> dm1_0 = {0, 3, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 3, 0, 3};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS0S1) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {0, 1};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1_0 = {0, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 3, 0, 3};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestD1D0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {4};
const std::vector<int> dm1 = {3};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 5, 0, 0, 0, 9, 0, 0, 8, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS1D0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 3};
const std::vector<int> dm0_1 = {0, 2, 3};
const std::vector<int> dm1 = {3};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1, dim_metadata[2]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 0, 5, 9, 0, 0, 8, 0, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestD1S0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimDense};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {4};
const std::vector<int> dm1_0 = {0, 2, 2, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 0, 2};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 5, 9, 8, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, SimpleTestS1S0) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 4};
const std::vector<int> traversal_order = {1, 0};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 3};
const std::vector<int> dm0_1 = {0, 2, 3};
const std::vector<int> dm1_0 = {0, 2, 3, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 0, 2};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 5, 9, 8, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestS0D1S2) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 2, 2};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimSparseCSR, kTfLiteDimDense, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1 = {2};
const std::vector<int> dm2_0 = {0, 1, 3, 4, 5};
const std::vector<int> dm2_1 = {0, 0, 1, 0, 1};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1, dim_metadata[2]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestD0D1S2) {
const std::vector<int> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
const std::vector<int> dense_shape = {3, 2, 2};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimDense, kTfLiteDimDense, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {3};
const std::vector<int> dm1 = {2};
const std::vector<int> dm2_0 = {0, 1, 3, 3, 3, 4, 5};
const std::vector<int> dm2_1 = {0, 0, 1, 0, 1};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1, dim_metadata[2]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {6, 9, 8, 5, 7};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestS0S1S2) {
const std::vector<int> dense_values = {1, 7, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 2, 0, 0, 4, 8, 3, 9};
const std::vector<int> dense_shape = {3, 4, 2};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimSparseCSR, kTfLiteDimSparseCSR, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1_0 = {0, 2, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 2, 3};
const std::vector<int> dm2_0 = {0, 2, 3, 4, 6, 8};
const std::vector<int> dm2_1 = {0, 1, 1, 1, 0, 1, 0, 1};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 7, 5, 2, 4, 8, 3, 9};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, 3DTestS0S2S1) {
const std::vector<int> dense_values = {1, 0, 0, 0, 7, 0, 5, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4, 3, 2, 0, 8, 9};
const std::vector<int> dense_shape = {3, 2, 4};
const std::vector<int> traversal_order = {0, 2, 1};
const std::vector<TfLiteDimensionType> format = {
kTfLiteDimSparseCSR, kTfLiteDimSparseCSR, kTfLiteDimSparseCSR};
FormatConverter<int> converter(dense_shape, traversal_order, format);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0_0 = {0, 2};
const std::vector<int> dm0_1 = {0, 2};
const std::vector<int> dm1_0 = {0, 2, 5};
const std::vector<int> dm1_1 = {0, 2, 0, 2, 3};
const std::vector<int> dm2_0 = {0, 2, 3, 4, 6, 8};
const std::vector<int> dm2_1 = {0, 1, 1, 1, 0, 1, 0, 1};
EXPECT_EQ(dm0_0, dim_metadata[0]);
EXPECT_EQ(dm0_1, dim_metadata[1]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm2_0, dim_metadata[4]);
EXPECT_EQ(dm2_1, dim_metadata[5]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 7, 5, 2, 4, 8, 3, 9};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0D1) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2, 3};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimDense};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm, dim_metadata[2]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 3, 0, 0,
0, 0, 0, 0, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S11DBlock) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2};
const std::vector<int> block_map = {1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm0 = {4};
const std::vector<int> dm2 = {2};
const std::vector<int> dm1_0 = {0, 2, 3, 4, 5};
const std::vector<int> dm1_1 = {0, 1, 0, 1, 1};
EXPECT_EQ(dm0, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm2, dim_metadata[4]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 2, 3, 0, 4, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S12DBlock) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2, 3};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 2, 3};
const std::vector<int> dm1_1 = {0, 1, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 3, 0, 0, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD1S0) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 5, 0, 0, 0, 0, 6};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {1, 0, 3, 2};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimSparseCSR,
kTfLiteDimDense};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 1, 3};
const std::vector<int> dm1_1 = {0, 0, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 0, 3, 0, 5, 0, 0, 6};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S1LastBlockEmpty) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
const std::vector<int> dense_shape = {4, 4};
const std::vector<int> traversal_order = {0, 1, 2, 3};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 2, 2};
const std::vector<int> dm1_1 = {0, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 0, 0, 4, 2, 3, 0, 0};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
TEST(FormatConverterTest, BlockTestD0S1ColMajorBlock) {
const std::vector<int> dense_values = {1, 0, 2, 3, 0, 4, 0, 0, 1, 0, 2,
3, 0, 4, 0, 0, 0, 0, 5, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const std::vector<int> dense_shape = {4, 8};
const std::vector<int> traversal_order = {0, 1, 3, 2};
const std::vector<TfLiteDimensionType> format = {kTfLiteDimDense,
kTfLiteDimSparseCSR};
const std::vector<int> block_size = {2, 2};
const std::vector<int> block_map = {0, 1};
FormatConverter<int> converter(dense_shape, traversal_order, format,
block_size, block_map);
converter.DenseToSparse(dense_values.data());
const auto& dim_metadata = converter.GetDimMetadata();
const std::vector<int> dm = {2};
const std::vector<int> dm1_0 = {0, 3, 4};
const std::vector<int> dm1_1 = {0, 1, 2, 1};
EXPECT_EQ(dm, dim_metadata[0]);
EXPECT_EQ(dm1_0, dim_metadata[2]);
EXPECT_EQ(dm1_1, dim_metadata[3]);
EXPECT_EQ(dm, dim_metadata[4]);
EXPECT_EQ(dm, dim_metadata[6]);
const auto& data = converter.GetData();
const std::vector<int> expected_data = {1, 1, 0, 0, 2, 2, 3, 3,
0, 0, 4, 4, 5, 0, 0, 0};
EXPECT_EQ(expected_data, data);
converter.SparseToDense(expected_data.data());
const auto& data_back = converter.GetData();
EXPECT_EQ(data_back, dense_values);
std::vector<int> dense_data(dense_values.size());
converter.SparseToDense(expected_data.data(), dense_data.size(),
dense_data.data(), nullptr);
EXPECT_EQ(dense_data, dense_values);
}
}
}
}
} |
943 | cpp | tensorflow/tensorflow | parse_example | tensorflow/lite/kernels/parse_example/parse_example.cc | tensorflow/lite/kernels/parse_example/parse_example_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_PARSE_EXAMPLE_H_
#define TENSORFLOW_LITE_KERNELS_PARSE_EXAMPLE_PARSE_EXAMPLE_H_
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_PARSE_EXAMPLE();
TfLiteRegistration* Register_PARSE_EXAMPLE_V2();
extern "C" void AddParseExampleOp(::tflite::MutableOpResolver* resolver);
}
}
}
#endif
#include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/example_proto_fast_parsing.h"
#include "tensorflow/core/util/presized_cuckoo_map.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace parse_example {
namespace {
namespace tf = ::tensorflow;
using tf::Status;
using tf::StringPiece;
using tf::tstring;
using tf::example::CopyOrMoveBlock;
using tf::example::FastParseExampleConfig;
using tf::example::GetListFromBuffer;
using tf::example::LimitedArraySlice;
using tf::example::ParseExample;
using tf::example::SeededHasher;
using tf::example::SmallVector;
using tf::example::SparseBuffer;
using tf::example::Type;
using tf::example::parsed::Example;
using ConfigIndex = tf::PresizedCuckooMap<std::pair<int32_t, Type>>;
struct TfLiteResult {
std::vector<TfLiteTensor*> dense_values;
std::vector<TfLiteTensor*> sparse_values;
std::vector<TfLiteTensor*> sparse_indices;
std::vector<TfLiteTensor*> sparse_shapes;
std::map<int, tf::Tensor> dense_tensors;
};
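// Fills a variable-length dense output with its default value, then copies
// each example's parsed values to that example's offset within the minibatch.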
template <typename T>
void FillAndCopyVarLen(const int d, const size_t num_elements,
const size_t num_elements_per_minibatch,
const FastParseExampleConfig& config,
std::vector<SparseBuffer>& varlen_dense_buffers,
TfLiteTensor* values) {
const tf::Tensor& default_value = config.dense[d].default_value;
std::fill(reinterpret_cast<T*>(values->data.raw),
reinterpret_cast<T*>(values->data.raw) + num_elements,
default_value.flat<T>()(0));
auto data = reinterpret_cast<T*>(values->data.raw);
const SparseBuffer& buffer = varlen_dense_buffers[d];
const auto& end_indices = buffer.example_end_indices;
const size_t examples_in_buffer = end_indices.size();
const auto& list = GetListFromBuffer<T>(buffer);
auto list_ptr = list.begin();
size_t elements_tally = 0;
for (size_t j = 0; j < examples_in_buffer; ++j) {
const size_t num_elems = end_indices[j] - elements_tally;
CopyOrMoveBlock(list_ptr, list_ptr + num_elems, data);
list_ptr += num_elems;
data += num_elements_per_minibatch;
elements_tally = end_indices[j];
}
DCHECK(elements_tally == list.size());
}
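// Parses one serialized Example proto; aliasing lets string fields reference
// the input buffer instead of copying it.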
bool ParseExample(StringRef serialized, Example* example) {
DCHECK(example != nullptr);
tf::protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized.str), serialized.len);
tensorflow::example::EnableAliasing(&stream);
return ParseExample(&stream, example);
}
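// Parses a single serialized Example into the dense outputs and the sparse /
// variable-length buffers, then back-fills defaults (or empty end indices) for
// features this example did not provide.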
Status FastParseSerializedExample(
StringRef serialized_example, const tstring& example_name,
const size_t example_index, const FastParseExampleConfig& config,
bool* quick_filter, int quick_filter_size,
const std::unique_ptr<ConfigIndex>& config_index, int config_index_size,
SeededHasher* hasher, std::vector<TfLiteTensor*>* output_dense,
std::vector<SparseBuffer>* output_varlen_dense,
std::vector<SparseBuffer>* output_sparse,
std::map<absl::string_view, int>& stats, TfLiteResult* result) {
DCHECK(output_dense != nullptr);
tensorflow::example::parsed::Example parsed_example;
if (!ParseExample(serialized_example, &parsed_example)) {
return tf::errors::Internal("Failed to parse example");
}
std::vector<int64_t> dense_feature_last_example(config.dense.size(), -1);
std::vector<int64_t> sparse_feature_last_example(config.sparse.size(), -1);
const size_t parsed_example_size = parsed_example.size();
for (size_t i = 0; i < parsed_example_size; ++i) {
tensorflow::example::parsed::FeatureMapEntry& name_and_feature =
parsed_example[parsed_example_size - i - 1];
const StringPiece feature_name = name_and_feature.first;
tensorflow::example::parsed::Feature& feature = name_and_feature.second;
if (feature_name.length() >= quick_filter_size ||
!quick_filter[feature_name.length()]) {
continue;
}
const uint64_t h = (*hasher)(feature_name);
std::pair<int32_t, Type> d_and_type;
if (!config_index->Find(h, &d_and_type)) {
continue;
}
size_t d = d_and_type.first;
bool is_dense = d_and_type.second == Type::Dense;
auto example_error = [&](StringPiece suffix) {
return tf::errors::Internal("Name: ", example_name,
", Key: ", feature_name,
", Index: ", example_index, ". ", suffix);
};
auto parse_error = [&] {
return example_error("Can't parse serialized Example.");
};
tf::DataType example_dtype;
if (feature.ParseDataType(&example_dtype) != absl::OkStatus()) {
return parse_error();
}
if (is_dense) {
if (example_dtype == tf::DT_INVALID) continue;
dense_feature_last_example[d] = example_index;
if (example_dtype != config.dense[d].dtype) {
return example_error(absl::StrCat(
"Data types don't match. Data type: ",
DataTypeString(example_dtype),
" but expected type: ", DataTypeString(config.dense[d].dtype)));
}
if (!config.dense[d].variable_length) {
TfLiteTensor* out = (*output_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
const std::size_t offset = example_index * num_elements;
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(absl::StrCat(
"Number of ", type_str,
" values != expected. "
"Values size:",
size,
" but output shape: ", config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
auto out_p = reinterpret_cast<int64_t*>(out->data.raw) + offset;
LimitedArraySlice<int64_t> slice(out_p, num_elements);
if (!feature.ParseInt64List(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "int64");
}
break;
}
case tf::DT_FLOAT: {
auto out_p = reinterpret_cast<float*>(out->data.raw) + offset;
LimitedArraySlice<float> slice(out_p, num_elements);
if (!feature.ParseFloatList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "float");
}
break;
}
case tf::DT_STRING: {
auto& out_tensor = result->dense_tensors[d];
auto out_p = out_tensor.flat<tstring>().data() + offset;
LimitedArraySlice<tstring> slice(out_p, num_elements);
if (!feature.ParseBytesList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "bytes");
}
break;
}
default:
return tf::errors::Internal("Unrecognized dense type: ",
config.dense[d].dtype);
}
} else {
SparseBuffer& out = (*output_varlen_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
if (example_dtype != tf::DT_INVALID &&
example_dtype != config.dense[d].dtype) {
return example_error(absl::StrCat(
"Data types don't match. ",
"Expected type: ", DataTypeString(config.dense[d].dtype)));
}
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(
absl::StrCat("Number of ", type_str,
" values is not a multiple of stride length. Saw ",
size, " values but output shape is: ",
config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseInt64List(&out.int64_list)) {
return parse_error();
}
if (out.int64_list.size() % num_elements != 0) {
return shape_error(out.int64_list.size(), "int64");
}
}
out.example_end_indices.push_back(out.int64_list.size());
break;
}
case tf::DT_FLOAT: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseFloatList(&out.float_list)) {
return parse_error();
}
if (out.float_list.size() % num_elements != 0) {
return shape_error(out.float_list.size(), "float");
}
}
out.example_end_indices.push_back(out.float_list.size());
break;
}
case tf::DT_STRING: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseBytesList(&out.bytes_list)) {
return parse_error();
}
if (out.bytes_list.size() % num_elements != 0) {
return shape_error(out.bytes_list.size(), "byte");
}
}
out.example_end_indices.push_back(out.bytes_list.size());
break;
}
default:
return tf::errors::Internal("Should not happen: ",
config.dense[d].dtype);
}
}
} else {
auto& last_example = sparse_feature_last_example;
if (last_example[d] == example_index) {
continue;
}
last_example[d] = example_index;
SparseBuffer& out = (*output_sparse)[d];
tf::DataType feature_dtype = config.sparse[d].dtype;
if (example_dtype != tf::DT_INVALID && example_dtype != feature_dtype) {
return tf::errors::Internal("Data types don't match:", example_dtype,
" != ", feature_dtype);
}
switch (feature_dtype) {
case tf::DT_INT64: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseInt64List(&out.int64_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.int64_list.size());
break;
}
case tf::DT_FLOAT: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseFloatList(&out.float_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.float_list.size());
break;
}
case tf::DT_STRING: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseBytesList(&out.bytes_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.bytes_list.size());
break;
}
default:
return tf::errors::Internal("Should not happen: ", feature_dtype);
}
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (config.dense[d].variable_length) continue;
if (dense_feature_last_example[d] == example_index) continue;
if (config.dense[d].default_value.NumElements() == 0) {
return tf::errors::Internal(
"Name: ", example_name, ", Feature: ", config.dense[d].feature_name,
" (data type: ", DataTypeString(config.dense[d].dtype), ")",
" is required but could not be found.");
}
const tf::Tensor& in = config.dense[d].default_value;
TfLiteTensor* out = result->dense_values[d];
const std::size_t num_elements = in.shape().num_elements();
const std::size_t offset = example_index * num_elements;
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
out->data.i64 + offset);
break;
}
case tf::DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
out->data.f + offset);
break;
}
case tf::DT_STRING: {
auto& out_tensor = result->dense_tensors[d];
std::copy_n(in.flat<tstring>().data(), num_elements,
out_tensor.flat<tstring>().data() + offset);
break;
}
default:
return tf::errors::Internal("Should not happen: ",
config.dense[d].dtype);
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (!config.dense[d].variable_length) continue;
if (dense_feature_last_example[d] == example_index) continue;
SparseBuffer& out = (*output_varlen_dense)[d];
size_t prev_example_end_index =
out.example_end_indices.empty() ? 0 : out.example_end_indices.back();
out.example_end_indices.push_back(prev_example_end_index);
}
for (size_t d = 0; d < config.sparse.size(); ++d) {
if (sparse_feature_last_example[d] == example_index) continue;
SparseBuffer& out = (*output_sparse)[d];
size_t prev_example_end_index =
out.example_end_indices.empty() ? 0 : out.example_end_indices.back();
out.example_end_indices.push_back(prev_example_end_index);
}
return absl::OkStatus();
}
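// Accumulates the total number of sparse values in the batch and the largest
// per-example count (used for the dense_shape of the sparse output).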
void CountSparseFeatures(const SparseBuffer& sparse_buffer,
size_t* total_num_features, size_t* max_num_features) {
const std::vector<size_t>& end_indices = sparse_buffer.example_end_indices;
*total_num_features += end_indices.back();
*max_num_features = std::max(*max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
*max_num_features = std::max(*max_num_features, example_size);
}
}
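// Copies parsed sparse values into the output tensor; string values are
// re-packed through DynamicBuffer.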
void CopySparseBufferToTensor(tf::DataType dtype, size_t offset,
SparseBuffer* src, TfLiteTensor* dst) {
switch (dtype) {
case tf::DT_INT64: {
std::copy(src->int64_list.begin(), src->int64_list.end(),
reinterpret_cast<int64_t*>(dst->data.raw) + offset);
break;
}
case tf::DT_FLOAT: {
std::copy(src->float_list.begin(), src->float_list.end(),
reinterpret_cast<float*>(dst->data.raw) + offset);
break;
}
case tf::DT_STRING: {
DynamicBuffer buffer;
for (auto* begin = src->bytes_list.begin();
begin != src->bytes_list.end(); begin++) {
buffer.AddString(begin->c_str(), begin->size());
}
buffer.WriteToTensor(dst, nullptr);
break;
}
default:
DCHECK(false) << "Encountered unexpected DataType "
<< DataTypeString(dtype)
<< "in variable that should have been checked.";
}
}
inline void CopyToBuffer(absl::Span<const tstring> vec, char* tensor_buffer,
int num_examples, int batch_size,
int elements_per_stride) {
int i = 0, k = 0;
int start = 0;
for (; i < num_examples; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
memcpy(tensor_buffer + start, vec[k].c_str(), vec[k].size());
start += vec[k].size();
k++;
}
}
for (; i < batch_size; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
memcpy(tensor_buffer + start, vec[k].c_str(), vec[k].size());
start += vec[k].size();
k++;
}
}
}
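// Drives parsing for the whole minibatch, then materializes the sparse
// index/value/shape outputs as well as the variable-length and string dense
// outputs.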
Status FastParseExampleLite(
const FastParseExampleConfig& config, const TfLiteTensor* serialized,
absl::Span<const tstring> example_names, bool* quick_filter,
int quick_filter_size, const std::unique_ptr<ConfigIndex>& config_index,
int config_index_size, SeededHasher* hasher, TfLiteResult* result,
std::map<absl::string_view, int>& stats, TfLiteContext* context) {
if (result == nullptr) {
return tf::errors::Internal("Result is null");
}
const int count = GetStringCount(serialized);
std::vector<tf::Tensor> fixed_dense_values(config.dense.size());
std::vector<SparseBuffer> sparse_buffers(config.sparse.size());
std::vector<SparseBuffer> varlen_dense_buffers(config.dense.size());
Status status_of_minibatch;
for (size_t e = 0; e < count; ++e) {
status_of_minibatch = FastParseSerializedExample(
GetString(serialized, e),
(!example_names.empty() ? example_names[e] : "<unknown>"), e, config,
quick_filter, quick_filter_size, config_index, config_index_size,
hasher, &result->dense_values, &varlen_dense_buffers, &sparse_buffers,
stats, result);
if (!status_of_minibatch.ok()) break;
}
if (!status_of_minibatch.ok()) {
return status_of_minibatch;
}
for (size_t d = 0; d < config.sparse.size(); ++d) {
size_t total_num_features = 0;
size_t max_num_features = 0;
CountSparseFeatures(sparse_buffers[d], &total_num_features,
&max_num_features);
tf::TensorShape indices_shape;
TfLiteTensor* indices = result->sparse_indices[d];
TfLiteTensor* values = result->sparse_values[d];
TfLiteTensor* sparse_shape = result->sparse_shapes[d];
auto* sparse_shape_ptr = reinterpret_cast<int64_t*>(sparse_shape->data.raw);
sparse_shape_ptr[1] = max_num_features;
TfLiteIntArray* index_shape = TfLiteIntArrayCreate(2);
index_shape->data[0] = total_num_features;
index_shape->data[1] = 2;
context->ResizeTensor(context, indices, index_shape);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(1);
output_shape->data[0] = total_num_features;
context->ResizeTensor(context, values, output_shape);
SparseBuffer& buffer = sparse_buffers[d];
auto* indices_p = reinterpret_cast<int64_t*>(indices->data.raw);
if (!indices_p) {
return tf::errors::Internal("Indices tensor not allocated!");
}
if (total_num_features > 0) {
int64_t* ix_p = indices_p;
size_t example_index = 0;
int idx0 = 0;
size_t delta = 0;
for (size_t example_end_index : buffer.example_end_indices) {
size_t feature_index = 0;
for (; delta < example_end_index; ++delta) {
if (idx0 < total_num_features) {
*ix_p = example_index;
*(ix_p + 1) = feature_index;
ix_p += 2;
}
++feature_index;
++idx0;
}
++example_index;
}
CopySparseBufferToTensor(config.sparse[d].dtype, 0, &buffer, values);
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (!config.dense[d].variable_length) {
continue;
}
size_t max_num_features = 0;
std::vector<size_t>& end_indices =
varlen_dense_buffers[d].example_end_indices;
max_num_features = std::max(max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
max_num_features = std::max(max_num_features, example_size);
}
const size_t stride_size = config.dense[d].elements_per_stride;
const size_t max_num_elements = max_num_features / stride_size;
tf::TensorShape values_shape;
DCHECK_EQ(max_num_features % config.dense[d].elements_per_stride, 0);
const size_t batch_size = GetStringCount(serialized);
TF_RETURN_IF_ERROR(values_shape.AddDimWithStatus(batch_size));
TF_RETURN_IF_ERROR(values_shape.AddDimWithStatus(max_num_elements));
for (int i = 1; i < config.dense[d].shape.dims(); ++i) {
TF_RETURN_IF_ERROR(
values_shape.AddDimWithStatus(config.dense[d].shape.dim_size(i)));
}
TfLiteTensor* values = result->dense_values[d];
const size_t num_elements = GetTensorShape(values).FlatSize();
if (num_elements == 0) {
continue;
}
const size_t num_elements_per_minibatch = num_elements / batch_size;
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
FillAndCopyVarLen<int64_t>(d, num_elements, num_elements_per_minibatch,
config, varlen_dense_buffers, values);
break;
}
case tf::DT_FLOAT: {
FillAndCopyVarLen<float>(d, num_elements, num_elements_per_minibatch,
config, varlen_dense_buffers, values);
break;
}
default:
DCHECK(false) << "Encountered unexpected DataType "
<< config.dense[d].dtype
<< "in variable that should have been checked";
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (config.dense[d].variable_length) {
continue;
}
if (result->dense_values[d]->type == kTfLiteString) {
auto& in = result->dense_tensors[d];
auto vec = in.vec<tstring>();
const int batch_size = result->dense_values[d]->dims->data[0];
const int elements_per_stride = config.dense[d].elements_per_stride;
int total_size = 0;
std::vector<int32_t> offsets;
offsets.reserve(vec.size() + 1);
offsets.push_back(0);
int k = 0;
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
if (i < count) {
total_size += vec(k++).size();
offsets.push_back(total_size);
} else {
offsets.push_back(total_size);
}
}
}
const int32_t num_strings = offsets.size() - 1;
const size_t required_bytes = sizeof(int32_t) * (num_strings + 2) +
total_size;
char* tensor_buffer =
reinterpret_cast<char*>(result->dense_values[d]->data.raw);
if (result->dense_values[d]->bytes < required_bytes) {
if (result->dense_values[d]->data.raw) {
free(result->dense_values[d]->data.raw);
}
tensor_buffer = reinterpret_cast<char*>(malloc(required_bytes));
result->dense_values[d]->data.raw = tensor_buffer;
result->dense_values[d]->bytes = required_bytes;
}
const int32_t start = sizeof(int32_t) * (num_strings + 2);
memcpy(tensor_buffer, &num_strings, sizeof(int32_t));
for (size_t i = 0; i < offsets.size(); i++) {
int32_t offset_i = start + offsets[i];
memcpy(tensor_buffer + sizeof(int32_t) * (i + 1), &offset_i,
sizeof(int32_t));
}
absl::Span<const tstring> slice(vec.data(), vec.size());
CopyToBuffer(slice, tensor_buffer + start, count, batch_size,
elements_per_stride);
}
}
return absl::OkStatus();
}
}
enum InputTensor {
kExampleTensor = 0,
kNamesTensor = 1,
kSparseKeysTensor = 2,
kDenseKeysTensor = 3,
kRaggedKeysTensor = 4,
};
struct OpData {
FastParseExampleConfig config;
std::vector<tf::TensorShape> dense_shapes;
int dense_size = 0;
int sparse_size = 0;
std::unique_ptr<ConfigIndex> config_index;
int config_index_size;
SeededHasher hasher;
TfLiteResult got;
bool* quick_filter = nullptr;
int quick_filter_size;
bool created = false;
~OpData() {
if (quick_filter) {
free(quick_filter);
}
}
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
template <typename T>
tf::Tensor AsTensor(const std::vector<T>& val) {
tf::Tensor ret(tf::DataTypeToEnum<T>::value,
{static_cast<int64_t>(val.size())});
std::copy_n(val.begin(), val.size(), ret.flat<T>().data());
return ret;
}
enum Version {
V1,
V2,
};
tf::TensorShape TfLiteToTfShape(TfLiteIntArray* array) {
tf::TensorShape shape;
for (int i = 0; i < array->size; i++) {
shape.AddDim(array->data[i]);
}
return shape;
}
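// Prepare: recovers the op configuration from the custom options (either a
// serialized NodeDef or a flexbuffer map), resizes dense outputs for the
// batch, and marks the sparse outputs as dynamic.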
template <Version version>
TfLiteStatus PrepareParseExample(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->custom_initial_data);
data->config.dense.clear();
data->config.sparse.clear();
data->got.dense_values.clear();
const flexbuffers::Vector& v =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(node->custom_initial_data),
node->custom_initial_data_size)
.AsVector();
if (v.size() == 2) {
tf::NodeDef nodedef;
TF_LITE_ENSURE_EQ(context, nodedef.ParseFromString(v[1].AsString().str()),
true);
if (version == V1) {
data->dense_size = nodedef.attr().at("Ndense").i();
data->sparse_size = nodedef.attr().at("Nsparse").i();
} else if (version == V2) {
data->dense_size = nodedef.attr().at("Tdense").list().type_size();
data->sparse_size = nodedef.attr().at("num_sparse").i();
}
auto dense_shapes = nodedef.attr().at("dense_shapes").list();
if (data->dense_shapes.empty()) {
for (int i = 0; i < dense_shapes.shape_size(); ++i) {
data->dense_shapes.push_back(dense_shapes.shape(i));
}
}
} else {
const flexbuffers::Map& m =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(node->custom_initial_data),
node->custom_initial_data_size)
.AsMap();
const flexbuffers::TypedVector keys = m.Keys();
int num_sparse = 0;
int num_dense = 0;
for (int k = 0; k < keys.size(); ++k) {
const std::string key = keys[k].ToString();
const auto value = m[key];
if (key == "Nsparse" || key == "num_sparse") {
num_sparse = value.AsInt32();
}
if (key == "Ndense") {
num_dense = value.AsInt32();
}
}
data->sparse_size = num_sparse;
data->dense_size = num_dense;
if (version == V2) {
const TfLiteTensor* dense_key_tensor =
GetInput(context, node, kDenseKeysTensor);
data->dense_size = GetTensorShape(dense_key_tensor).FlatSize();
}
}
data->config.dense.reserve(data->dense_size);
data->config.sparse.reserve(data->sparse_size);
data->dense_shapes.reserve(data->dense_size);
const auto* serialized = GetInput(context, node, 0);
const int batch_size =
serialized->dims->size > 0 ? serialized->dims->data[0] : 1;
const bool missing_shape_info = data->dense_shapes.empty();
for (int i = 0; i < data->dense_size; i++) {
TfLiteTensor* dense_key_tensor =
GetOutput(context, node, data->sparse_size * 3 + i);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(dense_key_tensor->dims);
if (missing_shape_info) {
data->dense_shapes.push_back(TfLiteToTfShape(output_size));
}
const int original_size = data->dense_shapes[i].dims() > 0
? data->dense_shapes[i].dim_size(0)
: 1;
output_size->data[0] = batch_size * original_size;
context->ResizeTensor(context, dense_key_tensor, output_size);
}
size_t offset = 0;
for (int i = 0; i < data->sparse_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(2);
sparse_size->data[0] = batch_size;
sparse_size->data[1] = 2;
context->ResizeTensor(context, parse_output, sparse_size);
data->got.sparse_indices.push_back(parse_output);
}
offset += data->sparse_size;
for (int i = 0; i < data->sparse_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(1);
sparse_size->data[0] = 0;
context->ResizeTensor(context, parse_output, sparse_size);
data->got.sparse_values.push_back(parse_output);
}
offset += data->sparse_size;
for (int i = 0; i < data->sparse_size; i++) {
TfLiteTensor* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(1);
sparse_size->data[0] = 2;
context->ResizeTensor(context, parse_output, sparse_size);
auto* shapes_shape_t = reinterpret_cast<int64_t*>(parse_output->data.i64);
shapes_shape_t[0] = batch_size;
shapes_shape_t[1] = 1;
data->got.sparse_shapes.push_back(parse_output);
}
data->created = false;
return kTfLiteOk;
}
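// Eval: lazily builds the parse configuration from the key tensors on the
// first invocation, then parses the serialized batch of examples.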
template <Version version>
TfLiteStatus EvalParseExample(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (!data->created) {
for (int i = 0; i < data->sparse_size; i++) {
int input_index =
version == V1 ? kSparseKeysTensor + i : kSparseKeysTensor;
int string_index = version == V1 ? 0 : i;
const TfLiteTensor* sparse_key_tensor =
GetInput(context, node, input_index);
const auto key = GetString(sparse_key_tensor, string_index);
const auto* sparse_output =
GetOutput(context, node, i + data->sparse_size);
std::string k(key.str, key.len);
      switch (sparse_output->type) |
#include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include <cstdint>
#include <initializer_list>
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/example/feature_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace tf = ::tensorflow;
const char* kNodeDefTxt = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/dense_keys_0"
input: "ParseExample/Const"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_FLOAT } }
}
attr {
key: "dense_shapes"
value { list { shape { dim { size: 2 } } } }
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt2 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 0 }
}
attr {
key: "Nsparse"
value { i: 1 }
}
attr {
key: "Tdense"
value {}
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt3 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_STRING } }
}
attr {
key: "dense_shapes"
value { list { shape { dim { size: 1 } } } }
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt4 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 0 }
}
attr {
key: "Nsparse"
value { i: 1 }
}
attr {
key: "Tdense"
value {}
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_STRING } }
}
)pb";
const char* kNodeDefTxt5 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/dense_keys_0"
input: "ParseExample/Const"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_FLOAT } }
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
template <typename DefaultType>
class ParseExampleOpModel : public SingleOpModel {
public:
ParseExampleOpModel(std::vector<std::string> serialized_examples,
std::vector<std::string> sparse_keys,
std::vector<std::string> dense_keys,
std::initializer_list<DefaultType> dense_defaults,
std::vector<TensorType> dense_types,
std::vector<TensorType> sparse_types,
const char* text_def, int dense_size = 2) {
const int input_size = serialized_examples.size();
auto input_tensor_data = TensorData(TensorType_STRING, {input_size});
string_indices_.push_back(AddInput(input_tensor_data));
string_indices_.push_back(
AddConstInput<std::string>(TensorData(TensorType_STRING, {0}), {""}));
std::for_each(sparse_keys.begin(), sparse_keys.end(), [&](auto&&) {
string_indices_.push_back(AddInput(TensorData(TensorType_STRING, {1})));
});
std::for_each(dense_keys.begin(), dense_keys.end(), [&](auto&&) {
string_indices_.push_back(AddInput(TensorData(TensorType_STRING, {1})));
});
if (dense_size > 0) {
dense_defaults_ = AddConstInput<DefaultType>(
TensorData(dense_types[0], {dense_size}), dense_defaults);
}
if (!sparse_keys.empty()) {
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_indices_outputs_.push_back(AddOutput(TensorType_INT64));
}
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_values_outputs_.push_back(AddOutput(sparse_types[i]));
}
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_shapes_outputs_.push_back(AddOutput({TensorType_INT64, {2}}));
}
}
for (int i = 0; i < dense_keys.size(); i++) {
dense_outputs_.push_back(AddOutput({dense_types[i], {dense_size}}));
}
tf::NodeDef nodedef;
tf::protobuf::TextFormat::Parser parser;
tf::protobuf::io::ArrayInputStream input_stream(text_def, strlen(text_def));
if (!parser.Parse(&input_stream, &nodedef)) {
abort();
}
std::string serialized_nodedef;
nodedef.SerializeToString(&serialized_nodedef);
flexbuffers::Builder fbb;
fbb.Vector([&]() {
fbb.String(nodedef.op());
fbb.String(serialized_nodedef);
});
fbb.Finish();
const auto buffer = fbb.GetBuffer();
SetCustomOp("ParseExample", buffer, Register_PARSE_EXAMPLE);
BuildInterpreter({{input_size}});
int idx = 0;
PopulateStringTensor(string_indices_[idx++], serialized_examples);
PopulateStringTensor(string_indices_[idx++], {""});
for (const auto& key : sparse_keys) {
PopulateStringTensor(string_indices_[idx++], {key});
}
for (const auto& key : dense_keys) {
PopulateStringTensor(string_indices_[idx++], {key});
}
}
void ResizeInputTensor(std::vector<std::vector<int>> input_shapes) {
for (size_t i = 0; i < input_shapes.size(); ++i) {
const int input_idx = interpreter_->inputs()[i];
if (input_idx == kTfLiteOptionalTensor) continue;
const auto& shape = input_shapes[i];
if (shape.empty()) continue;
CHECK(interpreter_->ResizeInputTensor(input_idx, shape) == kTfLiteOk);
}
}
template <typename T>
std::vector<T> GetSparseIndicesOutput(int i) {
return ExtractVector<T>(sparse_indices_outputs_[i]);
}
template <typename T>
std::vector<T> GetSparseValuesOutput(int i) {
return ExtractVector<T>(sparse_values_outputs_[i]);
}
template <typename T>
std::vector<T> GetSparseShapesOutput(int i) {
return ExtractVector<T>(sparse_shapes_outputs_[i]);
}
template <typename T>
std::vector<T> GetDenseOutput(int i) {
return ExtractVector<T>(dense_outputs_[i]);
}
std::vector<std::string> GetStringOutput(int i) {
auto* t = interpreter_->tensor(i);
int count = GetStringCount(t);
std::vector<std::string> v;
for (int i = 0; i < count; ++i) {
auto ref = GetString(t, i);
v.emplace_back(ref.str, ref.len);
}
return v;
}
int DenseDefaults() { return dense_defaults_; }
int SparseValuesOutputs(int i) { return sparse_values_outputs_[i]; }
int DenseOutputs(int i) { return dense_outputs_[i]; }
std::vector<int> dense_outputs_;
std::vector<int> sparse_indices_outputs_;
std::vector<int> sparse_shapes_outputs_;
std::vector<int> sparse_values_outputs_;
std::vector<int> string_indices_;
int dense_defaults_ = -1;
};
TEST(ParseExampleOpsTest, SimpleTest) {
tf::Example example;
tf::AppendFeatureValues<float>({1.5f, 1.5f}, "time", &example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
ParseExampleOpModel<float> m({example.SerializeAsString()}, {}, {"time"},
{0.f, 0.f}, {TensorType_FLOAT32}, {},
kNodeDefTxt);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear({1.5f, 1.5f})));
}
TEST(ParseExampleOpsTest, SparseTest) {
tf::Example example;
tf::AppendFeatureValues<float>({1.5f}, "time", &example);
ParseExampleOpModel<float> m({example.SerializeAsString()}, {"time"}, {}, {},
{}, {TensorType_FLOAT32}, kNodeDefTxt2, 0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetSparseIndicesOutput<int64_t>(0),
ElementsAreArray(ArrayFloatNear({0, 0})));
EXPECT_THAT(m.GetSparseValuesOutput<float>(0),
ElementsAreArray(ArrayFloatNear({1.5f})));
EXPECT_THAT(m.GetSparseShapesOutput<int64_t>(0),
ElementsAreArray(ArrayFloatNear({1, 1})));
}
TEST(ParseExampleOpsTest, SimpleBytesTest) {
tf::Example example;
const std::string test_data = "simpletest";
tf::AppendFeatureValues<tensorflow::tstring>({test_data}, "time", &example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
std::string default_value = "missing";
ParseExampleOpModel<std::string> m({example.SerializeAsString()}, {},
{"time"}, {default_value},
{TensorType_STRING}, {}, kNodeDefTxt3, 1);
m.PopulateStringTensor(m.DenseDefaults(), {default_value});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
  std::vector<std::string> c = m.GetStringOutput(m.DenseOutputs(0));
EXPECT_EQ(1, c.size());
EXPECT_EQ(test_data, c[0]);
}
TEST(ParseExampleOpsTest, SparseBytesTest) {
tf::Example example;
const std::string test_data = "simpletest";
tf::AppendFeatureValues<tensorflow::tstring>({test_data, test_data}, "time",
&example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
ParseExampleOpModel<std::string> m({example.SerializeAsString()}, {"time"},
{}, {}, {}, {TensorType_STRING},
kNodeDefTxt4, 0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetSparseIndicesOutput<int64_t>(0),
testing::ElementsAreArray({0, 0, 0, 1}));
auto values = m.GetStringOutput(m.SparseValuesOutputs(0));
EXPECT_EQ(2, values.size());
EXPECT_EQ(test_data, values[0]);
EXPECT_EQ(test_data, values[1]);
EXPECT_THAT(m.GetSparseShapesOutput<int64_t>(0),
testing::ElementsAreArray({1, 2}));
}
TEST(ParseExampleOpsTest, ResizeTest) {
const int num_tests = 3;
std::vector<tf::Example> examples(num_tests);
std::vector<std::vector<float>> expected(num_tests);
std::vector<std::vector<std::string>> inputs(num_tests);
std::vector<int> sizes;
for (int i = 0; i < num_tests; ++i) {
float val = i;
std::initializer_list<float> floats = {val + val / 10.f, -val - val / 10.f};
tf::AppendFeatureValues<float>({val, val}, "num", &examples[i]);
tf::AppendFeatureValues<float>(floats, "time", &examples[i]);
sizes.push_back((num_tests - i) * 2);
for (int j = 0; j < sizes.back(); ++j) {
inputs[i].push_back(examples[i].SerializeAsString());
expected[i].insert(expected[i].end(), floats.begin(), floats.end());
}
}
ParseExampleOpModel<float> m(inputs[0], {}, {"time"}, {0.f, 0.f},
{TensorType_FLOAT32}, {}, kNodeDefTxt);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[0])));
for (int i = 1; i < num_tests; ++i) {
m.ResizeInputTensor({{sizes[i]}});
m.AllocateAndDelegate(false);
m.PopulateStringTensor(0, inputs[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[i])));
}
}
TEST(ParseExampleOpsTest, ResizeMissingInfoTest) {
const int num_tests = 3;
std::vector<tf::Example> examples(num_tests);
std::vector<std::vector<float>> expected(num_tests);
std::vector<std::vector<std::string>> inputs(num_tests);
std::vector<int> sizes;
for (int i = 0; i < num_tests; ++i) {
float val = i;
std::initializer_list<float> floats = {val + val / 10.f, -val - val / 10.f};
tf::AppendFeatureValues<float>({val, val}, "num", &examples[i]);
tf::AppendFeatureValues<float>(floats, "time", &examples[i]);
sizes.push_back((num_tests - i) * 2);
for (int j = 0; j < sizes.back(); ++j) {
inputs[i].push_back(examples[i].SerializeAsString());
expected[i].insert(expected[i].end(), floats.begin(), floats.end());
}
}
ParseExampleOpModel<float> m(inputs[0], {}, {"time"}, {0.f, 0.f},
{TensorType_FLOAT32}, {}, kNodeDefTxt5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[0])));
for (int i = 1; i < num_tests; ++i) {
m.ResizeInputTensor({{sizes[i]}});
m.AllocateAndDelegate(false);
m.PopulateStringTensor(0, inputs[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[i])));
}
}
}
}
} |
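// Illustrative sketch (not part of the dataset rows above or below): how the
// serialized tf.Example strings fed to ParseExampleOpModel are typically
// produced. It relies only on the feature_util.h helpers already exercised by
// the tests above; the function name and the "time" feature key are
// placeholders.
#include <string>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature_util.h"
std::string MakeSerializedExampleSketch() {
  tensorflow::Example example;
  // One float feature named "time" with two values, mirroring SimpleTest.
  tensorflow::AppendFeatureValues<float>({1.5f, 1.5f}, "time", &example);
  // The returned string is what ParseExampleOpModel takes as its first
  // constructor argument.
  return example.SerializeAsString();
}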
944 | cpp | tensorflow/tensorflow | example_proto_fast_parsing | tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.cc | tensorflow/core/util/example_proto_fast_parsing_test.cc | #ifndef TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_FAST_PARSING_H_
#define TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_FAST_PARSING_H_
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
namespace example {
struct FastParseExampleConfig {
struct Dense {
Dense(StringPiece feature_name, DataType dtype, PartialTensorShape shape,
Tensor default_value, bool variable_length,
std::size_t elements_per_stride)
: feature_name(feature_name),
dtype(dtype),
shape(std::move(shape)),
default_value(std::move(default_value)),
variable_length(variable_length),
elements_per_stride(elements_per_stride) {}
Dense() = default;
tstring feature_name;
DataType dtype;
PartialTensorShape shape;
Tensor default_value;
bool variable_length;
std::size_t elements_per_stride;
};
struct Sparse {
Sparse(StringPiece feature_name, DataType dtype)
: feature_name(feature_name),
dtype(dtype) {}
Sparse() = default;
tstring feature_name;
DataType dtype;
};
struct Ragged {
Ragged(StringPiece feature_name, DataType dtype, DataType splits_dtype)
: feature_name(feature_name),
dtype(dtype),
splits_dtype(splits_dtype) {}
Ragged() = default;
tstring feature_name;
DataType dtype;
DataType splits_dtype;
};
std::vector<Dense> dense;
std::vector<Sparse> sparse;
std::vector<Ragged> ragged;
bool collect_feature_stats = false;
};
struct PerExampleFeatureStats {
size_t features_count = 0;
size_t feature_values_count = 0;
};
struct Result {
std::vector<Tensor> sparse_indices;
std::vector<Tensor> sparse_values;
std::vector<Tensor> sparse_shapes;
std::vector<Tensor> dense_values;
std::vector<Tensor> ragged_values;
std::vector<Tensor> ragged_splits;
std::vector<Tensor> ragged_outer_splits;
std::vector<PerExampleFeatureStats> feature_stats;
};
Status FastParseExample(const FastParseExampleConfig& config,
absl::Span<const tstring> serialized,
absl::Span<const tstring> example_names,
thread::ThreadPool* thread_pool, Result* result);
typedef FastParseExampleConfig FastParseSingleExampleConfig;
Status FastParseSingleExample(const FastParseSingleExampleConfig& config,
StringPiece serialized, Result* result);
Status FastParseSequenceExample(
const example::FastParseExampleConfig& context_config,
const example::FastParseExampleConfig& sequence_config,
absl::Span<const tstring> serialized,
absl::Span<const tstring> example_names, thread::ThreadPool* thread_pool,
example::Result* context_result, example::Result* sequence_result,
std::vector<Tensor>* dense_feature_lengths, bool is_batch = true);
bool TestFastParse(const string& serialized, Example* example);
}
}
#endif
#include "tensorflow/core/util/example_proto_fast_parsing.h"
#include <algorithm>
#include <functional>
#include <optional>
#include <utility>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/presized_cuckoo_map.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
namespace example {
namespace {
template <typename T>
using SmallVector = gtl::InlinedVector<T, 4>;
// A write-through view over a fixed-capacity buffer: writes past the end are
// counted but dropped, so callers can detect truncation afterwards via
// EndDistance().
template <typename T>
class LimitedArraySlice {
public:
using value_type = T;
LimitedArraySlice(T* begin, size_t num_elements)
: current_(begin), begin_(begin), end_(begin + num_elements) {}
int64_t EndDistance() const { return end_ - current_; }
void push_back(T&& value) {
if (EndDistance() > 0) *current_ = std::move(value);
++current_;
}
T& construct_at_end() {
DCHECK_GT(EndDistance(), 0);
return *(current_++);
}
T& back() { return *(current_ - 1); }
size_t size() const { return std::min(current_ - begin_, end_ - begin_); }
void resize(size_t size) { current_ = begin_ + size; }
T* data() { return begin_; }
private:
T* current_;
T* begin_;
T* end_;
};
template <typename A>
auto EnableAliasing(A* a) -> decltype(a->EnableAliasing(true), void()) {
a->EnableAliasing(true);
}
template <typename A>
void EnableAliasing(A&& a) {}
uint8 PeekTag(protobuf::io::CodedInputStream* stream) {
DCHECK(stream != nullptr);
const void* ptr;
int size;
if (!stream->GetDirectBufferPointer(&ptr, &size)) return 0;
return *static_cast<const uint8*>(ptr);
}
// Proto wire-format tag helpers: a tag byte is (field_number << 3) | wire_type,
// where wire type 0 = varint, 2 = length-delimited and 5 = 32-bit fixed.
constexpr uint8 kVarintTag(uint32 tag) { return (tag << 3) | 0; }
constexpr uint8 kDelimitedTag(uint32 tag) { return (tag << 3) | 2; }
constexpr uint8 kFixed32Tag(uint32 tag) { return (tag << 3) | 5; }
namespace parsed {
class Feature {
public:
Feature() = default;
explicit Feature(StringPiece serialized) : serialized_(serialized) {}
Status ParseDataType(DataType* dtype) {
DCHECK(dtype != nullptr);
if (serialized_.empty()) {
*dtype = DT_INVALID;
return absl::OkStatus();
}
uint8 oneof_tag = static_cast<uint8>(*serialized_.data());
serialized_.remove_prefix(1);
switch (oneof_tag) {
case kDelimitedTag(1):
*dtype = DT_STRING;
break;
case kDelimitedTag(2):
*dtype = DT_FLOAT;
break;
case kDelimitedTag(3):
*dtype = DT_INT64;
break;
default:
*dtype = DT_INVALID;
return errors::InvalidArgument("Unsupported datatype.");
}
return absl::OkStatus();
}
bool GetNumElementsInBytesList(int* num_elements) {
protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size());
EnableAliasing(&stream);
uint32 length = 0;
if (!stream.ReadVarint32(&length)) return false;
auto limit = stream.PushLimit(length);
*num_elements = 0;
while (!stream.ExpectAtEnd()) {
if (!stream.ExpectTag(kDelimitedTag(1))) return false;
uint32 bytes_length = 0;
if (!stream.ReadVarint32(&bytes_length)) return false;
if (!stream.Skip(bytes_length)) return false;
++*num_elements;
}
stream.PopLimit(limit);
return true;
}
tstring* construct_at_end(LimitedArraySlice<tstring>* bytes_list) {
if (bytes_list->EndDistance() <= 0) {
return nullptr;
}
return &bytes_list->construct_at_end();
}
tstring* construct_at_end(SmallVector<tstring>* bytes_list) {
return &bytes_list->emplace_back();
}
template <typename Result>
bool ParseBytesList(Result* bytes_list) {
DCHECK(bytes_list != nullptr);
protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size());
EnableAliasing(&stream);
uint32 length;
if (!stream.ReadVarint32(&length)) return false;
auto limit = stream.PushLimit(length);
while (!stream.ExpectAtEnd()) {
if (!stream.ExpectTag(kDelimitedTag(1))) return false;
uint32 bytes_length;
if (!stream.ReadVarint32(&bytes_length)) return false;
tstring* bytes = construct_at_end(bytes_list);
if (bytes == nullptr) return false;
bytes->resize_uninitialized(bytes_length);
if (!stream.ReadRaw(bytes->data(), bytes_length)) return false;
}
stream.PopLimit(limit);
return true;
}
template <typename Result>
bool ParseFloatList(Result* float_list) {
DCHECK(float_list != nullptr);
protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size());
EnableAliasing(&stream);
uint32 length;
if (!stream.ReadVarint32(&length)) return false;
auto limit = stream.PushLimit(length);
if (!stream.ExpectAtEnd()) {
uint8 peek_tag = PeekTag(&stream);
if (peek_tag != kDelimitedTag(1) && peek_tag != kFixed32Tag(1)) {
return false;
}
constexpr int32_t kNumFloatBytes = 4;
if (peek_tag == kDelimitedTag(1)) {
if (!stream.ExpectTag(kDelimitedTag(1))) return false;
uint32 packed_length;
if (!stream.ReadVarint32(&packed_length)) return false;
auto packed_limit = stream.PushLimit(packed_length);
const size_t initial_size = float_list->size();
float_list->resize(initial_size + packed_length / kNumFloatBytes);
if (port::kLittleEndian &&
sizeof(typename Result::value_type) == kNumFloatBytes) {
const uint32 bytes_to_copy =
std::min(static_cast<uint32>((float_list->size() - initial_size) *
kNumFloatBytes),
packed_length);
if (!stream.ReadRaw(float_list->data() + initial_size, bytes_to_copy))
return false;
} else {
int64_t index = initial_size;
while (!stream.ExpectAtEnd()) {
uint32 buffer32;
if (!stream.ReadLittleEndian32(&buffer32)) return false;
if (index < float_list->size()) {
float_list->data()[index] = absl::bit_cast<float>(buffer32);
++index;
}
}
}
stream.PopLimit(packed_limit);
} else {
const size_t initial_size = float_list->size();
const int64_t num_elements =
stream.BytesUntilLimit() / (1 + kNumFloatBytes);
float_list->resize(initial_size + num_elements);
int64_t index = initial_size;
while (!stream.ExpectAtEnd()) {
if (!stream.ExpectTag(kFixed32Tag(1))) return false;
uint32 buffer32;
if (!stream.ReadLittleEndian32(&buffer32)) return false;
float_list->data()[index] = absl::bit_cast<float>(buffer32);
++index;
}
}
}
stream.PopLimit(limit);
return true;
}
template <typename Result>
bool ParseInt64List(Result* int64_list) {
DCHECK(int64_list != nullptr);
protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized_.data()), serialized_.size());
EnableAliasing(&stream);
uint32 length;
if (!stream.ReadVarint32(&length)) return false;
auto limit = stream.PushLimit(length);
if (!stream.ExpectAtEnd()) {
uint8 peek_tag = PeekTag(&stream);
if (peek_tag != kDelimitedTag(1) && peek_tag != kVarintTag(1)) {
return false;
}
if (peek_tag == kDelimitedTag(1)) {
if (!stream.ExpectTag(kDelimitedTag(1))) return false;
uint32 packed_length;
if (!stream.ReadVarint32(&packed_length)) return false;
auto packed_limit = stream.PushLimit(packed_length);
while (!stream.ExpectAtEnd()) {
protobuf_uint64 n;
if (!stream.ReadVarint64(&n)) return false;
int64_list->push_back(static_cast<int64_t>(n));
}
stream.PopLimit(packed_limit);
} else {
while (!stream.ExpectAtEnd()) {
if (!stream.ExpectTag(kVarintTag(1))) return false;
protobuf_uint64 n;
if (!stream.ReadVarint64(&n)) return false;
int64_list->push_back(static_cast<int64_t>(n));
}
}
}
stream.PopLimit(limit);
return true;
}
StringPiece GetSerialized() const { return serialized_; }
private:
StringPiece serialized_;
};
using FeatureMapEntry = std::pair<StringPiece, Feature>;
using Example = std::vector<FeatureMapEntry>;
}
inline bool SkipExtraneousTag(protobuf::io::CodedInputStream* stream) {
uint32 data;
protobuf_uint64 dummy;
switch (stream->ReadTag() & 0x7) {
case 0:
if (!stream->ReadVarint32(&data)) return false;
return true;
case 1:
if (!stream->ReadLittleEndian64(&dummy)) return false;
return true;
case 2:
if (!stream->ReadVarint32(&data)) return false;
stream->Skip(data);
return true;
case 3:
return false;
case 4:
return false;
case 5:
if (!stream->ReadLittleEndian32(&data)) return false;
return true;
}
return false;
}
bool ParseString(protobuf::io::CodedInputStream* stream, StringPiece* result) {
DCHECK(stream != nullptr);
DCHECK(result != nullptr);
uint32 length;
if (!stream->ReadVarint32(&length)) return false;
if (length == 0) {
*result = StringPiece(nullptr, 0);
return true;
}
const void* stream_alias;
int stream_size;
if (!stream->GetDirectBufferPointer(&stream_alias, &stream_size)) {
return false;
}
if (static_cast<uint32>(stream_size) < length) return false;
*result = StringPiece(static_cast<const char*>(stream_alias), length);
stream->Skip(length);
return true;
}
bool ParseFeatureMapEntry(protobuf::io::CodedInputStream* stream,
parsed::FeatureMapEntry* feature_map_entry) {
DCHECK(stream != nullptr);
DCHECK(feature_map_entry != nullptr);
uint32 length;
if (!stream->ReadVarint32(&length)) return false;
auto limit = stream->PushLimit(length);
for (int n = 0; n < 2; ++n) {
const uint32_t tag = stream->ReadTag();
switch (tag) {
case kDelimitedTag(1):
if (!ParseString(stream, &feature_map_entry->first)) return false;
break;
case kDelimitedTag(2): {
StringPiece feature_string_piece;
if (!ParseString(stream, &feature_string_piece)) return false;
feature_map_entry->second = parsed::Feature(feature_string_piece);
break;
}
default:
return false;
}
}
if (!stream->ExpectAtEnd()) return false;
stream->PopLimit(limit);
return true;
}
bool ParseFeatures(protobuf::io::CodedInputStream* stream,
parsed::Example* example) {
DCHECK(stream != nullptr);
DCHECK(example != nullptr);
uint32 length;
if (!stream->ReadVarint32(&length)) return false;
auto limit = stream->PushLimit(length);
while (!stream->ExpectAtEnd()) {
parsed::FeatureMapEntry feature_map_entry;
if (!stream->ExpectTag(kDelimitedTag(1))) return false;
if (!ParseFeatureMapEntry(stream, &feature_map_entry)) return false;
example->push_back(std::move(feature_map_entry));
}
stream->PopLimit(limit);
return true;
}
bool ParseExample(protobuf::io::CodedInputStream* stream,
parsed::Example* example) {
DCHECK(stream != nullptr);
DCHECK(example != nullptr);
while (!stream->ExpectAtEnd()) {
if (!stream->ExpectTag(kDelimitedTag(1))) {
if (!SkipExtraneousTag(stream)) return false;
} else {
if (!ParseFeatures(stream, example)) return false;
}
}
return true;
}
bool ParseExample(StringPiece serialized, parsed::Example* example) {
DCHECK(example != nullptr);
protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8*>(serialized.data()), serialized.size());
EnableAliasing(&stream);
return ParseExample(&stream, example);
}
}
bool TestFastParse(const string& serialized, Example* example) {
DCHECK(example != nullptr);
parsed::Example parsed_example;
if (!ParseExample(serialized, &parsed_example)) return false;
auto& features = *example->mutable_features();
size_t parsed_example_size = parsed_example.size();
for (size_t i = 0; i < parsed_example_size; ++i) {
parsed::FeatureMapEntry& name_and_feature =
parsed_example[parsed_example_size - i - 1];
string name(name_and_feature.first);
if ((*features.mutable_feature()).count(name) > 0) continue;
auto& value = (*features.mutable_feature())[name];
DataType dtype;
if (!name_and_feature.second.ParseDataType(&dtype).ok()) return false;
switch (dtype) {
case DT_INVALID:
break;
case DT_STRING: {
SmallVector<tstring> list;
if (!name_and_feature.second.ParseBytesList(&list)) return false;
auto* result_list = value.mutable_bytes_list();
for (auto& bytes : list) {
result_list->add_value(bytes.data(), bytes.size());
}
break;
}
case DT_FLOAT: {
SmallVector<float> list;
if (!name_and_feature.second.ParseFloatList(&list)) return false;
auto* result_list = value.mutable_float_list();
for (float f : list) {
result_list->add_value(f);
}
break;
}
case DT_INT64: {
SmallVector<int64_t> list;
if (!name_and_feature.second.ParseInt64List(&list)) return false;
auto* result_list = value.mutable_int64_list();
for (int64_t i : list) {
result_list->add_value(i);
}
break;
}
default:
LOG(FATAL) << "Should not happen.";
}
}
return true;
}
namespace {
using Config = FastParseExampleConfig;
// Invokes f(0), ..., f(n - 1). With a thread pool, indices 1..n-1 are scheduled
// on the pool while f(0) runs on the calling thread; returns only after every
// call has finished.
void ParallelFor(const std::function<void(size_t)>& f, size_t n,
                 thread::ThreadPool* thread_pool) {
if (n == 0) return;
if (thread_pool == nullptr) {
for (size_t i = 0; i < n; ++i) {
f(i);
}
} else {
BlockingCounter counter(n - 1);
for (size_t i = 1; i < n; ++i) {
thread_pool->Schedule([i, &f, &counter] {
f(i);
counter.DecrementCount();
});
}
f(0);
counter.Wait();
}
}
enum class Type { Dense, Sparse, Ragged };
struct SparseBuffer {
SmallVector<tstring> bytes_list;
SmallVector<float> float_list;
SmallVector<int64_t> int64_list;
std::vector<size_t> example_end_indices;
};
struct SeededHasher {
uint64 operator()(StringPiece s) const {
return Hash64(s.data(), s.size(), seed);
}
uint64 seed{0xDECAFCAFFE};
};
void LogDenseFeatureDataLoss(StringPiece feature_name) {
LOG(WARNING) << "Data loss! Feature '" << feature_name
<< "' is present in multiple concatenated "
"tf.Examples. Ignoring all but last one.";
static auto* duplicated_dense_feature = monitoring::Counter<0>::New(
"/tensorflow/core/util/example_proto_fast_parsing/"
"duplicated_dense_feature",
"Dense feature appears twice in a tf.Example");
duplicated_dense_feature->GetCell()->IncrementBy(1);
}
void LogSparseFeatureDataLoss(StringPiece feature_name) {
LOG(WARNING) << "Data loss! Feature '" << feature_name
<< "' is present in multiple concatenated "
"tf.Examples. Ignoring all but last one.";
static auto* duplicated_sparse_feature = monitoring::Counter<0>::New(
"/tensorflow/core/util/example_proto_fast_parsing/"
"duplicated_sparse_feature",
"Sparse feature appears twice in a tf.Example");
duplicated_sparse_feature->GetCell()->IncrementBy(1);
}
Status FastParseSerializedExample(
const tstring& serialized_example, const tstring& example_name,
const size_t example_index, const Config& config,
const PresizedCuckooMap<std::pair<size_t, Type>>& config_index,
SeededHasher hasher, std::vector<Tensor>* output_dense,
std::vector<SparseBuffer>* output_varlen_dense,
std::vector<SparseBuffer>* output_sparse,
std::vector<SparseBuffer>* output_ragged,
PerExampleFeatureStats* output_stats) {
DCHECK(output_dense != nullptr);
DCHECK(output_sparse != nullptr);
DCHECK(output_ragged != nullptr);
parsed::Example parsed_example;
if (!ParseExample(serialized_example, &parsed_example)) {
return errors::InvalidArgument("Could not parse example input, value: '",
serialized_example, "'");
}
std::vector<int64_t> sparse_feature_last_example(config.sparse.size(), -1);
std::vector<int64_t> dense_feature_last_example(config.dense.size(), -1);
std::vector<int64_t> ragged_feature_last_example(config.ragged.size(), -1);
const size_t parsed_example_size = parsed_example.size();
if (output_stats) {
output_stats->features_count = parsed_example_size;
}
for (size_t i = 0; i < parsed_example_size; ++i) {
parsed::FeatureMapEntry& name_and_feature =
parsed_example[parsed_example_size - i - 1];
const StringPiece feature_name = name_and_feature.first;
parsed::Feature& feature = name_and_feature.second;
std::pair<size_t, Type> d_and_type;
uint64 h = hasher(feature_name);
if (!config_index.Find(h, &d_and_type)) continue;
size_t d = d_and_type.first;
bool is_dense = d_and_type.second == Type::Dense;
bool is_ragged = d_and_type.second == Type::Ragged;
{
const tstring& config_feature_name =
is_dense ? config.dense[d].feature_name
: (is_ragged ? config.ragged[d].feature_name
: config.sparse[d].feature_name);
if (feature_name != config_feature_name) continue;
}
auto example_error = [&](StringPiece suffix) {
return errors::InvalidArgument("Name: ", example_name,
", Key: ", feature_name,
", Index: ", example_index, ". ", suffix);
};
auto parse_error = [&] {
return example_error("Can't parse serialized Example.");
};
DataType example_dtype;
TF_RETURN_IF_ERROR(feature.ParseDataType(&example_dtype));
if (is_dense) {
if (example_dtype == DT_INVALID) continue;
if (dense_feature_last_example[d] == example_index) {
LogDenseFeatureDataLoss(feature_name);
continue;
}
dense_feature_last_example[d] = example_index;
if (example_dtype != config.dense[d].dtype) {
return example_error(strings::StrCat(
"Data types don't match. Data type: ",
DataTypeString(example_dtype),
" but expected type: ", DataTypeString(config.dense[d].dtype)));
}
if (!config.dense[d].variable_length) {
Tensor& out = (*output_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
if (output_stats) {
output_stats->feature_values_count += num_elements;
}
const std::size_t offset = example_index * num_elements;
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(strings::StrCat(
"Number of ", type_str,
" values != expected. "
"Values size: ",
size,
" but output shape: ", config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case DT_INT64: {
auto out_p = out.flat<int64_t>().data() + offset;
LimitedArraySlice<int64_t> slice(out_p, num_elements);
if (!feature.ParseInt64List(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "int64");
}
break;
}
case DT_FLOAT: {
auto out_p = out.flat<float>().data() + offset;
LimitedArraySlice<float> slice(out_p, num_elements);
if (!feature.ParseFloatList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "float");
}
break;
}
case DT_STRING: {
auto out_p = out.flat<tstring>().data() + offset;
LimitedArraySlice<tstring> slice(out_p, num_elements);
if (!feature.ParseBytesList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "bytes");
}
break;
}
default | #include "tensorflow/core/util/example_proto_fast_parsing.h"
#include <unordered_set>
#include <utility>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/example_proto_fast_parsing_test.pb.h"
namespace tensorflow {
namespace example {
namespace {
constexpr char kDenseInt64Key[] = "dense_int64";
constexpr char kDenseFloatKey[] = "dense_float";
constexpr char kDenseStringKey[] = "dense_string";
constexpr char kSparseInt64Key[] = "sparse_int64";
constexpr char kSparseFloatKey[] = "sparse_float";
constexpr char kSparseStringKey[] = "sparse_string";
string SerializedToReadable(string serialized) {
string result;
result += '"';
for (char c : serialized)
result += strings::StrCat("\\x", strings::Hex(c, strings::kZeroPad2));
result += '"';
return result;
}
template <class T>
string Serialize(const T& example) {
string serialized;
example.SerializeToString(&serialized);
return serialized;
}
void TestCorrectness(const string& serialized) {
Example example;
Example fast_example;
EXPECT_TRUE(example.ParseFromString(serialized));
example.DiscardUnknownFields();
EXPECT_TRUE(TestFastParse(serialized, &fast_example));
EXPECT_EQ(example.DebugString(), fast_example.DebugString());
if (example.DebugString() != fast_example.DebugString()) {
LOG(ERROR) << "Bad serialized: " << SerializedToReadable(serialized);
}
}
TEST(FastParse, IgnoresPrecedingUnknownTopLevelFields) {
ExampleWithExtras example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
example.set_extra1("some_str");
example.set_extra2(123);
example.set_extra3(234);
example.set_extra4(345);
example.set_extra5(4.56);
example.add_extra6(5.67);
example.add_extra6(6.78);
(*example.mutable_extra7()->mutable_feature())["extra7"]
.mutable_int64_list()
->add_value(1337);
Example context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, IgnoresTrailingUnknownTopLevelFields) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
ExampleWithExtras context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
context.set_extra1("some_str");
context.set_extra2(123);
context.set_extra3(234);
context.set_extra4(345);
context.set_extra5(4.56);
context.add_extra6(5.67);
context.add_extra6(6.78);
(*context.mutable_extra7()->mutable_feature())["extra7"]
.mutable_int64_list()
->add_value(1337);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, SingleInt64WithContext) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
Example context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, DenseInt64WithContext) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(0);
Example context;
(*context.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(15);
string serialized = Serialize(example) + Serialize(context);
{
Example deserialized;
EXPECT_TRUE(deserialized.ParseFromString(serialized));
EXPECT_EQ(deserialized.DebugString(), context.DebugString());
}
TestCorrectness(serialized);
}
TEST(FastParse, NonPacked) {
TestCorrectness(
"\x0a\x0e\x0a\x0c\x0a\x03\x61\x67\x65\x12\x05\x1a\x03\x0a\x01\x0d");
}
TEST(FastParse, Packed) {
TestCorrectness(
"\x0a\x0d\x0a\x0b\x0a\x03\x61\x67\x65\x12\x04\x1a\x02\x08\x0d");
}
TEST(FastParse, ValueBeforeKeyInMap) {
TestCorrectness("\x0a\x12\x0a\x10\x12\x09\x0a\x07\x0a\x05value\x0a\x03key");
}
TEST(FastParse, EmptyFeatures) {
Example example;
example.mutable_features();
TestCorrectness(Serialize(example));
}
void TestCorrectnessJson(const string& json) {
auto resolver = protobuf::util::NewTypeResolverForDescriptorPool(
"type.googleapis.com", protobuf::DescriptorPool::generated_pool());
string serialized;
auto s = protobuf::util::JsonToBinaryString(
resolver, "type.googleapis.com/tensorflow.Example", json, &serialized);
EXPECT_TRUE(s.ok()) << s;
delete resolver;
TestCorrectness(serialized);
}
TEST(FastParse, JsonUnivalent) {
TestCorrectnessJson(
"{'features': {"
" 'feature': {'age': {'int64_list': {'value': [0]} }}, "
" 'feature': {'flo': {'float_list': {'value': [1.1]} }}, "
" 'feature': {'byt': {'bytes_list': {'value': ['WW8='] }}}"
"}}");
}
TEST(FastParse, JsonMultivalent) {
TestCorrectnessJson(
"{'features': {"
" 'feature': {'age': {'int64_list': {'value': [0, 13, 23]} }}, "
" 'feature': {'flo': {'float_list': {'value': [1.1, 1.2, 1.3]} }}, "
" 'feature': {'byt': {'bytes_list': {'value': ['WW8=', 'WW8K'] }}}"
"}}");
}
TEST(FastParse, SingleInt64) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
TestCorrectness(Serialize(example));
}
static string ExampleWithSomeFeatures() {
Example example;
(*example.mutable_features()->mutable_feature())[""];
(*example.mutable_features()->mutable_feature())["empty_bytes_list"]
.mutable_bytes_list();
(*example.mutable_features()->mutable_feature())["empty_float_list"]
.mutable_float_list();
(*example.mutable_features()->mutable_feature())["empty_int64_list"]
.mutable_int64_list();
BytesList* bytes_list =
(*example.mutable_features()->mutable_feature())["bytes_list"]
.mutable_bytes_list();
bytes_list->add_value("bytes1");
bytes_list->add_value("bytes2");
FloatList* float_list =
(*example.mutable_features()->mutable_feature())["float_list"]
.mutable_float_list();
float_list->add_value(1.0);
float_list->add_value(2.0);
Int64List* int64_list =
(*example.mutable_features()->mutable_feature())["int64_list"]
.mutable_int64_list();
int64_list->add_value(3);
int64_list->add_value(270);
int64_list->add_value(86942);
return Serialize(example);
}
TEST(FastParse, SomeFeatures) { TestCorrectness(ExampleWithSomeFeatures()); }
static void AddDenseFeature(const char* feature_name, DataType dtype,
PartialTensorShape shape, bool variable_length,
size_t elements_per_stride,
FastParseExampleConfig* out_config) {
out_config->dense.emplace_back();
auto& new_feature = out_config->dense.back();
new_feature.feature_name = feature_name;
new_feature.dtype = dtype;
new_feature.shape = std::move(shape);
new_feature.default_value = Tensor(dtype, {});
new_feature.variable_length = variable_length;
new_feature.elements_per_stride = elements_per_stride;
}
static void AddSparseFeature(const char* feature_name, DataType dtype,
FastParseExampleConfig* out_config) {
out_config->sparse.emplace_back();
auto& new_feature = out_config->sparse.back();
new_feature.feature_name = feature_name;
new_feature.dtype = dtype;
}
TEST(FastParse, StatsCollection) {
const size_t kNumExamples = 13;
std::vector<tstring> serialized(kNumExamples, ExampleWithSomeFeatures());
FastParseExampleConfig config_dense;
AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_dense);
AddDenseFeature("float_list", DT_FLOAT, {2}, false, 2, &config_dense);
AddDenseFeature("int64_list", DT_INT64, {3}, false, 3, &config_dense);
config_dense.collect_feature_stats = true;
FastParseExampleConfig config_varlen;
AddDenseFeature("bytes_list", DT_STRING, {-1}, true, 1, &config_varlen);
AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_varlen);
AddDenseFeature("int64_list", DT_INT64, {-1}, true, 1, &config_varlen);
config_varlen.collect_feature_stats = true;
FastParseExampleConfig config_sparse;
AddSparseFeature("bytes_list", DT_STRING, &config_sparse);
AddSparseFeature("float_list", DT_FLOAT, &config_sparse);
AddSparseFeature("int64_list", DT_INT64, &config_sparse);
config_sparse.collect_feature_stats = true;
FastParseExampleConfig config_mixed;
AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_mixed);
AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_mixed);
AddSparseFeature("int64_list", DT_INT64, &config_mixed);
config_mixed.collect_feature_stats = true;
for (const FastParseExampleConfig& config :
{config_dense, config_varlen, config_sparse, config_mixed}) {
{
Result result;
TF_CHECK_OK(FastParseExample(config, serialized, {}, nullptr, &result));
EXPECT_EQ(kNumExamples, result.feature_stats.size());
for (const PerExampleFeatureStats& stats : result.feature_stats) {
EXPECT_EQ(7, stats.features_count);
EXPECT_EQ(7, stats.feature_values_count);
}
}
{
Result result;
TF_CHECK_OK(FastParseSingleExample(config, serialized[0], &result));
EXPECT_EQ(1, result.feature_stats.size());
EXPECT_EQ(7, result.feature_stats[0].features_count);
EXPECT_EQ(7, result.feature_stats[0].feature_values_count);
}
}
}
string RandStr(random::SimplePhilox* rng) {
static const char key_char_lookup[] =
"0123456789{}~`!@#$%^&*()"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
auto len = 1 + rng->Rand32() % 200;
string str;
str.reserve(len);
while (len-- > 0) {
str.push_back(
key_char_lookup[rng->Rand32() % (sizeof(key_char_lookup) /
sizeof(key_char_lookup[0]))]);
}
return str;
}
void Fuzz(random::SimplePhilox* rng) {
auto num_keys = 1 + rng->Rand32() % 100;
std::unordered_set<string> unique_keys;
for (auto i = 0; i < num_keys; ++i) {
unique_keys.emplace(RandStr(rng));
}
Example example;
string serialized_example;
auto num_concats = 1 + rng->Rand32() % 4;
std::vector<Feature::KindCase> feat_types(
{Feature::kBytesList, Feature::kFloatList, Feature::kInt64List});
std::vector<string> all_keys(unique_keys.begin(), unique_keys.end());
while (num_concats--) {
example.Clear();
auto num_active_keys = 1 + rng->Rand32() % all_keys.size();
for (auto i = 0; i < num_active_keys; ++i) {
auto fkey = all_keys[rng->Rand32() % all_keys.size()];
auto ftype_idx = rng->Rand32() % feat_types.size();
auto num_features = 1 + rng->Rand32() % 5;
switch (static_cast<Feature::KindCase>(feat_types[ftype_idx])) {
case Feature::kBytesList: {
BytesList* bytes_list =
(*example.mutable_features()->mutable_feature())[fkey]
.mutable_bytes_list();
while (num_features--) {
bytes_list->add_value(RandStr(rng));
}
break;
}
case Feature::kFloatList: {
FloatList* float_list =
(*example.mutable_features()->mutable_feature())[fkey]
.mutable_float_list();
while (num_features--) {
float_list->add_value(rng->RandFloat());
}
break;
}
case Feature::kInt64List: {
Int64List* int64_list =
(*example.mutable_features()->mutable_feature())[fkey]
.mutable_int64_list();
while (num_features--) {
int64_list->add_value(rng->Rand64());
}
break;
}
default: {
LOG(QFATAL);
break;
}
}
}
serialized_example += example.SerializeAsString();
}
TestCorrectness(serialized_example);
}
TEST(FastParse, FuzzTest) {
const uint64 seed = 1337;
random::PhiloxRandom philox(seed);
random::SimplePhilox rng(&philox);
auto num_runs = 200;
while (num_runs--) {
LOG(INFO) << "runs left: " << num_runs;
Fuzz(&rng);
}
}
TEST(TestFastParseExample, Empty) {
Result result;
FastParseExampleConfig config;
config.sparse.push_back({"test", DT_STRING});
Status status =
FastParseExample(config, absl::Span<const tstring>(),
absl::Span<const tstring>(), nullptr, &result);
EXPECT_TRUE(status.ok()) << status;
}
}
}
} |
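// Illustrative sketch (not part of the dataset rows above or below): a minimal
// call into the FastParseExample API declared above. The feature key "age" and
// the helper's name are placeholders; error handling beyond the returned
// Status is omitted.
#include <vector>
#include "tensorflow/core/util/example_proto_fast_parsing.h"
tensorflow::Status ParseBatchSketch(
    const std::vector<tensorflow::tstring>& serialized_batch) {
  tensorflow::example::FastParseExampleConfig config;
  // Request a single sparse int64 feature.
  config.sparse.push_back({"age", tensorflow::DT_INT64});
  tensorflow::example::Result result;
  // No per-example names and no thread pool, so the batch is parsed serially.
  // On success, result.sparse_indices / sparse_values / sparse_shapes hold one
  // tensor per configured sparse feature.
  return tensorflow::example::FastParseExample(
      config, serialized_batch, absl::Span<const tensorflow::tstring>(),
      nullptr, &result);
}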
945 | cpp | tensorflow/tensorflow | signature_runner | tensorflow/lite/core/signature_runner.cc | tensorflow/lite/core/signature_runner_test.cc | #ifndef TENSORFLOW_LITE_CORE_SIGNATURE_RUNNER_H_
#define TENSORFLOW_LITE_CORE_SIGNATURE_RUNNER_H_
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/internal/signature_def.h"
namespace tflite {
namespace impl {
class Interpreter;
}
class SignatureRunnerHelper;
class SignatureRunnerJNIHelper;
class TensorHandle;
namespace impl {
class SignatureRunner {
public:
const std::string& signature_key() { return signature_def_->signature_key; }
size_t input_size() const { return subgraph_->inputs().size(); }
size_t output_size() const { return subgraph_->outputs().size(); }
const std::vector<const char*>& input_names() { return input_names_; }
const std::vector<const char*>& output_names() { return output_names_; }
TfLiteTensor* input_tensor(const char* input_name);
const TfLiteTensor* output_tensor(const char* output_name) const;
TfLiteStatus ResizeInputTensor(const char* input_name,
const std::vector<int>& new_size);
TfLiteStatus ResizeInputTensorStrict(const char* input_name,
const std::vector<int>& new_size);
TfLiteStatus AllocateTensors() { return subgraph_->AllocateTensors(); }
TfLiteStatus Invoke();
TfLiteStatus Cancel() { return subgraph_->Cancel(); }
TfLiteStatus SetCustomAllocationForInputTensor(
const char* input_name, const TfLiteCustomAllocation& allocation,
int64_t flags = kTfLiteCustomAllocationFlagsNone);
TfLiteStatus SetCustomAllocationForOutputTensor(
const char* output_name, const TfLiteCustomAllocation& allocation,
int64_t flags = kTfLiteCustomAllocationFlagsNone);
void SetAllowBufferHandleOutput(bool allow_buffer_handle_output) {
allow_buffer_handle_output_ = allow_buffer_handle_output;
}
private:
SignatureRunner(const internal::SignatureDef* signature_def,
Subgraph* subgraph);
friend class ::tflite::impl::Interpreter;
friend class ::tflite::SignatureRunnerHelper;
friend class ::tflite::SignatureRunnerJNIHelper;
friend class ::tflite::TensorHandle;
const internal::SignatureDef* signature_def_;
Subgraph* subgraph_;
std::vector<const char*> input_names_;
std::vector<const char*> output_names_;
bool allow_buffer_handle_output_ = false;
};
}
}
#endif
#include "tensorflow/lite/core/signature_runner.h"
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace impl {
SignatureRunner::SignatureRunner(const internal::SignatureDef* signature_def,
Subgraph* subgraph)
: signature_def_(signature_def), subgraph_(subgraph) {
for (const auto& it : signature_def_->inputs) {
input_names_.push_back(it.first.c_str());
}
for (const auto& it : signature_def_->outputs) {
output_names_.push_back(it.first.c_str());
}
}
TfLiteTensor* SignatureRunner::input_tensor(const char* input_name) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return nullptr;
}
return subgraph_->tensor(it->second);
}
const TfLiteTensor* SignatureRunner::output_tensor(
const char* output_name) const {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return nullptr;
}
return subgraph_->tensor(it->second);
}
TfLiteStatus SignatureRunner::ResizeInputTensor(
const char* input_name, const std::vector<int>& new_size) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->ResizeInputTensor(it->second, new_size);
}
TfLiteStatus SignatureRunner::ResizeInputTensorStrict(
const char* input_name, const std::vector<int>& new_size) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->ResizeInputTensorStrict(it->second, new_size);
}
TfLiteStatus SignatureRunner::Invoke() {
  // Re-arm the cancellation token so that an earlier Cancel() does not
  // immediately abort this invocation.
  if (subgraph_->continue_invocation_)
    (void)subgraph_->continue_invocation_->test_and_set();
TF_LITE_ENSURE_STATUS(subgraph_->Invoke());
if (!allow_buffer_handle_output_) {
for (int tensor_index : subgraph_->outputs()) {
TF_LITE_ENSURE_STATUS(
subgraph_->EnsureTensorDataIsReadable(tensor_index));
}
}
return kTfLiteOk;
}
TfLiteStatus SignatureRunner::SetCustomAllocationForInputTensor(
const char* input_name, const TfLiteCustomAllocation& allocation,
int64_t flags) {
const auto& it = signature_def_->inputs.find(input_name);
if (it == signature_def_->inputs.end()) {
subgraph_->ReportError("Input name %s was not found", input_name);
return kTfLiteError;
}
return subgraph_->SetCustomAllocationForTensor(it->second, allocation, flags);
}
TfLiteStatus SignatureRunner::SetCustomAllocationForOutputTensor(
const char* output_name, const TfLiteCustomAllocation& allocation,
int64_t flags) {
const auto& it = signature_def_->outputs.find(output_name);
if (it == signature_def_->outputs.end()) {
subgraph_->ReportError("Output name %s was not found", output_name);
return kTfLiteError;
}
return subgraph_->SetCustomAllocationForTensor(it->second, allocation, flags);
}
}
} | #include "tensorflow/lite/core/signature_runner.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace impl {
namespace {
TEST(SignatureRunnerTest, TestMultiSignatures) {
TestErrorReporter reporter;
auto model = FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/multi_signatures.bin", &reporter);
ASSERT_TRUE(model);
ops::builtin::BuiltinOpResolver resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
std::vector<const std::string*> signature_defs =
interpreter->signature_keys();
ASSERT_EQ(signature_defs.size(), 2);
ASSERT_EQ(*(signature_defs[0]), "add");
ASSERT_EQ(*(signature_defs[1]), "sub");
ASSERT_EQ(interpreter->GetSignatureRunner("dummy"), nullptr);
SignatureRunner* add_runner =
interpreter->GetSignatureRunner(signature_defs[0]->c_str());
ASSERT_NE(add_runner, nullptr);
ASSERT_EQ(add_runner->signature_key(), "add");
const std::vector<const char*>& input_names = add_runner->input_names();
const std::vector<const char*>& output_names = add_runner->output_names();
ASSERT_EQ(input_names.size(), 1);
ASSERT_EQ(std::string(input_names[0]), "x");
ASSERT_EQ(output_names.size(), 1);
ASSERT_EQ(std::string(output_names[0]), "output_0");
ASSERT_EQ(add_runner->ResizeInputTensor("x", {2}), kTfLiteOk);
ASSERT_EQ(add_runner->AllocateTensors(), kTfLiteOk);
TfLiteTensor* add_input = add_runner->input_tensor("x");
ASSERT_EQ(add_runner->input_tensor("dummy"), nullptr);
const TfLiteTensor* add_output = add_runner->output_tensor("output_0");
ASSERT_EQ(add_runner->output_tensor("dummy"), nullptr);
ASSERT_NE(add_input, nullptr);
ASSERT_NE(add_output, nullptr);
add_input->data.f[0] = 2;
add_input->data.f[1] = 4;
ASSERT_EQ(add_runner->Invoke(), kTfLiteOk);
ASSERT_EQ(add_output->data.f[0], 4);
ASSERT_EQ(add_output->data.f[1], 6);
SignatureRunner* sub_runner = interpreter->GetSignatureRunner("sub");
ASSERT_NE(sub_runner, nullptr);
ASSERT_EQ(sub_runner->signature_key(), "sub");
const std::vector<const char*>& input_names2 = sub_runner->input_names();
const std::vector<const char*>& output_names2 = sub_runner->output_names();
ASSERT_EQ(input_names2.size(), 1);
ASSERT_EQ(std::string(input_names2[0]), "x");
ASSERT_EQ(output_names2.size(), 1);
ASSERT_EQ(std::string(output_names2[0]), "output_0");
ASSERT_EQ(sub_runner->ResizeInputTensor("x", {3}), kTfLiteOk);
ASSERT_EQ(sub_runner->AllocateTensors(), kTfLiteOk);
TfLiteTensor* sub_input = sub_runner->input_tensor("x");
const TfLiteTensor* sub_output = sub_runner->output_tensor("output_0");
ASSERT_NE(sub_input, nullptr);
ASSERT_NE(sub_output, nullptr);
sub_input->data.f[0] = 2;
sub_input->data.f[1] = 4;
sub_input->data.f[2] = 6;
ASSERT_EQ(sub_runner->Invoke(), kTfLiteOk);
ASSERT_EQ(sub_output->data.f[0], -1);
ASSERT_EQ(sub_output->data.f[1], 1);
ASSERT_EQ(sub_output->data.f[2], 3);
}
}
}
} |
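// Illustrative sketch (not part of the dataset rows above or below): the call
// sequence the test above exercises, written as a standalone helper. The
// signature key "add" and the tensor names "x" / "output_0" come from the
// multi_signatures.bin test model; the helper's name is a placeholder.
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
TfLiteStatus RunAddSignatureSketch(tflite::Interpreter* interpreter,
                                   float* out_first_value) {
  auto* runner = interpreter->GetSignatureRunner("add");
  if (runner == nullptr) return kTfLiteError;
  // Shape the input, allocate, fill, invoke, then read the output back.
  if (runner->ResizeInputTensor("x", {2}) != kTfLiteOk) return kTfLiteError;
  if (runner->AllocateTensors() != kTfLiteOk) return kTfLiteError;
  TfLiteTensor* x = runner->input_tensor("x");
  x->data.f[0] = 2.0f;
  x->data.f[1] = 4.0f;
  if (runner->Invoke() != kTfLiteOk) return kTfLiteError;
  *out_first_value = runner->output_tensor("output_0")->data.f[0];
  return kTfLiteOk;
}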
946 | cpp | tensorflow/tensorflow | model_builder | tensorflow/lite/delegates/gpu/common/model_builder.cc | tensorflow/lite/delegates/gpu/common/model_builder_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_H_
#include <limits>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
TfLiteIntArray* GetOpsToReplace(
TfLiteContext* context, bool allow_quant_ops = false,
int max_delegated_partitions = 1,
const absl::flat_hash_set<TfLiteBuiltinOperator>* excluded_ops = nullptr,
int start_node_index = 0,
int end_node_index = std::numeric_limits<int>::max());
absl::Status BuildModel(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph,
absl::flat_hash_map<int, int>* quant_conversion_map = nullptr);
absl::Status BuildModelEnforceIO(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
const std::vector<int>& input_ids, const std::vector<int>& output_ids,
GraphFloat32* graph,
absl::flat_hash_map<int, int>* quant_conversion_map = nullptr);
absl::Status BuildFinalModel(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph,
absl::flat_hash_map<int, int>* quant_conversion_map = nullptr);
absl::Status BuildFromFlatBuffer(const FlatBufferModel& flatbuffer,
const OpResolver& op_resolver,
GraphFloat32* graph,
bool allow_quant_ops = false);
absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
TensorRef<BHWC>* tensor_ref);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/custom_parsers.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/lstm_parser.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_internal.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/object_reader.h"
#include "tensorflow/lite/delegates/gpu/common/operation_parser.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/model_transformations.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace gpu {
namespace {
absl::Status GetFullyConnectedAttributes(int weights_tensor_id,
int bias_tensor_id,
ObjectReader* reader,
FullyConnectedAttributes* attr) {
Tensor<HW, DataType::FLOAT32> weights;
RETURN_IF_ERROR(reader->ReadTensor(weights_tensor_id, &weights));
attr->weights.data = std::move(weights.data);
attr->weights.id = weights.id;
attr->weights.shape.h = 1;
attr->weights.shape.w = 1;
attr->weights.shape.o = weights.shape.h;
attr->weights.shape.i = weights.shape.w;
reader->ReadTensor(bias_tensor_id, &attr->bias).IgnoreError();
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveBuiltinData(const TfLiteNode* tflite_node,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(tflite_node->builtin_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve builtin_data.");
}
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveCustomInitialData(const TfLiteNode* tflite_node,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(tflite_node->custom_initial_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve custom_initial_data.");
}
return absl::OkStatus();
}
absl::Status NewConstNode(TensorFloat32 t, GraphFloat32* graph, Value** value) {
ConstTensorAttributes attr;
attr.tensor = std::move(t);
Node* node = graph->NewNode();
node->operation.attributes = attr;
node->operation.type = ToString(OperationType::CONSTANT);
*value = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(node->id, (*value)->id));
(*value)->tensor.ref = attr.tensor.id;
(*value)->tensor.type = attr.tensor.kType;
(*value)->tensor.shape = attr.tensor.shape;
return absl::OkStatus();
}
template <DataType DataTypeT, typename T>
absl::Status ParseInputsWithConstTensorImpl(
Node* node, ObjectReader* reader,
TensorOrScalarBase<DataTypeT, T>* tensor_or_scalar) {
const std::string& opname = node->operation.type;
const TfLiteTensor* input0 = reader->GetInputTensor(0);
if (!input0) {
return absl::InvalidArgumentError("Couldn't get the 1st input tensor for " +
opname);
}
const TfLiteTensor* input1 = reader->GetInputTensor(1);
if (!input1) {
return absl::InvalidArgumentError("Couldn't get the 2nd input tensor for " +
opname);
}
const bool constant_tensor0 = IsConstantTensor(input0);
const bool constant_tensor1 = IsConstantTensor(input1);
if (constant_tensor0 && constant_tensor1) {
return absl::InvalidArgumentError("No runtime input tensors for " + opname);
}
const bool runtime_tensor0 = !constant_tensor0;
const bool runtime_tensor1 = !constant_tensor1;
if (runtime_tensor0 && runtime_tensor1) {
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
} else {
int runtime_tensor = 0;
int constant_tensor = 1;
TfLiteIntArray* constant_dims = input1->dims;
if (constant_tensor0 && runtime_tensor1) {
runtime_tensor = 1;
constant_tensor = 0;
constant_dims = input0->dims;
}
RETURN_IF_ERROR(reader->AddInput(node, runtime_tensor));
if (constant_dims->size <= 0 || NumElements(constant_dims) == 1) {
Tensor<Scalar, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
*tensor_or_scalar = static_cast<T>(tensor.data[0]);
} else {
if (CheckIfLinearConvertible(constant_dims).ok()) {
Tensor<Linear, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
*tensor_or_scalar = std::move(tensor);
} else if (constant_dims->size == 2) {
Tensor<HW, DataTypeT> tensor_hw;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor_hw));
Tensor<HWC, DataTypeT> tensor;
tensor.id = tensor_hw.id;
tensor.shape = HWC(1, tensor_hw.shape.h, tensor_hw.shape.w);
tensor.data = tensor_hw.data;
*tensor_or_scalar = std::move(tensor);
} else {
Tensor<HWC, DataTypeT> tensor;
RETURN_IF_ERROR(reader->ReadTensor(constant_tensor, &tensor));
if (tensor.data.size() == 1) {
*tensor_or_scalar = static_cast<T>(tensor.data[0]);
} else {
*tensor_or_scalar = std::move(tensor);
}
}
}
}
return absl::OkStatus();
}
absl::Status ParseInputsWithConstTensor(Node* node, ObjectReader* reader,
const TfLiteTensor* input0) {
switch (input0->type) {
case kTfLiteBool: {
ElementwiseAttributesBase<DataType::BOOL, bool> attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
case kTfLiteInt32: {
ElementwiseAttributesBase<DataType::INT32, int32_t> attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
default: {
ElementwiseAttributes attr;
RETURN_IF_ERROR(
ParseInputsWithConstTensorImpl(node, reader, &attr.param));
attr.runtime_tensor_is_second =
IsConstantTensor(reader->GetInputTensor(0));
node->operation.attributes = std::move(attr);
return absl::OkStatus();
}
}
}
absl::Status MaybeFuseActivationForElementwiseNode(
OperationType operation_type, const TfLiteNode* tflite_node,
GraphFloat32* graph, Node* node) {
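  // Only MUL/ADD/SUB/DIV carry a fused activation in their builtin params;
  // every other elementwise op keeps kTfLiteActNone.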
TfLiteFusedActivation activation = kTfLiteActNone;
switch (operation_type) {
case OperationType::MUL: {
const TfLiteMulParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::ADD: {
const TfLiteAddParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::SUB: {
const TfLiteSubParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
case OperationType::DIV: {
const TfLiteDivParams* tf_options;
if (RetrieveBuiltinData(tflite_node, &tf_options).ok()) {
activation = tf_options->activation;
}
break;
}
default:
activation = kTfLiteActNone;
}
if (activation) {
return MaybeFuseActivation(activation, graph, node);
}
return absl::OkStatus();
}
struct TensorInfo {
std::vector<std::pair<TfLiteNode*, TfLiteRegistration*>> producers;
std::vector<std::pair<TfLiteNode*, TfLiteRegistration*>> consumers;
};
absl::Status GetTensorInfo(const TfLiteContext* context, int tensor_id,
TensorInfo* result) {
TfLiteIntArray* execution_plan = nullptr;
if (context->GetExecutionPlan(const_cast<TfLiteContext*>(context),
&execution_plan) != kTfLiteOk) {
return absl::UnavailableError("Unable to get graph execution plan.");
}
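  // Walk the execution plan and record every node that consumes or produces
  // the requested tensor.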
for (int i = 0; i < execution_plan->size; ++i) {
const int node_index = execution_plan->data[i];
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
if (context->GetNodeAndRegistration(const_cast<TfLiteContext*>(context),
node_index, &node,
                                        &registration) != kTfLiteOk) {
return absl::UnavailableError(
"Unable to get node and registration for node.");
}
for (int j = 0; j < node->inputs->size; ++j) {
if (tensor_id == node->inputs->data[j]) {
result->consumers.push_back({node, registration});
}
}
for (int j = 0; j < node->outputs->size; ++j) {
if (tensor_id == node->outputs->data[j]) {
result->producers.push_back({node, registration});
}
}
}
return absl::OkStatus();
}
bool IsLogicalCode(int32_t builtin_code) {
return builtin_code == kTfLiteBuiltinGreater ||
builtin_code == kTfLiteBuiltinGreaterEqual ||
builtin_code == kTfLiteBuiltinLess ||
builtin_code == kTfLiteBuiltinLessEqual ||
builtin_code == kTfLiteBuiltinEqual ||
builtin_code == kTfLiteBuiltinNotEqual;
}
bool IsLogicalOp(tflite::gpu::OperationType op_type) {
return op_type == tflite::gpu::OperationType::GREATER ||
op_type == tflite::gpu::OperationType::GREATER_EQUAL ||
op_type == tflite::gpu::OperationType::LESS ||
op_type == tflite::gpu::OperationType::LESS_EQUAL ||
op_type == tflite::gpu::OperationType::EQUAL ||
op_type == tflite::gpu::OperationType::NOT_EQUAL;
}
class BatchedMatMulOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
if (reader->GetNumberOfRuntimeInputs() == 2) {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::BATCHED_MATMUL);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
} else if (reader->GetNumberOfRuntimeInputs() == 1) {
const TfLiteTensor* second_input = reader->GetInputTensor(1);
if (!IsConstantTensor(second_input) || second_input->dims->size != 2) {
return absl::UnavailableError("Not supported batched mat mul case");
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
Tensor<HW, DataType::FLOAT32> weights;
RETURN_IF_ERROR(reader->ReadTensor(1, &weights));
Convolution2DAttributes attr;
attr.weights.data.resize(weights.shape.w * weights.shape.h);
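      // The constant HW weights are transposed here and stored with h = w = 1,
      // so the batched matmul can be lowered to a 1x1 CONVOLUTION_2D.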
for (int i = 0; i < weights.shape.w; ++i) {
for (int j = 0; j < weights.shape.h; ++j) {
attr.weights.data[i * weights.shape.h + j] =
weights.data[j * weights.shape.w + i];
}
}
attr.weights.id = weights.id;
attr.weights.shape.h = 1;
attr.weights.shape.w = 1;
attr.weights.shape.o = weights.shape.w;
attr.weights.shape.i = weights.shape.h;
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.padding.prepended = HW(0, 0);
node->operation.attributes = std::move(attr);
return absl::OkStatus();
} else {
return absl::UnavailableError("Not supported batched mat mul case");
}
return absl::OkStatus();
}
};
class CastOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
TfLiteType src_type = context->tensors[tflite_node->inputs->data[0]].type;
TfLiteType dst_type = context->tensors[tflite_node->outputs->data[0]].type;
if (src_type == kTfLiteBool &&
(dst_type == kTfLiteFloat16 || dst_type == kTfLiteFloat32)) {
TensorInfo input_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->inputs->data[0],
&input_tensor_info));
if (input_tensor_info.producers.size() != 1 ||
input_tensor_info.consumers.size() != 1) {
return absl::UnavailableError("Not supported cast case");
}
TensorInfo output_tensor_info;
RETURN_IF_ERROR(GetTensorInfo(context, tflite_node->outputs->data[0],
&output_tensor_info));
if (output_tensor_info.consumers.size() != 1) {
return absl::UnavailableError(
"Cast from bool not supported for outputs");
}
if (IsLogicalCode(input_tensor_info.producers[0].second->builtin_code)) {
return absl::OkStatus();
}
}
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CAST);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
return absl::OkStatus();
}
};
class ClampOperationsParser : public TFLiteOperationParser {
public:
explicit ClampOperationsParser(float clamp_a, float clamp_b)
: clamp_a_(clamp_a), clamp_b_(clamp_b) {}
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
return absl::OkStatus();
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
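    // clamp(x, a, b) is decomposed into three nodes:
    //   add(-a) -> relu(activation_max = b - a) -> add(+a).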
Node* node_sub = graph->NewNode();
Node* node_relu = graph->NewNode();
Node* node_add = graph->NewNode();
ElementwiseAttributes sub_attr;
sub_attr.param = -clamp_a_;
node_sub->operation.type = ToString(OperationType::ADD);
node_sub->operation.attributes = std::move(sub_attr);
ReLUAttributes relu_attr;
relu_attr.alpha = 0.0f;
relu_attr.activation_max = clamp_b_ - clamp_a_;
node_relu->operation.type = ToString(OperationType::RELU);
node_relu->operation.attributes = relu_attr;
ElementwiseAttributes add_attr;
add_attr.param = clamp_a_;
node_add->operation.type = ToString(OperationType::ADD);
node_add->operation.attributes = std::move(add_attr);
RETURN_IF_ERROR(reader->AddInput(node_sub, 0));
auto input = graph->FindInputs(node_sub->id)[0];
Value* v0 = graph->NewValue();
Value* v1 = graph->NewValue();
v0->tensor.type = input->tensor.type;
v0->tensor.shape = input->tensor.shape;
v1->tensor.type = input->tensor.type;
v1->tensor.shape = input->tensor.shape;
RETURN_IF_ERROR(graph->SetProducer(node_sub->id, v0->id));
RETURN_IF_ERROR(graph->AddConsumer(node_relu->id, v0->id));
RETURN_IF_ERROR(graph->SetProducer(node_relu->id, v1->id));
RETURN_IF_ERROR(graph->AddConsumer(node_add->id, v1->id));
RETURN_IF_ERROR(reader->AddOutputs(node_add));
return absl::OkStatus();
}
private:
const float clamp_a_, clamp_b_;
};
class ConcatenationOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
ConcatAttributes attr;
std::vector<const Value*> inputs;
for (uint32_t idx = 0; idx < tflite_node->inputs->size; ++idx) {
Value* value;
const auto status = reader->ReadValue(idx, &value);
if (status.ok()) {
inputs.push_back(value);
} else {
TensorFloat32 tensor;
RETURN_IF_ERROR(reader->ReadTensor(idx, &tensor));
Value* value;
RETURN_IF_ERROR(NewConstNode(std::move(tensor), graph, &value));
inputs.push_back(value);
}
}
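    // If the same value appears more than once among the inputs, route the
    // duplicates through COPY nodes so every concat input is a distinct value.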
for (int i = 0; i < inputs.size(); ++i) {
for (int j = 0; j < i; ++j) {
if (inputs[i] == inputs[j]) {
Node* node_copy = graph->NewNode();
node_copy->operation.type = ToString(OperationType::COPY);
RETURN_IF_ERROR(graph->AddConsumer(node_copy->id, inputs[j]->id));
Value* copy_value = graph->NewValue();
copy_value->tensor.type = inputs[j]->tensor.type;
copy_value->tensor.shape = inputs[j]->tensor.shape;
RETURN_IF_ERROR(graph->SetProducer(node_copy->id, copy_value->id));
inputs[i] = copy_value;
break;
}
}
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONCAT);
RETURN_IF_ERROR(reader->AddOutputs(node));
for (int i = 0; i < inputs.size(); ++i) {
RETURN_IF_ERROR(graph->AddConsumer(node->id, inputs[i]->id));
}
std::vector<BHWC> input_shapes;
for (auto input : graph->FindInputs(node->id)) {
input_shapes.push_back(input->tensor.shape);
}
RETURN_IF_ERROR(SetAxis(input_shapes, &attr.axis));
BHWC output_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
for (auto input : graph->FindInputs(node->id)) {
if (input->tensor.shape.h != output_shape.h) {
attr.axis = Axis::HEIGHT;
break;
}
if (input->tensor.shape.w != output_shape.w) {
attr.axis = Axis::WIDTH;
break;
}
if (input->tensor.shape.c != output_shape.c) {
attr.axis = Axis::CHANNELS;
break;
}
}
const TfLiteConcatenationParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
node->operation.attributes = attr;
return absl::OkStatus();
}
private:
absl::Status SetAxis(const std::vector<BHWC>& input_shapes, Axis* axis) {
*axis = Axis::BATCH;
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].h != input_shapes[i].h &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::HEIGHT;
break;
}
}
if (*axis == Axis::BATCH) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::WIDTH;
break;
}
}
if (*axis == Axis::HEIGHT) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].h != input_shapes[i].h &&
input_shapes[0].c != input_shapes[i].c) {
*axis = Axis::CHANNELS;
break;
}
}
if (*axis == Axis::WIDTH) return absl::OkStatus();
for (int i = 1; i < input_shapes.size(); i++) {
if (input_shapes[0].b != input_shapes[i].b &&
input_shapes[0].w != input_shapes[i].w &&
input_shapes[0].h != input_shapes[i].h) {
return absl::UnimplementedError(
"Can concatenate tensors only by batch, height, width, or "
"channels.");
}
}
return absl::OkStatus();
}
};
class Conv2DOperationParser : public TFLiteOperationParser {
public:
absl::Status IsSupported(const TfLiteContext* context,
const TfLiteNode* tflite_node,
const TfLiteRegistration* registration) final {
RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 6));
return CheckGpuDelegateCompatibility(context, tflite_node, registration);
}
absl::Status Parse(const TfLiteNode* tflite_node,
const TfLiteRegistration* registration,
GraphFloat32* graph, ObjectReader* reader) final {
const TfLiteConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
Convolution2DAttributes attr;
RETURN_IF_ERROR(ReadAttributes(tflite_node, tf_options, reader, &attr));
const int runtime_inputs = reader->GetNumberOfRuntimeInputs();
if (runtime_inputs == 2) {
const TfLiteTensor* src_tensor = reader->GetInputTensor(0);
const TfLiteTensor* weights_tensor = reader->GetInputTensor(1);
BHWC src_shape, weights_shape;
RETURN_IF_ERROR(ExtractTensorShape(*src_tensor, &src_shape));
RETURN_IF_ERROR(ExtractTensorShape(*weights_tensor, &weights_shape));
if (src_shape.c != weights_shape.c) {
return absl::InternalError(
"No support of CONVOLUTION_2D with runtime grouped weights.");
}
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::CONVOLUTION_2D);
node->operation.attributes = std::move(attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddInput(node, 1));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
} else {
BHWC src_shape, dst_shape;
RETURN_IF_ERROR(
ExtractTensorShape(*reader->GetInputTensor(0), &src_shape));
RETURN_IF_ERROR(
ExtractTensorShape(*reader->GetOutputTensor(0), &dst_shape));
const int src_group_size = attr.weights.shape.i;
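      // A convolution whose filters have a single input channel and that keeps
      // the channel count unchanged is rewritten as a depthwise convolution,
      // with the output- and input-channel axes of the weights swapped.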
if (attr.weights.shape.i == 1 && src_shape.c == dst_shape.c) {
DepthwiseConvolution2DAttributes dw_attr;
dw_attr.weights.id = attr.weights.id;
dw_attr.weights.shape =
OHWI(attr.weights.shape.i, attr.weights.shape.h,
attr.weights.shape.w, attr.weights.shape.o);
dw_attr.weights.data.resize(dw_attr.weights.shape.DimensionsProduct());
for (int o = 0; o < dw_attr.weights.shape.o; ++o) {
for (int h = 0; h < dw_attr.weights.shape.h; ++h) {
for (int w = 0; w < dw_attr.weights.shape.w; ++w) {
for (int i = 0; i < dw_attr.weights.shape.i; ++i) {
dw_attr.weights
.data[dw_attr.weights.shape.LinearIndex({o, h, w, i})] =
attr.weights
.data[attr.weights.shape.LinearIndex({i, h, w, o})];
}
}
}
}
dw_attr.bias = attr.bias;
dw_attr.strides = attr.strides;
dw_attr.dilations = attr.dilations;
dw_attr.padding = attr.padding;
Node* node = graph->NewNode();
node->operation.type = ToString(OperationType::DEPTHWISE_CONVOLUTION);
node->operation.attributes = std::move(dw_attr);
RETURN_IF_ERROR(reader->AddInput(node, 0));
RETURN_IF_ERROR(reader->AddOutputs(node));
RETURN_IF_ERROR(
MaybeFuseActivation(tf_options->activation, graph, node));
return absl::OkStatus();
}
const int dst_group_size = attr.weights.shape.o / attr.groups;
const bool supported_grouped_conv =
src_group_size % 4 == 0 && dst_group_size % 4 == 0;
if (attr.groups != 1 && !supported_grouped_conv) {
return ResolveGroupedConvolution(attr, tf_options, reader, graph);
} else {
Node* node = graph->N | #include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include <stddef.h>
#include <stdint.h>
#include <cstdlib>
#include <cstring>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_internal.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace gpu {
namespace {
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank0) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(1);
tflite_tensor.dims->data[0] = 4;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::FLOAT32);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 1, 1));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank1) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteInt32;
tflite_tensor.dims = TfLiteIntArrayCreate(2);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::INT32);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 1, 5));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank2) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteInt64;
tflite_tensor.dims = TfLiteIntArrayCreate(3);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
tflite_tensor.dims->data[2] = 6;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::INT64);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 1, 5, 6));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefSucceedsForRank3) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteUInt8;
tflite_tensor.dims = TfLiteIntArrayCreate(4);
tflite_tensor.dims->data[0] = 4;
tflite_tensor.dims->data[1] = 5;
tflite_tensor.dims->data[2] = 6;
tflite_tensor.dims->data[3] = 7;
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
ASSERT_TRUE(status.ok());
EXPECT_EQ(tensor_ref.type, DataType::UINT8);
EXPECT_EQ(tensor_ref.shape, BHWC(4, 5, 6, 7));
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefFailsForRankLT0) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(0);
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
EXPECT_FALSE(status.ok());
}
TEST(ModelBuilderTest, ConvertTfLiteTensorToTensorRefFailsForRankGT3) {
TfLiteTensor tflite_tensor;
tflite_tensor.name = "tensor_name";
tflite_tensor.type = TfLiteType::kTfLiteFloat32;
tflite_tensor.dims = TfLiteIntArrayCreate(5);
TensorRef<BHWC> tensor_ref;
const auto status =
ConvertTfLiteTensorToTensorRef(tflite_tensor, &tensor_ref);
TfLiteIntArrayFree(tflite_tensor.dims);
EXPECT_FALSE(status.ok());
}
class DelegatedInterpreter {
public:
explicit DelegatedInterpreter(int num_nodes) {
exec_plan_ = TfLiteIntArrayCreate(num_nodes);
}
virtual ~DelegatedInterpreter() {
TfLiteIntArrayFree(exec_plan_);
for (auto params : delegate_params_) {
TfLiteIntArrayFree(params.nodes_to_replace);
TfLiteIntArrayFree(params.input_tensors);
TfLiteIntArrayFree(params.output_tensors);
}
}
TfLiteContext* context() { return interpreter_.primary_subgraph().context(); }
TfLiteNode* node(int index) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration =
interpreter_.primary_subgraph().node_and_registration(index);
return const_cast<TfLiteNode*>(&node_and_registration->first);
}
TfLiteRegistration* registration(int index) {
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration =
interpreter_.primary_subgraph().node_and_registration(index);
return const_cast<TfLiteRegistration*>(&node_and_registration->second);
}
TfLiteIntArray* exec_plan() {
const int num_nodes = exec_plan_->size;
TfLiteIntArray* new_array = TfLiteIntArrayCreate(num_nodes);
std::memcpy(new_array->data, exec_plan_->data, num_nodes * sizeof(int32_t));
TfLiteIntArrayFree(exec_plan_);
exec_plan_ = new_array;
return exec_plan_;
}
TfLiteDelegateParams* add_delegate_params() {
delegate_params_.push_back(TfLiteDelegateParams());
return &delegate_params_.back();
}
TfLiteDelegateParams* delegate_params() { return &delegate_params_.front(); }
int num_delegate_params() { return delegate_params_.size(); }
protected:
Interpreter interpreter_;
private:
TfLiteIntArray* exec_plan_ = nullptr;
std::vector<TfLiteDelegateParams> delegate_params_;
};
class InterpreterFp16 : public DelegatedInterpreter {
public:
explicit InterpreterFp16(TfLiteBuiltinOperator op,
bool const_dequantize_inputs = true)
: DelegatedInterpreter(3) {
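    // Builds a 3-node graph: two DEQUANTIZE nodes convert fp16 tensors 0 and 2
    // into fp32 tensors 1 and 3, which feed a single binary op producing
    // tensor 4.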
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(5), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 1}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({4}), kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {
nullptr, nullptr, nullptr, nullptr, nullptr, kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_dequant0),
kTfLiteOk);
const TfLiteRegistration reg_dequant1 = {
nullptr, nullptr, nullptr, nullptr, nullptr, kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{2}, {3}, nullptr,
0, nullptr,
                  &reg_dequant1),
kTfLiteOk);
const TfLiteRegistration reg_op0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
op};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 3}, {4}, nullptr,
0,
builtin_data,
                  &reg_op0),
kTfLiteOk);
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteFloat16, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat16, "t2", dims, quantization, false),
kTfLiteOk);
if (const_dequantize_inputs) {
auto* tensor0 = interpreter_.tensor(0);
auto* tensor2 = interpreter_.tensor(2);
tensor0->allocation_type = kTfLiteMmapRo;
tensor2->allocation_type = kTfLiteMmapRo;
}
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
}
};
InterpreterFp16* interpreter_fp16_add_op =
new InterpreterFp16(kTfLiteBuiltinAdd);
TEST(ModelBuilderTest, GetOpsToReplaceAcceptsFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_add_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_add_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_add_op->node(node_index);
*registration = interpreter_fp16_add_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_add_op->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array = interpreter_fp16_add_op->delegate_params();
*num_partitions = interpreter_fp16_add_op->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 3);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, 2, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat16);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat16);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_non_constant =
new InterpreterFp16(kTfLiteBuiltinAdd, false);
TEST(ModelBuilderTest, GetOpsToReplaceRejectsNonConstantFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_non_constant->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_non_constant->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_non_constant->node(node_index);
*registration = interpreter_fp16_non_constant->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 1);
auto params = interpreter_fp16_non_constant->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 2;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 3;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 4;
*partition_params_array =
interpreter_fp16_non_constant->delegate_params();
*num_partitions = interpreter_fp16_non_constant->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
context->GetNodeAndRegistration(context, ops_to_replace->data[0], &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
InterpreterFp16* interpreter_fp16_gt_op =
new InterpreterFp16(kTfLiteBuiltinGreater);
TEST(ModelBuilderTest, GetOpsToReplaceRejectsFp16DequantizeNodes) {
TfLiteContext* context = interpreter_fp16_gt_op->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp16_gt_op->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp16_gt_op->node(node_index);
*registration = interpreter_fp16_gt_op->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
EXPECT_EQ(nodes_to_replace->size, 0);
*partition_params_array = nullptr;
*num_partitions = 0;
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 0);
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
const int kGreaterOpIndex = 2;
context->GetNodeAndRegistration(context, kGreaterOpIndex, &node,
                                  &registration);
EXPECT_EQ(context->tensors[node->inputs->data[0]].type,
TfLiteType::kTfLiteFloat32);
EXPECT_EQ(context->tensors[node->inputs->data[1]].type,
TfLiteType::kTfLiteFloat32);
TfLiteIntArrayFree(ops_to_replace);
}
class InterpreterFp32 : public DelegatedInterpreter {
public:
InterpreterFp32() : DelegatedInterpreter(2) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(4), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 2}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({3}), kTfLiteOk);
const TfLiteRegistration reg_dequant0 = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_dequant0),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {3}, nullptr,
0,
builtin_data,
                  &reg_add0),
kTfLiteOk);
const std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
}
};
InterpreterFp32* interpreter_fp32 = new InterpreterFp32();
TEST(ModelBuilderTest, GetOpsToReplaceDoesNotPruneUint8) {
TfLiteContext* context = interpreter_fp32->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter_fp32->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter_fp32->node(node_index);
*registration = interpreter_fp32->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
auto params = interpreter_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 1;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 2;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 3;
*partition_params_array = interpreter_fp32->delegate_params();
*num_partitions = interpreter_fp32->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(context);
EXPECT_EQ(ops_to_replace->size, 1);
EXPECT_EQ(1, ops_to_replace->data[0]);
TfLiteIntArrayFree(ops_to_replace);
}
class Interpreter2Fp32 : public DelegatedInterpreter {
public:
Interpreter2Fp32() : DelegatedInterpreter(4) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(8), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 2, 4, 6}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({7}), kTfLiteOk);
const TfLiteRegistration reg_dequant = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinDequantize};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{0}, {1}, nullptr,
0, nullptr,
                  &reg_dequant),
kTfLiteOk);
const TfLiteRegistration reg_add0 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int(1));
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{1, 2}, {3}, nullptr,
0,
builtin_data,
                  &reg_add0),
kTfLiteOk);
const TfLiteRegistration reg_pack = {nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinPack};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{3, 4}, {5}, nullptr,
0, nullptr,
                  &reg_pack),
kTfLiteOk);
const TfLiteRegistration reg_add1 = {
[](TfLiteContext* context, const char* buffer, size_t length) {
return reinterpret_cast<void*>(new int[2]);
},
[](TfLiteContext* context, void* buffer) {
delete reinterpret_cast<int*>(buffer);
},
nullptr,
nullptr,
nullptr,
kTfLiteBuiltinAdd};
EXPECT_EQ(interpreter_.AddNodeWithParameters(
{5, 6}, {7}, nullptr,
0,
builtin_data,
                  &reg_add1),
kTfLiteOk);
std::vector<int> dims = {1};
TfLiteQuantization quantization;
quantization.type = kTfLiteNoQuantization;
EXPECT_EQ(interpreter_.SetTensorParametersReadWrite(
0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
2, TfLiteType::kTfLiteFloat32, "t2", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
4, TfLiteType::kTfLiteFloat32, "t4", dims, quantization, false),
kTfLiteOk);
dims.push_back(2);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
5, TfLiteType::kTfLiteFloat32, "t5", dims, quantization, false),
kTfLiteOk);
EXPECT_EQ(
interpreter_.SetTensorParametersReadWrite(
6, TfLiteType::kTfLiteFloat32, "t6", dims, quantization, false),
kTfLiteOk);
exec_plan()->data[0] = 0;
exec_plan()->data[1] = 1;
exec_plan()->data[2] = 2;
exec_plan()->data[3] = 3;
}
};
Interpreter2Fp32* interpreter2_fp32 = new Interpreter2Fp32();
TEST(ModelBuilderTest, GetOpsToReplaceMultiplePartitions) {
TfLiteContext* context = interpreter2_fp32->context();
context->GetExecutionPlan = [](struct TfLiteContext* context,
TfLiteIntArray** execution_plan) {
*execution_plan = interpreter2_fp32->exec_plan();
return kTfLiteOk;
};
context->GetNodeAndRegistration = [](struct TfLiteContext*, int node_index,
TfLiteNode** node,
TfLiteRegistration** registration) {
*node = interpreter2_fp32->node(node_index);
*registration = interpreter2_fp32->registration(node_index);
return kTfLiteOk;
};
context->PreviewDelegatePartitioning =
[](struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions) {
auto params = interpreter2_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 1;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 1;
params->input_tensors->data[1] = 2;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 3;
params = interpreter2_fp32->add_delegate_params();
params->nodes_to_replace = TfLiteIntArrayCreate(1);
params->nodes_to_replace->data[0] = 3;
params->input_tensors = TfLiteIntArrayCreate(2);
params->input_tensors->data[0] = 5;
params->input_tensors->data[1] = 6;
params->output_tensors = TfLiteIntArrayCreate(1);
params->output_tensors->data[0] = 7;
*partition_params_array = interpreter2_fp32->delegate_params();
*num_partitions = interpreter2_fp32->num_delegate_params();
return kTfLiteOk;
};
TfLiteIntArray* ops_to_replace = GetOpsToReplace(
context, false, 2);
ASSERT_EQ(ops_to_replace->size, 2);
EXPECT_THAT(absl::MakeConstSpan(ops_to_replace->data, 2),
testing::UnorderedElementsAre(1, 3));
TfLiteIntArrayFree(ops_to_replace);
}
class InterpreterMultiNode : public DelegatedInterpreter {
public:
explicit InterpreterMultiNode(bool both_ops_supported = true)
: DelegatedInterpreter(5) {
void* builtin_data = malloc(sizeof(int));
EXPECT_EQ(interpreter_.AddTensors(8), kTfLiteOk);
EXPECT_EQ(interpreter_.SetInputs({0, 1, 2}), kTfLiteOk);
EXPECT_EQ(interpreter_.SetOutputs({6, 7}), kTfLiteOk);
for (int i = 0; i < 3; ++i) {
const TfLiteRegistration reg_dequant = {nullptr,
nullptr,
nullptr,
nullptr, |
947 | cpp | tensorflow/tensorflow | interpreter | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/interpreter.cc | tensorflow/lite/interpreter_test.cc | #ifndef XLA_MLIR_TOOLS_MLIR_INTERPRETER_FRAMEWORK_INTERPRETER_H_
#define XLA_MLIR_TOOLS_MLIR_INTERPRETER_FRAMEWORK_INTERPRETER_H_
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include "absl/status/statusor.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
namespace mlir {
namespace interpreter {
class InterpreterScope;
class InterpreterListener {
public:
virtual ~InterpreterListener() = default;
virtual void BeforeOp(ArrayRef<InterpreterValue> args, mlir::Operation*) {}
virtual void AfterOp(ArrayRef<InterpreterValue> results) {}
virtual void EnterRegion(ArrayRef<InterpreterValue> args,
mlir::Region& region) {}
virtual void LeaveRegion(ArrayRef<InterpreterValue> terminator_args) {}
};
struct InterpreterStats {
int64_t heap_size = 0;
int64_t peak_heap_size = 0;
int64_t num_allocations = 0;
int64_t num_deallocations = 0;
};
struct InterpreterOptions {
InterpreterListener* listener = nullptr;
std::optional<int64_t> max_steps = std::nullopt;
bool disable_deallocations = false;
std::function<void(llvm::StringRef)> error_handler =
[](llvm::StringRef failure) {
llvm::errs() << "Interpreter failure: " << failure << "\n";
};
InterpreterStats* stats = nullptr;
};
class InterpreterState {
public:
InterpreterState(const mlir::SymbolTable& symbols,
InterpreterOptions options);
void Step() {
if (remaining_steps_ == 0) {
AddFailure("maximum number of steps exceeded");
return;
}
--remaining_steps_;
}
void AddFailure(llvm::StringRef failure);
bool HasFailure() const { return failed_; }
void CheckSuccess(LogicalResult result, llvm::StringRef failure) {
if (!result.succeeded()) {
AddFailure(failure);
}
}
InterpreterScope* GetTopScope() { return top_scope_; }
const mlir::SymbolTable& GetSymbols() const { return symbols_; }
const InterpreterOptions& GetOptions() { return options_; }
private:
const mlir::SymbolTable& symbols_;
InterpreterScope* top_scope_ = nullptr;
bool failed_ = false;
InterpreterOptions options_;
int64_t remaining_steps_ = std::numeric_limits<int64_t>::max();
friend class InterpreterScope;
friend class InterpreterScopeStash;
};
class InterpreterSideChannel {
public:
virtual ~InterpreterSideChannel() = default;
};
class InterpreterScope {
public:
InterpreterScope(InterpreterScope&&) = delete;
explicit InterpreterScope(InterpreterState& state)
: state_(state), parent_scope_(state.top_scope_) {
state.top_scope_ = this;
}
~InterpreterScope();
void Set(Value v, InterpreterValue iv) { values_[v] = std::move(iv); }
const InterpreterValue& Get(Value v) {
auto ret = values_.find(v);
if (ret == values_.end()) {
if (!parent_scope_) {
v.dump();
}
assert(parent_scope_ && "value not found");
return parent_scope_->Get(v);
}
return ret->second;
}
void Verify() const;
template <typename T>
T* GetSideChannel(bool optional = false) {
for (auto& side_channel : side_channels_) {
if (auto it = dynamic_cast<T*>(side_channel.get())) {
return it;
}
}
if (!parent_scope_ && optional) return nullptr;
assert(parent_scope_ && "side channel not found");
return parent_scope_->GetSideChannel<T>(optional);
}
void SetSideChannel(std::shared_ptr<InterpreterSideChannel> side_channel) {
side_channels_.push_back(std::move(side_channel));
}
InterpreterScope* GetParentScope() const { return parent_scope_; }
private:
DenseMap<Value, InterpreterValue> values_;
SmallVector<std::shared_ptr<InterpreterSideChannel>> side_channels_;
InterpreterState& state_;
InterpreterScope* parent_scope_;
friend class InterpreterScopeStash;
};
SmallVector<InterpreterValue> Interpret(InterpreterState& state, Region& region,
ArrayRef<InterpreterValue> bbargs);
absl::StatusOr<SmallVector<InterpreterValue>> RunInterpreter(
const mlir::SymbolTable& symbols, mlir::func::FuncOp function,
ArrayRef<InterpreterValue> args, InterpreterOptions options = {});
}
}
#endif
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include <cassert>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/registration.h"
namespace mlir {
namespace interpreter {
SmallVector<InterpreterValue> Interpret(InterpreterState& state,
Operation& op) {
auto fn = detail::GetFunction(op.getName().getStringRef());
if (!fn) {
llvm::errs() << "Unsupported op: " << op.getName().getStringRef() << "\n";
op.dump();
state.AddFailure("unsupported op");
return {};
}
SmallVector<InterpreterValue> operands;
for (auto operand : op.getOperands()) {
operands.push_back(state.GetTopScope()->Get(operand));
}
state.GetOptions().listener->BeforeOp(operands, &op);
auto results = fn(operands, &op, state);
for (auto* scope = state.GetTopScope(); scope != nullptr;
scope = scope->GetParentScope()) {
scope->Verify();
}
if (state.HasFailure()) {
llvm::errs() << "Encountered failure while executing " << op << "\n";
}
state.GetOptions().listener->AfterOp(results);
state.Step();
return results;
}
SmallVector<InterpreterValue> Interpret(InterpreterState& state, Region& region,
ArrayRef<InterpreterValue> bbargs) {
if (state.HasFailure()) return {};
assert(region.hasOneBlock() && "expected region to have one block");
state.GetOptions().listener->EnterRegion(bbargs, region);
InterpreterScope scope(state);
auto& block = region.getBlocks().front();
for (auto [value, interpreter_value] :
llvm::zip(block.getArguments(), bbargs)) {
scope.Set(value, interpreter_value);
}
std::optional<SmallVector<InterpreterValue>> block_results;
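  // Interpret the block's operations in order; the terminator's operands
  // become the region's results.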
for (mlir::Operation& op : block) {
auto results = Interpret(state, op);
if (state.HasFailure()) return {};
if (op.hasTrait<OpTrait::IsTerminator>()) {
assert(!block_results.has_value() && "Expected at most one terminator");
block_results = results;
} else {
if (results.size() != op.getNumResults()) {
llvm::errs() << "Unexpected number of results while interpreting "
<< op.getName().getStringRef() << ". Interpreter bug?\n";
llvm_unreachable("unexpected number of results");
}
for (auto [v, iv] : llvm::zip(op.getResults(), results)) {
scope.Set(v, iv);
}
}
}
if (!block_results) {
block_results = SmallVector<InterpreterValue>{};
}
state.GetOptions().listener->LeaveRegion(*block_results);
return *std::move(block_results);
}
InterpreterState::InterpreterState(const mlir::SymbolTable& symbols,
InterpreterOptions options)
: symbols_(symbols), options_(options) {
if (!options_.listener) {
static auto& no_op_listener = *new InterpreterListener();
this->options_.listener = &no_op_listener;
}
if (options_.max_steps) {
remaining_steps_ = *options_.max_steps;
}
}
void InterpreterState::AddFailure(llvm::StringRef failure) {
failed_ = true;
options_.error_handler(failure);
}
void InterpreterScope::Verify() const {
for (auto& [_, value] : values_) {
if (value.IsTensor() && value.GetBuffer() &&
!value.GetBuffer()->GetFailure().empty()) {
state_.AddFailure(value.GetBuffer()->GetFailure());
break;
}
}
}
InterpreterScope::~InterpreterScope() {
Verify();
state_.top_scope_ = parent_scope_;
}
absl::StatusOr<SmallVector<InterpreterValue>> RunInterpreter(
const mlir::SymbolTable& symbols, mlir::func::FuncOp function,
ArrayRef<InterpreterValue> args, InterpreterOptions options) {
InterpreterState state{symbols, std::move(options)};
auto results = Interpret(state, function.getBody(), args);
if (state.HasFailure()) {
return absl::InvalidArgumentError("Interpreter failed, check error logs");
}
return results;
}
}
} | #include <cstdint>
#include <cstring>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "benchmark/benchmark.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/async_handle.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/builtin_kernels.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/future.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/register_span.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test_benchmark.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace mlrt {
namespace {
class AddI32Kernel : public KernelFrame {
public:
using KernelFrame::KernelFrame;
int32_t arg0() const { return arguments()[kArg0Index].Get<int32_t>(); }
int32_t arg1() const { return arguments()[kArg1Index].Get<int32_t>(); }
void set_result(int32_t result) { results()[kResultIndex].Set(result); }
void Invoke() { set_result(arg0() + arg1()); }
static constexpr char kName[] = "add";
private:
static constexpr int kArg0Index = 0;
static constexpr int kArg1Index = 1;
static constexpr int kResultIndex = 0;
};
void AddI32Const(KernelFrame frame) {
auto args = frame.arguments();
int32_t constant = frame.attributes().GetAs<int32_t>(0);
frame.results()[0].Set(args[0].Get<int32_t>() + constant);
}
bc::Buffer CreateSequentialAddExecutable(int num_add) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::SymbolTable kernels;
std::vector<std::string> names = {"add", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(1);
auto function_ctor = functions_ctor.ConstructAt(0);
testing::SymbolTable regs;
function_ctor.construct_name("main");
function_ctor.construct_input_regs(1).Assign({regs.Def("r0")});
function_ctor.construct_output_last_uses(1).Assign({true});
auto kernels_ctor = function_ctor.construct_kernels(num_add + 1);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("add"));
kernel_ctor.construct_arguments(2).Assign(regs.Use({"r0", "r0"}));
kernel_ctor.construct_results(1).Assign({regs.Def("r1")});
}
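  // The remaining adds ping-pong between registers r1 and r2, so only two
  // result registers are needed no matter how many adds are chained.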
for (int i = 1; i < num_add; ++i) {
auto kernel_ctor = kernels_ctor.ConstructAt(i);
kernel_ctor.set_code(kernels.Use("add"));
kernel_ctor.construct_arguments(2).Assign(
regs.Use({absl::StrCat("r", (i + 1) % 2 + 1), "r0"}));
kernel_ctor.construct_results(1).Assign(
{regs.Def(absl::StrCat("r", i % 2 + 1))});
}
auto kernel_ctor = kernels_ctor.ConstructAt(num_add);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign(
{regs.Use(absl::StrCat("r", (num_add - 1) % 2 + 1))});
function_ctor.construct_output_regs(1).Assign(
{regs.Use(absl::StrCat("r", (num_add - 1) % 2 + 1))});
function_ctor.set_num_regs(regs.size());
return buffer;
}
bc::Buffer CreateSequentialAddAttributesExecutable(int num_add) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::SymbolTable kernels;
std::vector<std::string> names = {"add.const", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
testing::AttributeTable attributes(executable_ctor.construct_attributes(1));
attributes.Add("op_key", 1);
auto functions_ctor = executable_ctor.construct_functions(1);
auto function_ctor = functions_ctor.ConstructAt(0);
testing::SymbolTable regs;
function_ctor.construct_name("main");
function_ctor.construct_input_regs(1).Assign({regs.Def("r0")});
auto kernels_ctor = function_ctor.construct_kernels(num_add + 1);
for (int i = 0; i < num_add; ++i) {
auto kernel_ctor = kernels_ctor.ConstructAt(i);
kernel_ctor.set_code(kernels.Use("add.const"));
kernel_ctor.construct_arguments(1).Assign({regs.Use(absl::StrCat("r", i))});
kernel_ctor.construct_results(1).Assign(
{regs.Def(absl::StrCat("r", i + 1))});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle("op_key")});
}
auto kernel_ctor = kernels_ctor.ConstructAt(num_add);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign(
{regs.Use(absl::StrCat("r", num_add))});
function_ctor.construct_output_regs(1).Assign(
{regs.Use(absl::StrCat("r", num_add))});
function_ctor.set_num_regs(regs.size());
return buffer;
}
TEST(InterpreterTest, SequentialAdd) {
auto buffer = CreateSequentialAddExecutable(99);
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register<AddI32Kernel>();
LoadedExecutable loaded_executable(executable, kernel_registry);
absl::Notification notification;
ExecutionContext execution_context(&loaded_executable);
execution_context.set_exit_handler([&]() { notification.Notify(); });
int32_t v = 1;
mlrt::Value arg(v);
mlrt::Value result;
auto function = loaded_executable.GetFunction("main");
ASSERT_TRUE(function);
std::vector<uint8_t> last_uses = {true};
execution_context.Call(function, last_uses, absl::Span<Value>(&arg, 1),
absl::Span<Value>(&result, 1));
Execute(execution_context);
notification.WaitForNotification();
EXPECT_EQ(result.Get<int32_t>(), 100);
}
TEST(InterpreterTest, SequentialAddAttributes) {
auto buffer = CreateSequentialAddAttributesExecutable(99);
bc::Executable executable(buffer.Get(0));
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("add.const", &AddI32Const);
LoadedExecutable loaded_executable(executable, kernel_registry);
absl::Notification notification;
ExecutionContext execution_context(&loaded_executable);
execution_context.set_exit_handler([&]() { notification.Notify(); });
int32_t v = 1;
mlrt::Value arg(v);
mlrt::Value result;
auto function = loaded_executable.GetFunction("main");
ASSERT_TRUE(function);
std::vector<uint8_t> last_uses = {true};
execution_context.Call(function, last_uses, absl::Span<Value>(&arg, 1),
absl::Span<Value>(&result, 1));
Execute(execution_context);
notification.WaitForNotification();
EXPECT_EQ(result.Get<int32_t>(), 100);
}
bc::Buffer CreateCallExecutable() {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::AttributeTable attributes(executable_ctor.construct_attributes(1));
attributes.Add("op_key", 1);
testing::SymbolTable kernels;
std::vector<std::string> names = {"call", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(2);
{
testing::SymbolTable regs;
auto caller_ctor = functions_ctor.ConstructAt(0);
caller_ctor.construct_name("caller");
caller_ctor.construct_input_regs(1).Assign({regs.Def("arg")});
auto kernels_ctor = caller_ctor.construct_kernels(2);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("call"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("arg")});
kernel_ctor.construct_last_uses(1).Assign({true});
kernel_ctor.construct_results(1).Assign({regs.Def("result")});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle("op_key")});
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(1);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("result")});
}
caller_ctor.construct_output_regs(1).Assign({regs.Use("result")});
caller_ctor.set_num_regs(regs.size());
}
{
testing::SymbolTable regs;
auto callee_ctor = functions_ctor.ConstructAt(1);
callee_ctor.construct_name("callee");
callee_ctor.construct_input_regs(1).Assign({regs.Def("arg")});
{
auto kernels_ctor = callee_ctor.construct_kernels(1);
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("arg")});
}
callee_ctor.construct_output_regs(1).Assign({regs.Use("arg")});
callee_ctor.set_num_regs(regs.size());
}
return buffer;
}
TEST(InterpreterTest, Call) {
auto buffer = CreateCallExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
LoadedExecutable loaded_executable(executable, kernel_registry);
ExecutionContext execution_context(&loaded_executable);
auto function = loaded_executable.GetFunction("caller");
ASSERT_TRUE(function);
Value input(123);
Value output;
std::vector<uint8_t> last_uses = {false};
execution_context.Call(function, last_uses, absl::Span<Value>(&input, 1),
absl::Span<Value>(&output, 1));
Execute(execution_context);
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<int>(), 123);
EXPECT_TRUE(input.HasValue());
}
bc::Buffer CreateCondExecutable() {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::AttributeTable attributes(executable_ctor.construct_attributes(2));
attributes.Add("then_idx", 1);
attributes.Add("else_idx", 2);
testing::SymbolTable kernels;
std::vector<std::string> names = {"mlrt.cond", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(3);
{
auto caller_ctor = functions_ctor.ConstructAt(0);
caller_ctor.construct_name("caller");
testing::SymbolTable regs;
caller_ctor.construct_input_regs(3).Assign(regs.Def({"cond", "x", "y"}));
{
auto kernels_ctor = caller_ctor.construct_kernels(2);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("mlrt.cond"));
kernel_ctor.construct_arguments(3).Assign(regs.Use({"cond", "x", "y"}));
kernel_ctor.construct_last_uses(3).Assign({true, true, true});
kernel_ctor.construct_results(1).Assign({regs.Def("z")});
kernel_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("then_idx"),
attributes.GetHandle("else_idx")});
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(1);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("z")});
}
}
caller_ctor.set_num_regs(regs.size());
caller_ctor.construct_output_regs(1).Assign({regs.Use("z")});
}
{
auto then_ctor = functions_ctor.ConstructAt(1);
then_ctor.construct_name("then");
testing::SymbolTable regs;
then_ctor.construct_input_regs(2).Assign(
regs.Def(absl::Span<const std::string>{"x", "y"}));
{
auto kernels_ctor = then_ctor.construct_kernels(1);
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("x")});
}
then_ctor.set_num_regs(regs.size());
then_ctor.construct_output_regs(1).Assign({regs.Use("x")});
}
{
auto else_ctor = functions_ctor.ConstructAt(2);
else_ctor.construct_name("else");
testing::SymbolTable regs;
else_ctor.construct_input_regs(2).Assign(
regs.Def(absl::Span<const std::string>{"x", "y"}));
{
auto kernels_ctor = else_ctor.construct_kernels(1);
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("y")});
}
else_ctor.set_num_regs(regs.size());
else_ctor.construct_output_regs(1).Assign({regs.Use("y")});
}
return buffer;
}
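// Runs "caller" twice: with cond=true it expects x (100), with cond=false it
// expects y (200). The false entries in last_uses keep x and y alive so the
// second call can reuse them.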
TEST(InterpreterTest, Cond) {
auto buffer = CreateCondExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
LoadedExecutable loaded_executable(executable, kernel_registry);
ExecutionContext execution_context(&loaded_executable);
auto function = loaded_executable.GetFunction("caller");
ASSERT_TRUE(function);
Value inputs[3];
inputs[0].Set(true);
inputs[1].Set(100);
inputs[2].Set(200);
Value output;
std::vector<uint8_t> last_uses = {true, false, false};
execution_context.Call(function, last_uses, absl::MakeSpan(inputs),
absl::Span<Value>(&output, 1));
Execute(execution_context);
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<int>(), 100);
ASSERT_TRUE(inputs[1].HasValue());
ASSERT_TRUE(inputs[2].HasValue());
ASSERT_EQ(inputs[1].Get<int>(), 100);
ASSERT_EQ(inputs[2].Get<int>(), 200);
inputs[0].Set(false);
execution_context.Call(function, last_uses, absl::MakeSpan(inputs),
absl::Span<Value>(&output, 1));
Execute(execution_context);
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<int>(), 200);
}
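// Builds a chain of num_calls functions in which each caller invokes the next
// function via the "call" kernel; the final function simply returns its
// argument.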
bc::Buffer CreateNestedCallExecutable(int num_calls) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::AttributeTable attributes(executable_ctor.construct_attributes(1));
for (int i = 0; i < num_calls; ++i) {
attributes.Add(absl::StrCat("f_id_", i), i);
}
testing::SymbolTable kernels;
std::vector<std::string> names = {"call", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(num_calls);
for (int i = 0; i < num_calls - 1; ++i) {
testing::SymbolTable regs;
auto caller_ctor = functions_ctor.ConstructAt(i);
caller_ctor.construct_name(absl::StrCat("call_", i));
caller_ctor.construct_input_regs(1).Assign({regs.Def("arg")});
auto kernels_ctor = caller_ctor.construct_kernels(2);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("call"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("arg")});
kernel_ctor.construct_last_uses(1).Assign({true});
kernel_ctor.construct_results(1).Assign({regs.Def("result")});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle(absl::StrCat("f_id_", i + 1))});
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(1);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("result")});
}
caller_ctor.construct_output_regs(1).Assign({regs.Use("result")});
caller_ctor.set_num_regs(regs.size());
}
{
testing::SymbolTable regs;
auto callee_ctor = functions_ctor.ConstructAt(num_calls - 1);
callee_ctor.construct_name(absl::StrCat("call_", num_calls));
callee_ctor.construct_input_regs(1).Assign({regs.Def("arg")});
{
auto kernels_ctor = callee_ctor.construct_kernels(1);
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("arg")});
}
callee_ctor.construct_output_regs(1).Assign({regs.Use("arg")});
callee_ctor.set_num_regs(regs.size());
}
return buffer;
}
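// Passes 123 through a 32-deep chain of nested calls and expects it back
// unchanged.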
TEST(InterpreterTest, NestedCall) {
auto buffer = CreateNestedCallExecutable(32);
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
LoadedExecutable loaded_executable(executable, kernel_registry);
ExecutionContext execution_context(&loaded_executable);
auto function = loaded_executable.GetFunction("call_0");
ASSERT_TRUE(function);
Value input(123);
Value output;
std::vector<uint8_t> last_uses = {true};
execution_context.Call(function, last_uses, absl::Span<Value>(&input, 1),
absl::Span<Value>(&output, 1));
Execute(execution_context);
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<int>(), 123);
}
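// Builds a single "main" function whose first kernel is the test-only "fail"
// kernel, followed by "return".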
bc::Buffer CreateFailExecutable() {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::SymbolTable kernels;
std::vector<std::string> names = {"fail", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(1);
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
auto kernels_ctor = function_ctor.construct_kernels(2);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("fail"));
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(1);
kernel_ctor.set_code(kernels.Use("return"));
}
return buffer;
}
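// Test kernel that reports an internal error to the execution context.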
void Fail(KernelFrame frame) {
frame.execution_context().Fail(absl::InternalError("test error"));
}
TEST(InterpreterTest, Fail) {
auto buffer = CreateFailExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("fail", &Fail);
LoadedExecutable loaded_executable(executable, kernel_registry);
ExecutionContext execution_context(&loaded_executable);
auto function = loaded_executable.GetFunction("main");
ASSERT_TRUE(function);
std::vector<uint8_t> last_uses;
execution_context.Call(function, last_uses, absl::Span<Value>(),
absl::Span<Value>());
Execute(execution_context);
EXPECT_THAT(
execution_context.status(),
::tsl::testing::StatusIs(absl::StatusCode::kInternal, "test error"));
}
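// Builds a "main" function that awaits its future argument with "await.i32"
// and returns the awaited value.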
bc::Buffer CreateAwaitExecutable() {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::SymbolTable kernels;
std::vector<std::string> names = {"await.i32", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(1);
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
testing::SymbolTable regs;
function_ctor.construct_input_regs(1).Assign({regs.Def("future")});
auto kernels_ctor = function_ctor.construct_kernels(2);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("await.i32"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("future")});
kernel_ctor.construct_results(1).Assign({regs.Def("result")});
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(1);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(1).Assign({regs.Use("result")});
}
function_ctor.set_num_regs(regs.size());
function_ctor.construct_output_regs(1).Assign({regs.Use("result")});
function_ctor.construct_output_last_uses(1).Assign({true});
return buffer;
}
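// Test kernel that registers an int32 await on the incoming future; the
// result register is populated once the corresponding promise is fulfilled.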
void AwaitI32(KernelFrame frame) {
auto& future = frame.arguments()[0].Get<Future>();
frame.execution_context().Await<int32_t>(future, &frame.results()[0]);
}
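// Sets the promise only after Execute() returns, then waits for the exit
// handler; the awaited value (100) must show up in the output register.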
TEST(InterpreterTest, Await) {
auto buffer = CreateAwaitExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("await.i32", &AwaitI32);
LoadedExecutable loaded_executable(executable, kernel_registry);
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(work_queue.get());
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto promise = Promise::Allocate<int32_t>();
Value input(promise.GetFuture());
Value output;
std::vector<uint8_t> last_uses = {true};
execution_context.Call(executable.functions()[0], last_uses,
absl::Span<Value>(&input, 1),
absl::Span<Value>(&output, 1));
Execute(execution_context);
std::move(promise).Set<int32_t>(100);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<int32_t>(), 100);
}
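// Payload type that counts copy and move operations so the tests below can
// assert that awaited values are moved rather than copied.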
struct TestPayload {
TestPayload() = default;
TestPayload(const TestPayload& other)
: copy(other.copy + 1), move(other.move) {}
TestPayload& operator=(const TestPayload& other) {
copy = other.copy + 1;
move = other.move;
return *this;
}
TestPayload(TestPayload&& other) : copy(other.copy), move(other.move + 1) {}
TestPayload& operator=(TestPayload&& other) {
copy = other.copy;
move = other.move + 1;
return *this;
}
int copy = 0;
int move = 0;
};
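// Same as AwaitI32 but for TestPayload; the future is consumed so the payload
// can be moved into the result register.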
void AwaitTestPayload(KernelFrame frame) {
auto& future = frame.arguments()[0].Get<Future>();
frame.execution_context().Await<TestPayload>(std::move(future),
&frame.results()[0]);
}
TEST(InterpreterTest, AwaitMove) {
auto buffer = CreateAwaitExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("await.i32", &AwaitTestPayload);
LoadedExecutable loaded_executable(executable, kernel_registry);
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(work_queue.get());
{
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto promise = Promise::Allocate<TestPayload>();
Value input(promise.GetFuture());
Value output;
std::vector<uint8_t> last_uses = {true};
execution_context.Call(executable.functions()[0], last_uses,
absl::Span<Value>(&input, 1),
absl::Span<Value>(&output, 1));
Execute(execution_context);
std::move(promise).Set<TestPayload>(TestPayload{});
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<TestPayload>().copy, 0);
EXPECT_EQ(output.Get<TestPayload>().move, 4);
}
{
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto promise = Promise::Allocate<TestPayload>();
Value input(promise.GetFuture());
Value output;
std::vector<uint8_t> last_uses = {true};
execution_context.Call(executable.functions()[0], last_uses,
absl::Span<Value>(&input, 1),
absl::Span<Value>(&output, 1));
std::move(promise).Set<TestPayload>(TestPayload{});
Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(output.Get<TestPayload>().copy, 0);
EXPECT_EQ(output.Get<TestPayload>().move, 4);
}
}
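// Fulfilling the promise with an error must surface as the execution
// context's final status.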
TEST(InterpreterTest, AwaitError) {
auto buffer = CreateAwaitExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("await.i32", &AwaitI32);
LoadedExecutable loaded_executable(executable, kernel_registry);
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(work_queue.get());
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto promise = Promise::Allocate<int32_t>();
Value input(promise.GetFuture());
Value output;
std::vector<uint8_t> last_uses = {true};
execution_context.Call(executable.functions()[0], last_uses,
absl::Span<Value>(&input, 1),
absl::Span<Value>(&output, 1));
Execute(execution_context);
std::move(promise).SetError(absl::InternalError("test error"));
notification.WaitForNotification();
EXPECT_THAT(
execution_context.status(),
::tsl::testing::StatusIs(absl::StatusCode::kInternal, "test error"));
}
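// Builds a "main" function that awaits two futures at once with
// "await_all.i32" and returns both results.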
bc::Buffer CreateAwaitAllExecutable() {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
testing::SymbolTable kernels;
std::vector<std::string> names = {"await_all.i32", "return"};
executable_ctor.construct_kernel_names(2).Assign(names);
kernels.Def(names);
auto functions_ctor = executable_ctor.construct_functions(1);
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
testing::SymbolTable regs;
function_ctor.construct_input_regs(2).Assign(
regs.Def(absl::Span<const std::string>{"f0", "f1"}));
auto kernels_ctor = function_ctor.construct_kernels(2);
{
auto kernel_ctor = kernels_ctor.ConstructAt(0);
kernel_ctor.set_code(kernels.Use("await_all.i32"));
kernel_ctor.construct_arguments(2).Assign(regs.Use({"f0", "f1"}));
kernel_ctor.construct_last_uses(2).Assign({true, true});
kernel_ctor.construct_results(2).Assign(
regs.Def(absl::Span<const std::string>{"r0", "r1"}));
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(1);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(2).Assign(regs.Use({"r0", "r1"}));
}
function_ctor.set_num_regs(regs.size());
function_ctor.construct_output_regs(2).Assign(regs.Use({"r0", "r1"}));
return buffer;
}
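// Test kernel that awaits all incoming int32 futures and writes one result
// register per future.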
void AwaitAllI32(KernelFrame frame) {
RegisterValueSpan<Future> futures(frame.arguments());
frame.execution_context().AwaitAll<int32_t>(futures, frame.results());
}
TEST(InterpreterTest, AwaitAll) {
auto buffer = CreateAwaitAllExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("await_all.i32", &AwaitAllI32);
LoadedExecutable loaded_executable(executable, kernel_registry);
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(work_queue.get());
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto p0 = Promise::Allocate<int32_t>();
auto p1 = Promise::Allocate<int32_t>();
std::vector<Value> inputs(2);
inputs[0].Set(p0.GetFuture());
inputs[1].Set(p1.GetFuture());
std::vector<Value> outputs(2);
std::vector<uint8_t> last_uses = {true, true};
execution_context.Call(loaded_executable.GetFunction("main"), last_uses,
absl::MakeSpan(inputs), absl::MakeSpan(outputs));
Execute(execution_context);
std::move(p0).Set<int32_t>(100);
std::move(p1).Set<int32_t>(200);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(outputs[0].Get<int32_t>(), 100);
EXPECT_EQ(outputs[1].Get<int32_t>(), 200);
}
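// Variant that awaits shared_ptr<int32_t> payloads and destroys each future
// register that is flagged as a last use.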
void AwaitAllSharedPtrI32(KernelFrame frame) {
RegisterValueSpan<Future> futures(frame.arguments());
frame.execution_context().AwaitAll<std::shared_ptr<int32_t>>(futures,
frame.results());
for (int i = 0; i < futures.size(); ++i) {
if (frame.last_uses()[i]) {
futures.Destroy(i);
}
}
}
TEST(InterpreterTest, AwaitAllSingleProducerMultiConsumers) {
auto buffer = CreateAwaitAllExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("await_all.i32", &AwaitAllSharedPtrI32);
LoadedExecutable loaded_executable(executable, kernel_registry);
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(work_queue.get());
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto p = Promise::Allocate<std::shared_ptr<int32_t>>();
std::vector<Value> inputs(2);
inputs[0].Set(p.GetFuture());
inputs[1].Set(p.GetFuture());
std::vector<Value> outputs(2);
std::vector<uint8_t> last_uses = {true, true};
execution_context.Call(loaded_executable.GetFunction("main"), last_uses,
absl::MakeSpan(inputs), absl::MakeSpan(outputs));
Execute(execution_context);
work_queue->AddTask([p = std::move(p)]() mutable {
std::move(p).Set<std::shared_ptr<int32_t>>(std::make_shared<int32_t>(123));
});
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
EXPECT_EQ(*outputs[0].Get<std::shared_ptr<int32_t>>(), 123);
EXPECT_EQ(*outputs[1].Get<std::shared_ptr<int32_t>>(), 123);
}
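// The first promise resolves normally; the error set on the second promise
// must become the execution context's final status.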
TEST(InterpreterTest, AwaitAllError) {
auto buffer = CreateAwaitAllExecutable();
bc::Executable executable(buffer.data());
KernelRegistry kernel_registry;
RegisterBuiltinKernels(kernel_registry);
kernel_registry.Register("await_all.i32", &AwaitAllI32);
LoadedExecutable loaded_executable(executable, kernel_registry);
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(work_queue.get());
absl::Notification notification;
execution_context.set_exit_handler(
[&notification]() { notification.Notify(); });
auto p0 = Promise::Allocate<int32_t>();
auto p1 = Promise::Allocate<int32_t>();
std::vector<Value> inputs(2);
inputs[0].Set(p0.GetFuture());
inputs[1].Set(p1.GetFuture());
std::vector<Value> outputs(2);
std::vector<uint8_t> last_uses = {true, true};
execution_context.Call(loaded_executable.GetFunction("main"), last_uses,
absl::MakeSpan(inputs), absl::MakeSpan(outputs));
Execute(execution_context);
std::move(p0).Set<int32_t>(100);
ASSERT_FALSE(notification.HasBeenNotified());
std::move(p1).SetError(absl::InternalError("test error"));
notification.WaitForNotification();
EXPECT_THAT(
execution_context.status(),
::tsl::testing::StatusIs(absl::StatusCode::kInternal, "test error"));
}
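// Per-execution user context exposing a raw int pointer that test kernels can
// write results through.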
struct TestState : UserContext<TestState> {
int* state = nullptr;
};
void WriteState(KernelFrame frame) {
auto& test_ |
948 | cpp | tensorflow/tensorflow | subgraph | tensorflow/lite/core/subgraph.cc | tensorflow/lite/core/subgraph_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_SUBGRAPH_H_
#define TENSORFLOW_CORE_GRAPH_SUBGRAPH_H_
#include <string>
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace subgraph {
struct RewriteGraphMetadata {
DataTypeVector feed_types;
DataTypeVector fetch_types;
};
class PruneRewrite {
public:
PruneRewrite(const string* endpoint_name, const DeviceAttributes* device_info)
: endpoint_name_(endpoint_name), device_info_(device_info) {}
virtual ~PruneRewrite() {}
virtual Status AddNode(Graph* g, NodeBuilder::NodeOut tensor,
Node** out_node) = 0;
const string& endpoint_name() { return *endpoint_name_; }
protected:
const DeviceAttributes& device_info() { return *device_info_; }
private:
const string* const endpoint_name_;
const DeviceAttributes* const device_info_;
};
Status RewriteGraphForExecution(
Graph* g, const absl::Span<const string>& fed_outputs,
const absl::Span<const string>& fetch_outputs,
const absl::Span<const string>& target_node_names,
const DeviceAttributes& device_info, bool use_function_convention,
RewriteGraphMetadata* out_metadata);
Status RewriteGraphForExecution(
Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& feed_rewrites,
const std::vector<std::unique_ptr<PruneRewrite>>& fetch_rewrites,
const absl::Span<const string>& target_node_names,
RewriteGraphMetadata* out_metadata);
class ArgFeedRewrite : public PruneRewrite {
public:
ArgFeedRewrite(const string* endpoint_name,
const DeviceAttributes* device_info, int32_t arg_index)
: PruneRewrite(endpoint_name, device_info), arg_index_(arg_index) {}
Status AddNode(Graph* g, NodeBuilder::NodeOut feed_tensor,
Node** out_node) override;
private:
const int32 arg_index_;
};
class RecvFeedRewrite : public PruneRewrite {
public:
using PruneRewrite::PruneRewrite;
Status AddNode(Graph* g, NodeBuilder::NodeOut feed_tensor,
Node** out_node) override;
};
class RetvalFetchRewrite : public PruneRewrite {
public:
RetvalFetchRewrite(const string* endpoint_name,
const DeviceAttributes* device_info, int32_t retval_index)
: PruneRewrite(endpoint_name, device_info), retval_index_(retval_index) {}
Status AddNode(Graph* g, NodeBuilder::NodeOut fetch_tensor,
Node** out_node) override;
private:
const int32 retval_index_;
};
class SendFetchRewrite : public PruneRewrite {
public:
using PruneRewrite::PruneRewrite;
Status AddNode(Graph* g, NodeBuilder::NodeOut fetch_tensor,
Node** out_node) override;
};
}
}
#endif
#include "tensorflow/core/graph/subgraph.h"
#include <algorithm>
#include <deque>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace subgraph {
namespace {
typedef std::unordered_map<StringPiece, Node*, StringPieceHasher> NameIndex;
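// Replaces each fed tensor with the node produced by its PruneRewrite: the
// feed node is wired to the source, consumers of the fed output (and control
// edges out of Placeholder nodes) are rerouted to it, and the fed output's
// base type is recorded.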
Status FeedInputs(
Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& feed_rewrites,
NameIndex* name_index, DataTypeVector* out_feed_types) {
out_feed_types->clear();
out_feed_types->reserve(feed_rewrites.size());
for (size_t i = 0; i < feed_rewrites.size(); ++i) {
const string& t = feed_rewrites[i]->endpoint_name();
TensorId id(ParseTensorName(t));
auto iter = name_index->find(id.first);
if (iter == name_index->end()) {
return errors::NotFound("FeedInputs: unable to find feed output ", t);
}
Node* n = iter->second;
DCHECK_EQ(n->name(), id.first);
if (id.second >= n->num_outputs()) {
return errors::InvalidArgument(
"FeedInputs: ", t, " should have output index < ", n->num_outputs());
}
Node* feed_node;
TF_RETURN_IF_ERROR(
feed_rewrites[i]->AddNode(g, {n, id.second}, &feed_node));
(*name_index)[feed_node->name()] = feed_node;
g->AddControlEdge(g->source_node(), feed_node, true);
std::vector<const Edge*> to_remove;
for (const Edge* e : n->out_edges()) {
if (e->src_output() == id.second) {
to_remove.emplace_back(e);
} else if (e->src_output() == Graph::kControlSlot &&
(n->type_string() == "Placeholder" ||
n->type_string() == "PlaceholderV2")) {
to_remove.emplace_back(e);
}
}
for (const Edge* e : to_remove) {
if (e->src_output() == id.second) {
g->AddEdge(feed_node, 0, e->dst(), e->dst_input());
} else {
CHECK_EQ(Graph::kControlSlot, e->src_output());
g->AddControlEdge(feed_node, e->dst(), true);
}
g->RemoveEdge(e);
}
out_feed_types->push_back(BaseType(n->output_type(id.second)));
}
return absl::OkStatus();
}
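// Adds a fetch node for each requested output, connects it to the sink with a
// control edge, and records both the fetch node and the fetched output's base
// type. Errors out if the node has no outputs or the output index is too
// large.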
Status FetchOutputs(
Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& fetch_rewrites,
NameIndex* name_index, std::vector<Node*>* out_fetch_nodes,
DataTypeVector* out_fetch_types) {
out_fetch_nodes->clear();
out_fetch_nodes->reserve(fetch_rewrites.size());
for (size_t i = 0; i < fetch_rewrites.size(); ++i) {
const string& t = fetch_rewrites[i]->endpoint_name();
TensorId id(ParseTensorName(t));
auto iter = name_index->find(id.first);
if (iter == name_index->end()) {
return errors::NotFound("FetchOutputs node ", t, ": not found");
}
Node* n = iter->second;
DCHECK_EQ(n->name(), id.first);
VLOG(2) << "Found fetch node for " << t;
if (n->num_outputs() == 0) {
return errors::InvalidArgument(
"Tried to fetch data for '", t,
"', which produces no output. To run to a node but not fetch any "
"data, pass '",
t,
"' as an argument to the 'target_node_names' argument of the "
"Session::Run API.");
} else if (id.second >= n->num_outputs()) {
return errors::InvalidArgument("FetchOutputs ", t,
": output index too large, must be < ",
n->num_outputs());
}
Node* fetch_node;
TF_RETURN_IF_ERROR(
fetch_rewrites[i]->AddNode(g, {n, id.second}, &fetch_node));
(*name_index)[fetch_node->name()] = fetch_node;
g->AddControlEdge(fetch_node, g->sink_node(), true);
out_fetch_nodes->push_back(fetch_node);
out_fetch_types->push_back(BaseType(n->output_type(id.second)));
}
return absl::OkStatus();
}
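// Looks up a node or tensor name in the index and inserts the node into the
// target set; returns false if the name is unknown.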
bool AddNodeToTargets(const string& node_or_tensor_name,
const NameIndex& name_index,
std::unordered_set<const Node*>* targets) {
TensorId id = ParseTensorName(node_or_tensor_name);
auto iter = name_index.find(id.first);
if (iter == name_index.end()) {
return false;
}
const Node* n = iter->second;
CHECK_EQ(n->name(), id.first);
targets->insert(n);
return true;
}
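// Prunes the graph to the nodes reverse-reachable from the fetch and target
// nodes, then restores source/sink edges.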
Status PruneForTargets(Graph* g, const NameIndex& name_index,
const std::vector<Node*>& fetch_nodes,
const absl::Span<const string>& target_nodes) {
string not_found;
std::unordered_set<const Node*> targets;
for (Node* n : fetch_nodes) {
if (!AddNodeToTargets(n->name(), name_index, &targets)) {
strings::StrAppend(&not_found, n->name(), " ");
}
}
for (const string& s : target_nodes) {
if (!AddNodeToTargets(s, name_index, &targets)) {
strings::StrAppend(&not_found, s, " ");
}
}
if (!not_found.empty()) {
return errors::NotFound("PruneForTargets: Some target nodes not found: ",
not_found);
}
PruneForReverseReachability(g, std::move(targets));
FixupSourceAndSinkEdges(g);
return absl::OkStatus();
}
}
Status ArgFeedRewrite::AddNode(Graph* g, NodeBuilder::NodeOut feed_tensor,
Node** out_node) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat("_arg_", feed_tensor.node->name(), "_",
feed_tensor.index, "_", arg_index_),
"_Arg")
.Attr("T", BaseType(feed_tensor.node->output_type(feed_tensor.index)))
.Attr("index", arg_index_)
.Finalize(g, out_node, true));
(*out_node)->set_assigned_device_name(device_info().name());
return absl::OkStatus();
}
Status RecvFeedRewrite::AddNode(Graph* g, NodeBuilder::NodeOut feed_tensor,
Node** out_node) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat("_recv_", feed_tensor.node->name(), "_",
feed_tensor.index),
"_Recv")
.Attr("tensor_type",
BaseType(feed_tensor.node->output_type(feed_tensor.index)))
.Attr("tensor_name", endpoint_name())
.Attr("send_device", device_info().name())
.Attr("recv_device", device_info().name())
.Attr("send_device_incarnation",
static_cast<int64_t>(device_info().incarnation()))
.Attr("client_terminated", true)
.Finalize(g, out_node, true));
(*out_node)->set_assigned_device_name(device_info().name());
return absl::OkStatus();
}
Status RetvalFetchRewrite::AddNode(Graph* g, NodeBuilder::NodeOut fetch_tensor,
Node** out_node) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat("_retval_", fetch_tensor.node->name(), "_",
fetch_tensor.index, "_", retval_index_),
"_Retval")
.Input(fetch_tensor.node, fetch_tensor.index)
.Attr("T",
BaseType(fetch_tensor.node->output_type(fetch_tensor.index)))
.Attr("index", retval_index_)
.Finalize(g, out_node, true));
(*out_node)->set_assigned_device_name(device_info().name());
return absl::OkStatus();
}
Status SendFetchRewrite::AddNode(Graph* g, NodeBuilder::NodeOut fetch_tensor,
Node** out_node) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat("_send_", fetch_tensor.node->name(), "_",
fetch_tensor.index),
"_Send")
.Input(fetch_tensor.node, fetch_tensor.index)
.Attr("tensor_name", endpoint_name())
.Attr("send_device", device_info().name())
.Attr("recv_device", device_info().name())
.Attr("send_device_incarnation",
static_cast<int64_t>(device_info().incarnation()))
.Attr("client_terminated", true)
.Finalize(g, out_node, true));
(*out_node)->set_assigned_device_name(device_info().name());
return absl::OkStatus();
}
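// Convenience overload: wraps fed and fetched endpoint names in _Arg/_Retval
// rewrites (function convention) or _Recv/_Send rewrites, then delegates to
// the rewrite-based overload below.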
Status RewriteGraphForExecution(
Graph* g, const absl::Span<const string>& fed_outputs,
const absl::Span<const string>& fetch_outputs,
const absl::Span<const string>& target_node_names,
const DeviceAttributes& device_info, bool use_function_convention,
RewriteGraphMetadata* out_metadata) {
std::vector<std::unique_ptr<PruneRewrite>> feed_rewrites;
feed_rewrites.reserve(fed_outputs.size());
if (use_function_convention) {
for (size_t i = 0; i < fed_outputs.size(); ++i) {
feed_rewrites.emplace_back(new ArgFeedRewrite(
&fed_outputs[i], &device_info, static_cast<int32>(i)));
}
} else {
for (const string& fed_output : fed_outputs) {
feed_rewrites.emplace_back(
new RecvFeedRewrite(&fed_output, &device_info));
}
}
std::vector<std::unique_ptr<PruneRewrite>> fetch_rewrites;
fetch_rewrites.reserve(fetch_outputs.size());
if (use_function_convention) {
for (size_t i = 0; i < fetch_outputs.size(); ++i) {
fetch_rewrites.emplace_back(new RetvalFetchRewrite(
&fetch_outputs[i], &device_info, static_cast<int32>(i)));
}
} else {
for (const string& fetch_output : fetch_outputs) {
fetch_rewrites.emplace_back(
new SendFetchRewrite(&fetch_output, &device_info));
}
}
return RewriteGraphForExecution(g, feed_rewrites, fetch_rewrites,
target_node_names, out_metadata);
}
namespace {
template <typename StringContainer>
std::vector<string> ConvertToVector(StringContainer field) {
return std::vector<string>(field.begin(), field.end());
}
}
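// Validates that no endpoint is fed twice or both fed and fetched, builds a
// name index for the graph, applies the feed and fetch rewrites, and finally
// prunes the graph down to what the fetches and targets require.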
Status RewriteGraphForExecution(
Graph* g, const std::vector<std::unique_ptr<PruneRewrite>>& feed_rewrites,
const std::vector<std::unique_ptr<PruneRewrite>>& fetch_rewrites,
const absl::Span<const string>& target_node_names,
RewriteGraphMetadata* out_metadata) {
if (fetch_rewrites.empty() && target_node_names.empty()) {
return errors::InvalidArgument(
"Must specify at least one target to fetch or execute.");
}
std::unordered_set<string> endpoints;
for (const auto& feed_rewrite : feed_rewrites) {
auto result = endpoints.insert(feed_rewrite->endpoint_name());
if (!result.second) {
return errors::InvalidArgument("Endpoint \"",
feed_rewrite->endpoint_name(),
"\" fed more than once.");
}
}
for (const auto& fetch_rewrite : fetch_rewrites) {
if (endpoints.count(fetch_rewrite->endpoint_name()) > 0) {
return errors::InvalidArgument(fetch_rewrite->endpoint_name(),
" is both fed and fetched.");
}
}
NameIndex name_index;
name_index.reserve(g->num_nodes());
for (Node* n : g->nodes()) {
name_index[n->name()] = n;
}
if (!feed_rewrites.empty()) {
TF_RETURN_IF_ERROR(
FeedInputs(g, feed_rewrites, &name_index, &out_metadata->feed_types));
}
std::vector<Node*> fetch_nodes;
if (!fetch_rewrites.empty()) {
TF_RETURN_IF_ERROR(FetchOutputs(g, fetch_rewrites, &name_index,
&fetch_nodes, &out_metadata->fetch_types));
}
if (!fetch_nodes.empty() || !target_node_names.empty()) {
TF_RETURN_IF_ERROR(
PruneForTargets(g, name_index, fetch_nodes, target_node_names));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/graph/subgraph.h"
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class SubgraphTest : public ::testing::Test {
protected:
SubgraphTest() : g_(new Graph(OpRegistry::Global())) {
device_info_.set_name("/job:a/replica:0/task:0/cpu:0");
device_info_.set_device_type(DeviceType(DEVICE_CPU).type());
device_info_.set_incarnation(0);
}
~SubgraphTest() override {}
void ExpectOK(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef_));
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef_, g_.get()));
}
Node* FindNode(const string& name) {
for (Node* n : g_->nodes()) {
if (n->name() == name) return n;
}
return nullptr;
}
bool HasNode(const string& name) { return FindNode(name) != nullptr; }
void ExpectNodes(const string& nodes) {
int count = 0;
std::vector<string> actual_nodes;
for (Node* n : g_->nodes()) {
if (n->IsOp()) {
count++;
actual_nodes.push_back(n->name());
}
}
std::sort(actual_nodes.begin(), actual_nodes.end());
LOG(INFO) << "Nodes present: " << absl::StrJoin(actual_nodes, " ");
std::vector<string> expected_nodes = str_util::Split(nodes, ',');
std::sort(expected_nodes.begin(), expected_nodes.end());
for (const string& s : expected_nodes) {
Node* n = FindNode(s);
EXPECT_TRUE(n != nullptr) << s;
if (n->type_string() == "_Send" || n->type_string() == "_Recv") {
EXPECT_EQ(device_info_.name(), n->assigned_device_name()) << s;
}
}
EXPECT_TRUE(actual_nodes.size() == expected_nodes.size())
<< "\nActual: " << absl::StrJoin(actual_nodes, ",")
<< "\nExpected: " << absl::StrJoin(expected_nodes, ",");
}
bool HasEdge(const string& src, int src_out, const string& dst, int dst_in) {
for (const Edge* e : g_->edges()) {
if (e->src()->name() == src && e->src_output() == src_out &&
e->dst()->name() == dst && e->dst_input() == dst_in)
return true;
}
return false;
}
bool HasControlEdge(const string& src, const string& dst) {
return HasEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
string Subgraph(const string& fed_str, const string& fetch_str,
const string& targets_str,
bool use_function_convention = false) {
Graph* subgraph = new Graph(OpRegistry::Global());
CopyGraph(*g_, subgraph);
std::vector<string> fed =
str_util::Split(fed_str, ',', str_util::SkipEmpty());
std::vector<string> fetch =
str_util::Split(fetch_str, ',', str_util::SkipEmpty());
std::vector<string> targets =
str_util::Split(targets_str, ',', str_util::SkipEmpty());
subgraph::RewriteGraphMetadata metadata;
Status s = subgraph::RewriteGraphForExecution(
subgraph, fed, fetch, targets, device_info_, use_function_convention,
&metadata);
if (!s.ok()) {
delete subgraph;
return s.ToString();
}
EXPECT_EQ(fed.size(), metadata.feed_types.size());
EXPECT_EQ(fetch.size(), metadata.fetch_types.size());
g_.reset(subgraph);
return "OK";
}
Graph* graph() { return g_.get(); }
private:
GraphDef gdef_;
std::unique_ptr<Graph> g_;
DeviceAttributes device_info_;
};
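// Simple float ops used to construct the test graphs below.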
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestRelu").Input("i: float").Output("o: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");
TEST_F(SubgraphTest, Targets1) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("", "", "t1"));
ExpectNodes("W1,input,t1");
}
TEST_F(SubgraphTest, Targets2) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: 'W1' input: 'input:1' }"
"node { name: 't2' op: 'TestMul' input: 'W2' input: 't1' }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("", "", "t2,t3_a"));
ExpectNodes("W1,W2,input,t1,t2,t3_a");
}
TEST_F(SubgraphTest, FedOutputs1) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("input:1", "", "t2"));
ExpectNodes("W1,W2,_recv_input_1,t1,t2");
}
TEST_F(SubgraphTest, FedOutputs1_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK",
Subgraph("input:1", "", "t2", true ));
ExpectNodes("W1,W2,_arg_input_1_0,t1,t2");
}
TEST_F(SubgraphTest, FedRefNode) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 't1' op: 'TestMul' input: [ 'W2', 'W1' ] }");
EXPECT_EQ("OK", Subgraph("W1:0", "", "t1"));
ExpectNodes("_recv_W1_0,W2,t1");
Node* n = FindNode("_recv_W1_0");
EXPECT_FALSE(IsRefType(CHECK_NOTNULL(n)->output_type(0)));
}
TEST_F(SubgraphTest, FedRefNode_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 't1' op: 'TestMul' input: [ 'W2', 'W1' ] }");
EXPECT_EQ("OK",
Subgraph("W1:0", "", "t1", true ));
ExpectNodes("_arg_W1_0_0,W2,t1");
Node* n = FindNode("_arg_W1_0_0");
EXPECT_FALSE(IsRefType(CHECK_NOTNULL(n)->output_type(0)));
}
TEST_F(SubgraphTest, FedOutputs2_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("input:1,t1,W2", "", "t2",
true ));
ExpectNodes("_arg_t1_0_1,_arg_W2_0_2,t2");
}
TEST_F(SubgraphTest, FetchOutputs1) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("", "W2,input:1,t1,t2", "t2"));
ExpectNodes(
"W1,W2,input,t1,t2,_send_W2_0,_send_input_1,_send_t1_0,_send_t2_0");
}
TEST_F(SubgraphTest, FetchOutputs1_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("", "W2,input:1,t1,t2", "t2",
true ));
ExpectNodes(
"W1,W2,input,t1,t2,_retval_W2_0_0,_retval_input_1_1,_retval_t1_0_2,_"
"retval_t2_0_3");
}
TEST_F(SubgraphTest, FetchOutputs2) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK", Subgraph("", "t3_a", "t2"));
ExpectNodes("W1,W2,input,t1,t2,t3_a,_send_t3_a_0");
}
TEST_F(SubgraphTest, FetchOutputs2_FunctionConvention) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W2', 't1' ] }"
"node { name: 't3_a' op: 'TestRelu' input: 't2' }"
"node { name: 't3_b' op: 'TestRelu' input: 't2' }");
EXPECT_EQ("OK",
Subgraph("", "t3_a", "t2", true ));
ExpectNodes("W1,W2,input,t1,t2,t3_a,_retval_t3_a_0_0");
}
TEST_F(SubgraphTest, ChainOfFools) {
ExpectOK(
"node { name: 'a' op: 'TestParams' }"
"node { name: 'b' op: 'TestRelu' input: 'a'}"
"node { name: 'c' op: 'TestRelu' input: 'b'}"
"node { name: 'd' op: 'TestRelu' input: 'c'}"
"node { name: 'e' op: 'TestRelu' input: 'd'}"
"node { name: 'f' op: 'TestRelu' input: 'e'}");
EXPECT_EQ("OK", Subgraph("c:0", "b:0,e:0", ""));
ExpectNodes("a,b,_send_b_0,_recv_c_0,d,e,_send_e_0");
EXPECT_TRUE(HasEdge("a", 0, "b", 0));
EXPECT_TRUE(HasEdge("b", 0, "_send_b_0", 0));
EXPECT_TRUE(HasEdge("_recv_c_0", 0, "d", 0));
EXPECT_TRUE(HasEdge("d", 0, "e", 0));
EXPECT_TRUE(HasEdge("e", 0, "_send_e_0", 0));
}
static bool HasSubstr(StringPiece base, StringPiece substr) {
bool ok = absl::StrContains(base, substr);
EXPECT_TRUE(ok) << base << ", expected substring " << substr;
return ok;
}
TEST_F(SubgraphTest, Errors) {
ExpectOK(
"node { name: 'a' op: 'TestParams' }"
"node { name: 'b' op: 'TestRelu' input: 'a'}"
"node { name: 'c' op: 'TestRelu' input: 'b'}"
"node { name: 'd' op: 'TestRelu' input: 'c'}"
"node { name: 'e' op: 'TestRelu' input: 'd'}"
"node { name: 'f' op: 'TestRelu' input: 'e'}");
EXPECT_TRUE(
HasSubstr(Subgraph("c:0", "b:0,c:0", ""), "both fed and fetched"));
EXPECT_TRUE(HasSubstr(Subgraph("foo:0", "c:0", ""), "unable to find"));
EXPECT_TRUE(HasSubstr(Subgraph("", "foo:0", ""), "not found"));
EXPECT_TRUE(HasSubstr(Subgraph("", "", "foo"), "not found"));
EXPECT_TRUE(HasSubstr(Subgraph("", "", ""), "at least one target"));
}
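// Benchmarks: build a linear chain of num_nodes ops and time
// RewriteGraphForExecution with and without the function convention.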
REGISTER_OP("In").Output("o: float");
REGISTER_OP("Op").Input("i: float").Output("o: float");
void BM_SubgraphHelper(::testing::benchmark::State& state,
bool use_function_convention) {
const int num_nodes = state.range(0);
DeviceAttributes device_info;
device_info.set_name("/job:a/replica:0/task:0/cpu:0");
device_info.set_device_type(DeviceType(DEVICE_CPU).type());
device_info.set_incarnation(0);
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* last_node = nullptr;
for (int i = 0; i < num_nodes; i++) {
string name = strings::StrCat("N", i);
if (i > 0) {
last_node = ops::UnaryOp("Op", last_node, b.opts().WithName(name));
} else {
last_node = ops::SourceOp("In", b.opts().WithName(name));
}
}
TF_CHECK_OK(GraphDefBuilderToGraph(b, &g));
}
std::vector<string> fed;
if (num_nodes > 1000) {
fed.push_back(strings::StrCat("N", num_nodes - 1000));
}
std::vector<string> fetch;
std::vector<string> targets = {strings::StrCat("N", num_nodes - 1)};
for (auto s : state) {
Graph* subgraph = new Graph(OpRegistry::Global());
CopyGraph(g, subgraph);
subgraph::RewriteGraphMetadata metadata;
TF_CHECK_OK(subgraph::RewriteGraphForExecution(
subgraph, fed, fetch, targets, device_info, use_function_convention,
&metadata));
delete subgraph;
}
}
void BM_Subgraph(::testing::benchmark::State& state) {
BM_SubgraphHelper(state, false );
}
void BM_SubgraphFunctionConvention(::testing::benchmark::State& state) {
BM_SubgraphHelper(state, true );
}
BENCHMARK(BM_Subgraph)->Arg(100)->Arg(1000)->Arg(10000)->Arg(100000);
BENCHMARK(BM_SubgraphFunctionConvention)
->Arg(100)
->Arg(1000)
->Arg(10000)
->Arg(100000);
}
} |
949 | cpp | tensorflow/tensorflow | c_api | tensorflow/lite/core/experimental/acceleration/mini_benchmark/c/c_api.cc | tensorflow/lite/core/experimental/acceleration/mini_benchmark/c/c_api_test.cc | #ifndef XLA_FFI_API_C_API_H_
#define XLA_FFI_API_C_API_H_
#include <stddef.h>
#include <stdint.h>
#define XLA_FFI_STRUCT_SIZE(struct_type, last_field) \
(offsetof(struct_type, last_field) + sizeof(((struct_type*)0)->last_field))
#define XLA_FFI_DEFINE_STRUCT_TRAITS(sname, last_field) \
typedef struct sname sname; \
enum { sname##_STRUCT_SIZE = XLA_FFI_STRUCT_SIZE(sname, last_field) }
#ifdef __cplusplus
extern "C" {
#endif
typedef struct XLA_FFI_Api XLA_FFI_Api;
typedef struct XLA_FFI_InternalApi XLA_FFI_InternalApi;
#define XLA_FFI_API_MAJOR 0
#define XLA_FFI_API_MINOR 0
struct XLA_FFI_Api_Version {
size_t struct_size;
void* priv;
int major_version;
int minor_version;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Api_Version, minor_version);
typedef struct XLA_FFI_Error XLA_FFI_Error;
typedef enum {
XLA_FFI_Error_Code_OK = 0,
XLA_FFI_Error_Code_CANCELLED = 1,
XLA_FFI_Error_Code_UNKNOWN = 2,
XLA_FFI_Error_Code_INVALID_ARGUMENT = 3,
XLA_FFI_Error_Code_DEADLINE_EXCEEDED = 4,
XLA_FFI_Error_Code_NOT_FOUND = 5,
XLA_FFI_Error_Code_ALREADY_EXISTS = 6,
XLA_FFI_Error_Code_PERMISSION_DENIED = 7,
XLA_FFI_Error_Code_RESOURCE_EXHAUSTED = 8,
XLA_FFI_Error_Code_FAILED_PRECONDITION = 9,
XLA_FFI_Error_Code_ABORTED = 10,
XLA_FFI_Error_Code_OUT_OF_RANGE = 11,
XLA_FFI_Error_Code_UNIMPLEMENTED = 12,
XLA_FFI_Error_Code_INTERNAL = 13,
XLA_FFI_Error_Code_UNAVAILABLE = 14,
XLA_FFI_Error_Code_DATA_LOSS = 15,
XLA_FFI_Error_Code_UNAUTHENTICATED = 16
} XLA_FFI_Error_Code;
struct XLA_FFI_Error_Create_Args {
size_t struct_size;
void* priv;
const char* message;
XLA_FFI_Error_Code errc;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Error_Create_Args, errc);
typedef XLA_FFI_Error* XLA_FFI_Error_Create(XLA_FFI_Error_Create_Args* args);
struct XLA_FFI_Error_GetMessage_Args {
size_t struct_size;
void* priv;
XLA_FFI_Error* error;
const char* message;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Error_GetMessage_Args, message);
typedef void XLA_FFI_Error_GetMessage(XLA_FFI_Error_GetMessage_Args* args);
struct XLA_FFI_Error_Destroy_Args {
size_t struct_size;
void* priv;
XLA_FFI_Error* error;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Error_Destroy_Args, error);
typedef void XLA_FFI_Error_Destroy(XLA_FFI_Error_Destroy_Args* args);
typedef enum {
XLA_FFI_DataType_INVALID = 0,
XLA_FFI_DataType_PRED = 1,
XLA_FFI_DataType_S8 = 2,
XLA_FFI_DataType_S16 = 3,
XLA_FFI_DataType_S32 = 4,
XLA_FFI_DataType_S64 = 5,
XLA_FFI_DataType_U8 = 6,
XLA_FFI_DataType_U16 = 7,
XLA_FFI_DataType_U32 = 8,
XLA_FFI_DataType_U64 = 9,
XLA_FFI_DataType_F16 = 10,
XLA_FFI_DataType_F32 = 11,
XLA_FFI_DataType_F64 = 12,
XLA_FFI_DataType_BF16 = 16,
XLA_FFI_DataType_C64 = 15,
XLA_FFI_DataType_C128 = 18,
XLA_FFI_DataType_TOKEN = 17,
XLA_FFI_DataType_F8E5M2 = 19,
XLA_FFI_DataType_F8E4M3FN = 20,
XLA_FFI_DataType_F8E4M3B11FNUZ = 23,
XLA_FFI_DataType_F8E5M2FNUZ = 24,
XLA_FFI_DataType_F8E4M3FNUZ = 25,
} XLA_FFI_DataType;
struct XLA_FFI_Buffer {
size_t struct_size;
void* priv;
XLA_FFI_DataType dtype;
void* data;
int64_t rank;
int64_t* dims;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Buffer, dims);
typedef enum {
XLA_FFI_ArgType_BUFFER = 1,
} XLA_FFI_ArgType;
typedef enum {
XLA_FFI_RetType_BUFFER = 1,
} XLA_FFI_RetType;
typedef enum {
XLA_FFI_AttrType_ARRAY = 1,
XLA_FFI_AttrType_DICTIONARY = 2,
XLA_FFI_AttrType_SCALAR = 3,
XLA_FFI_AttrType_STRING = 4,
} XLA_FFI_AttrType;
typedef struct XLA_FFI_ExecutionContext XLA_FFI_ExecutionContext;
struct XLA_FFI_TypeId {
int64_t type_id;
};
struct XLA_FFI_ByteSpan {
const char* ptr;
size_t len;
};
struct XLA_FFI_Scalar {
XLA_FFI_DataType dtype;
void* value;
};
struct XLA_FFI_Array {
XLA_FFI_DataType dtype;
size_t size;
void* data;
};
typedef enum {
XLA_FFI_ExecutionStage_PREPARE = 0,
XLA_FFI_ExecutionStage_INITIALIZE = 1,
XLA_FFI_ExecutionStage_EXECUTE = 2,
} XLA_FFI_ExecutionStage;
struct XLA_FFI_Args {
size_t struct_size;
void* priv;
int64_t size;
XLA_FFI_ArgType* types;
void** args;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Args, args);
struct XLA_FFI_Rets {
size_t struct_size;
void* priv;
int64_t size;
XLA_FFI_RetType* types;
void** rets;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Rets, rets);
struct XLA_FFI_Attrs {
size_t struct_size;
void* priv;
int64_t size;
XLA_FFI_AttrType* types;
XLA_FFI_ByteSpan** names;
void** attrs;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Attrs, attrs);
struct XLA_FFI_CallFrame {
size_t struct_size;
void* priv;
const XLA_FFI_Api* api;
XLA_FFI_ExecutionContext* ctx;
XLA_FFI_ExecutionStage stage;
XLA_FFI_Args args;
XLA_FFI_Rets rets;
XLA_FFI_Attrs attrs;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_CallFrame, attrs);
typedef XLA_FFI_Error* XLA_FFI_Handler(XLA_FFI_CallFrame* call_frame);
struct XLA_FFI_Handler_Bundle {
XLA_FFI_Handler* prepare;
XLA_FFI_Handler* initialize;
XLA_FFI_Handler* execute;
};
enum XLA_FFI_Handler_TraitsBits {
XLA_FFI_HANDLER_TRAITS_COMMAND_BUFFER_COMPATIBLE = 1u << 0,
};
typedef uint32_t XLA_FFI_Handler_Traits;
struct XLA_FFI_Handler_Register_Args {
size_t struct_size;
void* priv;
XLA_FFI_ByteSpan name;
XLA_FFI_ByteSpan platform;
XLA_FFI_Handler_Bundle bundle;
XLA_FFI_Handler_Traits traits;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Handler_Register_Args, traits);
typedef XLA_FFI_Error* XLA_FFI_Handler_Register(
XLA_FFI_Handler_Register_Args* args);
struct XLA_FFI_TypeId_Register_Args {
size_t struct_size;
void* priv;
XLA_FFI_ByteSpan name;
XLA_FFI_TypeId* type_id;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_TypeId_Register_Args, type_id);
typedef XLA_FFI_Error* XLA_FFI_TypeId_Register(
XLA_FFI_TypeId_Register_Args* args);
struct XLA_FFI_ExecutionContext_Get_Args {
size_t struct_size;
void* priv;
XLA_FFI_ExecutionContext* ctx;
XLA_FFI_TypeId* type_id;
void* data;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_ExecutionContext_Get_Args, data);
typedef XLA_FFI_Error* XLA_FFI_ExecutionContext_Get(
XLA_FFI_ExecutionContext_Get_Args* args);
struct XLA_FFI_Stream_Get_Args {
size_t struct_size;
void* priv;
XLA_FFI_ExecutionContext* ctx;
void* stream;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Stream_Get_Args, stream);
typedef XLA_FFI_Error* XLA_FFI_Stream_Get(XLA_FFI_Stream_Get_Args* args);
struct XLA_FFI_DeviceMemory_Allocate_Args {
size_t struct_size;
void* priv;
XLA_FFI_ExecutionContext* ctx;
size_t size;
size_t alignment;
void* data;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_DeviceMemory_Allocate_Args, data);
typedef XLA_FFI_Error* XLA_FFI_DeviceMemory_Allocate(
XLA_FFI_DeviceMemory_Allocate_Args* args);
struct XLA_FFI_DeviceMemory_Free_Args {
size_t struct_size;
void* priv;
XLA_FFI_ExecutionContext* ctx;
size_t size;
void* data;
};
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_DeviceMemory_Free_Args, data);
typedef XLA_FFI_Error* XLA_FFI_DeviceMemory_Free(
XLA_FFI_DeviceMemory_Free_Args* args);
#define _XLA_FFI_API_STRUCT_FIELD(fn_type) fn_type* fn_type
struct XLA_FFI_Api {
size_t struct_size;
void* priv;
XLA_FFI_InternalApi* internal_api;
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Error_Create);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Error_GetMessage);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Error_Destroy);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Handler_Register);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_Stream_Get);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_TypeId_Register);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_ExecutionContext_Get);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_DeviceMemory_Allocate);
_XLA_FFI_API_STRUCT_FIELD(XLA_FFI_DeviceMemory_Free);
};
#undef _XLA_FFI_API_STRUCT_FIELD
XLA_FFI_DEFINE_STRUCT_TRAITS(XLA_FFI_Api, XLA_FFI_Stream_Get);
const XLA_FFI_Api* XLA_FFI_GetApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/c/eager/c_api.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "xla/tsl/c/tsl_status_internal.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/custom_device.h"
#include "tensorflow/core/common_runtime/eager/custom_device_op_handler.h"
#include "tensorflow/core/common_runtime/eager/execute.h"
#include "tensorflow/core/common_runtime/eager/placement_utils.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/version.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/common_runtime/eager/context_distributed_manager.h"
#endif
using tensorflow::string;
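// Local helpers: DeviceName falls back to "cpu:0" for a null device, and
// AnnotateEagerRuntimeConstructionContext tags a FunctionDef as having been
// constructed by the eager runtime.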
namespace {
string DeviceName(const tensorflow::Device* d) {
return (d == nullptr) ? "cpu:0" : d->name();
}
void AnnotateEagerRuntimeConstructionContext(
tensorflow::FunctionDef& function_def) {
tensorflow::AttrValue value;
SetAttrValue("kEagerRuntime", &value);
(*function_def.mutable_attr())["_construction_context"] = value;
}
}
extern "C" {
TFE_ContextOptions* TFE_NewContextOptions() { return new TFE_ContextOptions; }
void TFE_ContextOptionsSetConfig(TFE_ContextOptions* options, const void* proto,
size_t proto_len, TF_Status* status) {
TF_SetConfig(&options->session_options, proto, proto_len, status);
}
void TFE_ContextOptionsSetAsync(TFE_ContextOptions* options,
unsigned char enable) {
options->async = enable;
}
void TFE_ContextOptionsSetDevicePlacementPolicy(
TFE_ContextOptions* options, TFE_ContextDevicePlacementPolicy policy) {
options->device_placement_policy = policy;
}
void TFE_DeleteContextOptions(TFE_ContextOptions* options) { delete options; }
TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
if (opts->use_tfrt) {
status->status = tensorflow::errors::Unimplemented("TFRT is not supported");
return nullptr;
}
std::vector<std::unique_ptr<tensorflow::Device>> devices;
status->status = tensorflow::DeviceFactory::AddDevices(
opts->session_options.options, "/job:localhost/replica:0/task:0",
&devices);
if (!status->status.ok()) return nullptr;
std::unique_ptr<tensorflow::DeviceMgr> device_mgr(
new tensorflow::DynamicDeviceMgr(std::move(devices)));
auto r = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
opts->session_options.options,
static_cast<tensorflow::ContextDevicePlacementPolicy>(
opts->device_placement_policy),
opts->async, device_mgr.release(),
true, std::move(r),
nullptr,
nullptr,
opts->run_eager_op_as_function,
opts->jit_compile_rewrite);
#if !defined(IS_MOBILE_PLATFORM)
eager_context->SetDistributedManager(
std::make_unique<tensorflow::EagerContextDistributedManager>(
eager_context));
#endif
return tensorflow::wrap(eager_context);
}
void TFE_DeleteContext(TFE_Context* ctx) {
if (ctx == nullptr) {
return;
}
tensorflow::unwrap(ctx)->Release();
}
TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) {
TF_DeviceList* l = new TF_DeviceList;
tensorflow::unwrap(ctx)->ListDevices(&l->response);
return l;
}
void TFE_ContextClearCaches(TFE_Context* ctx) {
tensorflow::unwrap(ctx)->ClearCachesAndThreadExecutors();
}
TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status) {
TFE_ContextSetServerDefWithTimeoutAndRetries(
ctx, keep_alive_secs, proto, proto_len, 0,
0, status, false);
}
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status,
bool clear_existing_contexts) {
TFE_ContextSetServerDefWithTimeoutAndRetries(
ctx, keep_alive_secs, proto, proto_len, init_timeout_in_ms,
0, status, clear_existing_contexts);
}
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeoutAndRetries(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, int retries, TF_Status* status,
bool clear_existing_contexts) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::errors::Unimplemented(
"TFE_ContextSetServerDef not supported on mobile");
#else
tensorflow::ServerDef server_def;
if (!server_def.ParseFromArray(proto, proto_len)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid tensorflow.ServerDef protocol buffer");
return;
}
status->status =
tensorflow::unwrap(ctx)->GetDistributedManager()->SetOrUpdateServerDef(
server_def, true, keep_alive_secs,
init_timeout_in_ms, retries, clear_existing_contexts);
#endif
}
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status) {
TFE_ContextUpdateServerDefWithTimeout(ctx, keep_alive_secs, proto, proto_len,
0, status);
}
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::errors::Unimplemented(
"TFE_ContextUpdateServerDef not supported on mobile");
#else
tensorflow::ServerDef server_def;
tensorflow::EagerContext* context =
tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
if (!server_def.ParseFromArray(proto, proto_len)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid tensorflow.ServerDef protocol buffer");
return;
} else if (context->GetContextId() ==
tensorflow::EagerContext::kInvalidContextId) {
status->status = tensorflow::errors::InvalidArgument(
"Trying to update a context with invalid context id.");
}
status->status =
tensorflow::unwrap(ctx)->GetDistributedManager()->SetOrUpdateServerDef(
server_def, false, keep_alive_secs,
init_timeout_in_ms, 0);
#endif
}
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::errors::Unimplemented(
"TFE_ContextSetServerDef not supported on mobile");
return false;
#else
bool is_alive;
status->status =
tensorflow::unwrap(ctx)->GetDistributedManager()->CheckRemoteAlive(
worker_name, &is_alive);
return is_alive;
#endif
}
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status) {
#if defined(IS_MOBILE_PLATFORM)
status->status = tensorflow::OkStatus();
#else
status->status = tensorflow::unwrap(ctx)->AsyncWait();
#endif
}
void TFE_ContextSetThreadLocalDevicePlacementPolicy(
TFE_Context* ctx, TFE_ContextDevicePlacementPolicy policy) {
tensorflow::unwrap(ctx)->SetThreadLocalDevicePlacementPolicy(
static_cast<tensorflow::ContextDevicePlacementPolicy>(policy));
}
extern TFE_ContextDevicePlacementPolicy TFE_ContextGetDevicePlacementPolicy(
TFE_Context* ctx) {
return static_cast<TFE_ContextDevicePlacementPolicy>(
tensorflow::unwrap(ctx)->GetDevicePlacementPolicy());
}
TFE_TensorHandle* TFE_NewTensorHandle(const TF_Tensor* t, TF_Status* status) {
tensorflow::Tensor tensor;
status->status = tensorflow::TF_TensorToTensor(t, &tensor);
if (!status->status.ok()) return nullptr;
return tensorflow::wrap(tensorflow::TensorHandle::CreateLocalHandle(tensor));
}
void TFE_DeleteTensorHandle(TFE_TensorHandle* h) {
  if (h == nullptr) return;
  tsl::profiler::TraceMe activity("TFE_DeleteTensorHandle",
                                  tsl::profiler::TraceMeLevel::kInfo);
  tensorflow::unwrap(h)->Unref();
}
TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h) {
return static_cast<TF_DataType>(tensorflow::unwrap(h)->DataType());
}
int TFE_TensorHandleNumDims(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
int num_dims = -1;
status->status = tensorflow::unwrap(h)->NumDims(&num_dims);
return num_dims;
}
int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
int64_t num_elements = -1;
status->status = tensorflow::unwrap(h)->NumElements(&num_elements);
return num_elements;
}
int64_t TFE_TensorHandleDim(TFE_TensorHandle* h, int dim_index,
TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return -1;
}
int64_t dim = -1;
status->status = tensorflow::unwrap(h)->Dim(dim_index, &dim);
return dim;
}
const char* TFE_TensorHandleDeviceName(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
return tensorflow::unwrap(h)->DeviceName(&status->status);
}
const char* TFE_TensorHandleBackingDeviceName(TFE_TensorHandle* h,
TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
return tensorflow::unwrap(h)->BackingDeviceName(&status->status);
}
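// Note: TFE_TensorHandleCopySharingTensor does not copy the underlying
// tensor. It adds a reference to the same handle and returns it, so the
// caller must eventually call TFE_DeleteTensorHandle on the returned handle
// as well.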
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::unwrap(h)->Ref();
return h;
}
TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::AbstractTensorInterface* t =
tensorflow::unwrap(h)->Resolve(&status->status);
if (t == nullptr) {
return nullptr;
}
return new TF_Tensor{t};
}
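// Note: TFE_TensorHandleDevicePointer only succeeds for custom-device handles
// and LOCAL tensor handles. For the latter it synchronizes the device first
// and returns a pointer into the tensor's buffer, which remains valid only as
// long as the handle is alive.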
void* TFE_TensorHandleDevicePointer(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::ImmediateExecutionTensorHandle* unwrapped_handle =
tensorflow::unwrap(h);
if (tensorflow::CustomDeviceTensorHandle::classof(unwrapped_handle)) {
return tensorflow::down_cast<tensorflow::CustomDeviceTensorHandle*>(
unwrapped_handle)
->DevicePointer();
}
if (!tensorflow::TensorHandle::classof(unwrapped_handle)) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::TensorHandle* handle =
tensorflow::TensorHandleFromInterface(unwrapped_handle);
if (handle->Type() != tensorflow::TensorHandle::LOCAL) {
status->status = tensorflow::errors::InvalidArgument(
"TFE_TensorHandleDevicePointer may not be called on a ",
handle->TypeString(), " tensor handle.");
return nullptr;
}
tensorflow::Device* device(handle->device());
if (device != nullptr) {
status->status = device->Sync();
if (!status->status.ok()) {
return nullptr;
}
}
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return nullptr;
}
return const_cast<void*>(
static_cast<const void*>(tensor->tensor_data().data()));
}
namespace tensorflow {
nam |
#include "tensorflow/c/eager/c_api.h"
#include <string.h>
#include <memory>
#include <string>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/platform.h"
#include "absl/strings/match.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/tfe_op_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
using tensorflow::string;
namespace {
void BM_InitOp(::testing::benchmark::State& state) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
for (auto s : state) {
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_DeleteOp(matmul);
}
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_InitOp);
void BM_Execute(::testing::benchmark::State& state) {
const int async = state.range(0);
state.SetLabel(async ? "ExecuteAsync" : "Execute");
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
for (auto s : state) {
TFE_OpReset(matmul, "MatMul", nullptr, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(matmul, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(matmul, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (state.iterations() >= state.max_iterations && async) {
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
}
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_Execute)->Arg(0)->Arg(1);
void BM_Execute_Identity(::testing::benchmark::State& state) {
const int async = state.range(0);
state.SetLabel(async ? "ExecuteIdentityAsync" : "ExecuteIdentity");
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* identity = TFE_NewOp(ctx, "Identity", status);
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
for (auto s : state) {
TFE_OpReset(identity, "Identity", nullptr, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpAddInput(identity, m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(identity, &retvals[0], &num_retvals, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (state.iterations() >= state.max_iterations && async) {
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
}
TFE_DeleteOp(identity);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
BENCHMARK(BM_Execute_Identity)->Arg(0)->Arg(1);
TEST(CAPI, Context) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
TFE_DeleteContextOptions(opts);
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const int num_devices = TF_DeviceListCount(devices);
EXPECT_GE(num_devices, 1) << "At least one CPU device should exist";
for (int i = 0; i < num_devices; ++i) {
EXPECT_NE("", TF_DeviceListName(devices, i, status)) << i;
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
}
TF_DeleteDeviceList(devices);
TF_DeleteStatus(status);
}
TEST(CAPI, TensorHandle) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
CHECK_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestMatrixTensorHandle(ctx);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(h));
TF_Tensor* t = TFE_TensorHandleResolve(h, status.get());
ASSERT_EQ(16, TF_TensorByteSize(t));
float data[4] = {0};
memcpy(&data[0], TF_TensorData(t), TF_TensorByteSize(t));
EXPECT_EQ(1.0, data[0]);
EXPECT_EQ(2.0, data[1]);
EXPECT_EQ(3.0, data[2]);
EXPECT_EQ(4.0, data[3]);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(h);
TFE_DeleteContext(ctx);
}
void TensorHandleCopyBetweenDevices(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
const int num_devices = TF_DeviceListCount(devices);
const char* kCPUDevice = "CPU:0";
for (int i = 0; i < num_devices; ++i) {
const string name(TF_DeviceListName(devices, i, status.get()));
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << i << " -- " << TF_Message(status.get());
continue;
}
auto tag = tensorflow::strings::StrCat("Device #", i, " (", name, ")");
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, ctx, name.c_str(), status.get());
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
continue;
}
TFE_TensorHandle* hdevice2 =
TFE_TensorHandleCopyToDevice(hdevice, ctx, name.c_str(), status.get());
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
continue;
}
TFE_DeleteTensorHandle(hdevice);
TFE_TensorHandle* hcopy =
TFE_TensorHandleCopyToDevice(hdevice2, ctx, kCPUDevice, status.get());
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
continue;
}
TFE_DeleteTensorHandle(hdevice2);
TF_Tensor* tcopy = TFE_TensorHandleResolve(hcopy, status.get());
TFE_DeleteTensorHandle(hcopy);
if (TF_GetCode(status.get()) != TF_OK) {
ADD_FAILURE() << tag;
continue;
}
EXPECT_EQ(TF_TensorByteSize(t), TF_TensorByteSize(tcopy)) << tag;
EXPECT_EQ(
0, memcmp(TF_TensorData(t), TF_TensorData(tcopy), TF_TensorByteSize(t)))
<< tag;
TF_DeleteTensor(tcopy);
}
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenDevices) {
TensorHandleCopyBetweenDevices(false);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesAsync) {
TensorHandleCopyBetweenDevices(true);
}
void TensorHandleCopyBetweenDevicesError(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* kErrorDevice = "NoSuchDevice:0";
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, ctx, kErrorDevice, status.get());
EXPECT_NE(TF_OK, TF_GetCode(status.get()));
const char* msg = "NoSuchDevice:0 unknown device";
EXPECT_TRUE(strstr(TF_Message(status.get()), msg) != nullptr)
<< TF_Message(status.get());
TF_SetStatus(status.get(), TF_OK, "");
const char* kCPUDevice = "CPU:0";
TFE_TensorHandle* hcopy =
TFE_TensorHandleCopyToDevice(hcpu, ctx, kCPUDevice, status.get());
EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteTensorHandle(hcopy);
TFE_DeleteTensorHandle(hcpu);
if (hdevice != nullptr) TFE_DeleteTensorHandle(hdevice);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesError) {
TensorHandleCopyBetweenDevicesError(false);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesErrorAsync) {
TensorHandleCopyBetweenDevicesError(true);
}
void TensorHandleCopyBetweenTwoGPUDevices(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
const int num_devices = TF_DeviceListCount(devices);
bool has_gpu0 = false;
bool has_gpu1 = false;
for (int i = 0; i < num_devices; ++i) {
const char* dev = TF_DeviceListName(devices, i, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
string device_name(dev);
if (device_name.find("GPU:0") != string::npos) {
has_gpu0 = true;
}
if (device_name.find("GPU:1") != string::npos) {
has_gpu1 = true;
}
}
const char* kCPUDevice = "CPU:0";
if (!has_gpu0 || !has_gpu1) {
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteContext(ctx);
return;
}
const string gpu_1_name(TF_DeviceListName(devices, 1, status.get()));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
const string gpu_2_name(TF_DeviceListName(devices, 2, status.get()));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, ctx, gpu_1_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_TensorHandle* hdevice2 = TFE_TensorHandleCopyToDevice(
hdevice, ctx, gpu_2_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_DeleteTensorHandle(hdevice);
TFE_TensorHandle* hcopy =
TFE_TensorHandleCopyToDevice(hdevice2, ctx, kCPUDevice, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
TFE_DeleteTensorHandle(hdevice2);
TF_Tensor* tcopy = TFE_TensorHandleResolve(hcopy, status.get());
TFE_DeleteTensorHandle(hcopy);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
EXPECT_EQ(TF_TensorByteSize(t), TF_TensorByteSize(tcopy));
EXPECT_EQ(
0, memcmp(TF_TensorData(t), TF_TensorData(tcopy), TF_TensorByteSize(t)));
TF_DeleteTensor(tcopy);
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenTwoGPUDevices) {
TensorHandleCopyBetweenTwoGPUDevices(false);
}
TEST(CAPI, TensorHandleCopyBetweenTwoGPUDevicesAsync) {
TensorHandleCopyBetweenTwoGPUDevices(true);
}
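// With a SILENT device placement policy (set globally or per-thread), feeding
// a CPU-resident tensor to an op placed on the GPU triggers an implicit copy.
// The check below verifies that the copy materializes as a local mirror on
// the GPU device instead of failing placement.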
void TensorHandleSilentCopy(bool async,
TFE_ContextDevicePlacementPolicy global_policy,
TFE_ContextDevicePlacementPolicy thread_policy,
bool cpu_op) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_ContextOptionsSetDevicePlacementPolicy(opts, global_policy);
TFE_Context* ctx = TFE_NewContext(opts, status.get());
if (thread_policy != global_policy) {
TFE_ContextSetThreadLocalDevicePlacementPolicy(ctx, thread_policy);
}
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
auto cpu_arg =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(hcpu));
auto gpu_arg =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(hgpu));
auto gpu_device = gpu_arg->device();
ASSERT_FALSE(cpu_arg->HasLocalMirror(gpu_device));
TFE_Op* matmul = MatMulOp(ctx, hcpu, hgpu);
if (cpu_op) {
string cpu_device_name;
ASSERT_TRUE(GetDeviceName(ctx, &cpu_device_name, "CPU"));
TFE_OpSetDevice(matmul, cpu_device_name.c_str(), status.get());
} else {
TFE_OpSetDevice(matmul, gpu_device_name.c_str(), status.get());
}
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ASSERT_TRUE(cpu_arg->HasLocalMirror(gpu_device));
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleSilentCopy) {
TensorHandleSilentCopy(false, TFE_DEVICE_PLACEMENT_SILENT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
TEST(CAPI, TensorHandleSilentCopyAsync) {
TensorHandleSilentCopy(true, TFE_DEVICE_PLACEMENT_SILENT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
TEST(CAPI, TensorHandleSilentCopyLocalPolicy) {
TensorHandleSilentCopy(false, TFE_DEVICE_PLACEMENT_EXPLICIT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
TEST(CAPI, TensorHandleSilentCopyLocalPolicyAsync) {
TensorHandleSilentCopy(true, TFE_DEVICE_PLACEMENT_EXPLICIT,
TFE_DEVICE_PLACEMENT_SILENT, false);
}
void SetAndGetOpDevices(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_OpSetDevice(matmul, "GPU:0", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
const char* device_name = TFE_OpGetDevice(matmul, status);
ASSERT_TRUE(strstr(device_name, "GPU:0") != nullptr);
TFE_OpSetDevice(matmul, "CPU:0", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
device_name = TFE_OpGetDevice(matmul, status);
ASSERT_TRUE(strstr(device_name, "CPU:0") != nullptr);
}
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
TEST(CAPI, TensorHandleNullptr) {
TFE_TensorHandle* h = nullptr;
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TF_Tensor* t = TFE_TensorHandleResolve(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(t, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
const char* device_name = TFE_TensorHandleDeviceName(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_name, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
device_name = TFE_TensorHandleBackingDeviceName(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_name, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int num_dims = TFE_TensorHandleNumDims(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(num_dims, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int dim = TFE_TensorHandleDim(h, 0, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(dim, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
}
TEST(CAPI, TensorHandleDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* device_name = TFE_TensorHandleDeviceName(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_name, "CPU:0")) << device_name;
const char* backing_device_name =
TFE_TensorHandleBackingDeviceName(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(backing_device_name, "CPU:0"))
<< backing_device_name;
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
device_name = TFE_TensorHandleDeviceName(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_name, "GPU:0")) << device_name;
backing_device_name =
TFE_TensorHandleBackingDeviceName(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(backing_device_name, "CPU:0"))
<< backing_device_name;
TFE_DeleteOp(shape_op);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
void ExecuteAdd(bool async, bool forward_input, bool tfrt) {
#ifdef PLATFORM_WINDOWS
return;
#else
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetTfrt(opts, tfrt);
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* n = TestMatrixTensorHandle100x100(ctx);
std::string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* n_gpu =
TFE_TensorHandleCopyToDevice(n, ctx, gpu_device_name.c_str(), status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(n);
n = n_gpu;
}
TFE_TensorHandle* m = TestMatrixTensorHandle100x100(ctx);
TF_Tensor* orig = TFE_TensorHandleResolve(n, status);
void* orig_ptr = TF_TensorData(orig);
TF_DeleteTensor(orig);
TFE_Op* add_op = AddOp(ctx, n, m);
std::string cpu_device_name;
ASSERT_TRUE(GetDeviceName(ctx, &cpu_device_name, "CPU"));
TFE_OpSetDevice(add_op, cpu_device_name.c_str(), status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (forward_input) {
TFE_DeleteTensorHandle(n);
}
int num_retvals = 1;
if (async) {
for (int i = 0; i < 100000; ++i) {
TFE_Op* add_op_dummy = AddOp(ctx, m, m);
TFE_OpSetDevice(add_op_dummy, cpu_device_name.c_str(), status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* dummy = nullptr;
TFE_Execute(add_op_dummy, &dummy, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(dummy);
TFE_DeleteOp(add_op_dummy);
}
}
TFE_TensorHandle* retval = nullptr;
TFE_Execute(add_op, &retval, &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
if (!forward_input) {
TFE_DeleteTensorHandle(n);
}
TFE_DeleteOp(add_op);
TF_Tensor* t = TFE_TensorHandleResolve(retval, status);
  if (async || forward_input) {
    // The input buffer is expected to be forwarded to the output, either
    // because the input handle was explicitly released before execution or
    // because, in async mode, this thread's reference is dropped before the
    // kernel actually runs.
    EXPECT_EQ(orig_ptr, TF_TensorData(t));
  } else {
    // The test still holds a reference to the input while the kernel runs,
    // so the output gets its own buffer.
    EXPECT_NE(orig_ptr, TF_TensorData(t));
  }
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(m);
TFE_DeleteTensorHandle(retval);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float result[100 * 100] = {0};
EXPECT_EQ(sizeof(result), TF_TensorByteSize(t));
memcpy(&result[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
for (int i = 0; i < 100 * 100; ++i) {
EXPECT_EQ(2.0f, result[i]);
}
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
#endif
}
TEST(CAPI, ExecuteAdd) {
ExecuteAdd(
false,
false,
false);
}
TEST(CAPI, DISABLED_ExecuteAddAsync) {
ExecuteAdd(
true,
false,
false);
}
TEST(CAPI, ExecuteAddForward) {
ExecuteAdd(
false,
true,
false);
}
TEST(CAPI, ExecuteAddForwardAsync) {
ExecuteAdd(
true,
true,
false);
}
#ifdef PLATFORM_GOOGLE
TEST(CAPI, DISABLED_ExecuteAddTfrt) {
ExecuteAdd(
false,
false,
true);
}
#endif
void Execute_MatMul_CPU(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_TensorHandle* retvals[2] = {nullptr, nullptr};
int num_retvals = 2;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_MatMul_CPU) { Execute_MatMul_CPU(false); }
TEST(CAPI, Execute_MatMul_CPUAsync) { Execute_MatMul_CPU(true); }
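// Verifies how a shape-mismatch error surfaces. In sync mode TFE_Execute
// itself fails; in async mode the enqueue succeeds and the error is only
// reported when the output is resolved or the executor is drained, after
// which TFE_ExecutorClearError must be called before that executor accepts
// new work.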
void Execute_MatMul_CPU_Runtime_Error(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m1 = TestMatrixTensorHandle(ctx);
TFE_TensorHandle* m2 = DoubleTestMatrixTensorHandle3X2(ctx);
TFE_Op* matmul = MatMulOp(ctx, m1, m2);
TFE_OpSetDevice(matmul, "/job:localhost/replica:0/task:0/device:CPU:0",
status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Op* matmul2 = MatMulOp(ctx, m1, m1);
TFE_OpSetDevice(matmul2, "/job:localhost/replica:0/task:0/device:CPU:0",
status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* retvals[1] = {nullptr};
int num_retvals = 1;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
TFE_DeleteOp(matmul);
if (!async) {
EXPECT_NE(TF_OK, TF_GetCode(status));
} else {
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
EXPECT_NE(TF_OK, TF_GetCode(status));
EXPECT_EQ(nullptr, t);
const char* msg = "Matrix size-incompatible: In[0]: [2,2], In[1]: [3,2]";
EXPECT_TRUE(strstr(TF_Message(status), msg) != nullptr)
<< TF_Message(status);
TF_SetStatus(status, TF_OK, "");
TFE_DeleteTensorHandle(retvals[0]);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
EXPECT_NE(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
retvals[0] = nullptr;
TFE_Execute(matmul2, &retvals[0], &num_retvals, status);
EXPECT_NE(TF_OK, TF_GetCode(status));
TFE_ExecutorClearError(executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
}
TF_SetStatus(status, TF_OK, "");
if (retvals[0] != nullptr) {
TFE_DeleteTensorHandle(retvals[0]);
}
retvals[0] = nullptr;
TFE_Execute(matmul2, &retvals[0], &num_retvals, status);
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
EXPECT_EQ(TF_ |
950 | cpp | tensorflow/tensorflow | c_api_experimental | tensorflow/lite/core/c/c_api_experimental.cc | tensorflow/lite/core/c/c_api_experimental_test.cc |
#ifndef TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_H_
#define TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_H_
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/eager/c_api.h"
#ifdef __cplusplus
extern "C" {
#endif
TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
const char* op_or_function_name,
const char* raw_device_name,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextEnableGraphCollection(TFE_Context* ctx);
TF_CAPI_EXPORT extern void TFE_ContextDisableGraphCollection(TFE_Context* ctx);
typedef struct TFE_MonitoringCounterCell TFE_MonitoringCounterCell;
TF_CAPI_EXPORT extern void TFE_MonitoringCounterCellIncrementBy(
TFE_MonitoringCounterCell* cell, int64_t value);
TF_CAPI_EXPORT extern int64_t TFE_MonitoringCounterCellValue(
TFE_MonitoringCounterCell* cell);
typedef struct TFE_MonitoringCounter0 TFE_MonitoringCounter0;
TF_CAPI_EXPORT extern TFE_MonitoringCounter0* TFE_MonitoringNewCounter0(
const char* name, TF_Status* status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter0(
TFE_MonitoringCounter0* counter);
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter0(
TFE_MonitoringCounter0* counter);
typedef struct TFE_MonitoringCounter1 TFE_MonitoringCounter1;
TF_CAPI_EXPORT extern TFE_MonitoringCounter1* TFE_MonitoringNewCounter1(
const char* name, TF_Status* status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter1(
TFE_MonitoringCounter1* counter);
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter1(
TFE_MonitoringCounter1* counter, const char* label1);
typedef struct TFE_MonitoringCounter2 TFE_MonitoringCounter2;
TF_CAPI_EXPORT extern TFE_MonitoringCounter2* TFE_MonitoringNewCounter2(
const char* name, TF_Status* status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteCounter2(
TFE_MonitoringCounter2* counter);
TF_CAPI_EXPORT extern TFE_MonitoringCounterCell* TFE_MonitoringGetCellCounter2(
TFE_MonitoringCounter2* counter, const char* label1, const char* label2);
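// A minimal usage sketch for the counter API above (the unit tests exercise
// the same sequence):
//
//   TF_Status* status = TF_NewStatus();
//   TFE_MonitoringCounter0* counter =
//       TFE_MonitoringNewCounter0("test/counter", status, "description");
//   TFE_MonitoringCounterCell* cell = TFE_MonitoringGetCellCounter0(counter);
//   TFE_MonitoringCounterCellIncrementBy(cell, 1);
//   int64_t value = TFE_MonitoringCounterCellValue(cell);
//   TFE_MonitoringDeleteCounter0(counter);
//   TF_DeleteStatus(status);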
typedef struct TFE_MonitoringIntGaugeCell TFE_MonitoringIntGaugeCell;
TF_CAPI_EXPORT extern void TFE_MonitoringIntGaugeCellSet(
TFE_MonitoringIntGaugeCell* cell, int64_t value);
TF_CAPI_EXPORT extern int64_t TFE_MonitoringIntGaugeCellValue(
TFE_MonitoringIntGaugeCell* cell);
typedef struct TFE_MonitoringIntGauge0 TFE_MonitoringIntGauge0;
TF_CAPI_EXPORT extern TFE_MonitoringIntGauge0* TFE_MonitoringNewIntGauge0(
const char* name, TF_Status* out_status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge0(
TFE_MonitoringIntGauge0* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
TFE_MonitoringGetCellIntGauge0(TFE_MonitoringIntGauge0* gauge);
typedef struct TFE_MonitoringIntGauge1 TFE_MonitoringIntGauge1;
TF_CAPI_EXPORT extern TFE_MonitoringIntGauge1* TFE_MonitoringNewIntGauge1(
const char* name, TF_Status* out_status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge1(
TFE_MonitoringIntGauge1* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
TFE_MonitoringGetCellIntGauge1(TFE_MonitoringIntGauge1* gauge,
const char* label1);
typedef struct TFE_MonitoringIntGauge2 TFE_MonitoringIntGauge2;
TF_CAPI_EXPORT extern TFE_MonitoringIntGauge2* TFE_MonitoringNewIntGauge2(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteIntGauge2(
TFE_MonitoringIntGauge2* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringIntGaugeCell*
TFE_MonitoringGetCellIntGauge2(TFE_MonitoringIntGauge2* gauge,
const char* label1, const char* label2);
typedef struct TFE_MonitoringStringGaugeCell TFE_MonitoringStringGaugeCell;
TF_CAPI_EXPORT extern void TFE_MonitoringStringGaugeCellSet(
TFE_MonitoringStringGaugeCell* cell, const char* value);
TF_CAPI_EXPORT extern const void TFE_MonitoringStringGaugeCellValue(
TFE_MonitoringStringGaugeCell* cell, TF_Buffer* buf);
typedef struct TFE_MonitoringStringGauge0 TFE_MonitoringStringGauge0;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge0* TFE_MonitoringNewStringGauge0(
const char* name, TF_Status* out_status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge0(
TFE_MonitoringStringGauge0* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge0(TFE_MonitoringStringGauge0* gauge);
typedef struct TFE_MonitoringStringGauge1 TFE_MonitoringStringGauge1;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge1* TFE_MonitoringNewStringGauge1(
const char* name, TF_Status* out_status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge1(
TFE_MonitoringStringGauge1* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge1(TFE_MonitoringStringGauge1* gauge,
const char* label1);
typedef struct TFE_MonitoringStringGauge2 TFE_MonitoringStringGauge2;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge2* TFE_MonitoringNewStringGauge2(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge2(
TFE_MonitoringStringGauge2* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge2(TFE_MonitoringStringGauge2* gauge,
const char* label1, const char* label2);
typedef struct TFE_MonitoringStringGauge3 TFE_MonitoringStringGauge3;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge3* TFE_MonitoringNewStringGauge3(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2, const char* label3);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge3(
TFE_MonitoringStringGauge3* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge3(TFE_MonitoringStringGauge3* gauge,
const char* label1, const char* label2,
const char* label3);
typedef struct TFE_MonitoringStringGauge4 TFE_MonitoringStringGauge4;
TF_CAPI_EXPORT extern TFE_MonitoringStringGauge4* TFE_MonitoringNewStringGauge4(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2, const char* label3,
const char* label4);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteStringGauge4(
TFE_MonitoringStringGauge4* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringStringGaugeCell*
TFE_MonitoringGetCellStringGauge4(TFE_MonitoringStringGauge4* gauge,
const char* label1, const char* label2,
const char* label3, const char* label4);
typedef struct TFE_MonitoringBoolGaugeCell TFE_MonitoringBoolGaugeCell;
TF_CAPI_EXPORT extern void TFE_MonitoringBoolGaugeCellSet(
TFE_MonitoringBoolGaugeCell* cell, bool value);
TF_CAPI_EXPORT extern bool TFE_MonitoringBoolGaugeCellValue(
TFE_MonitoringBoolGaugeCell* cell);
typedef struct TFE_MonitoringBoolGauge0 TFE_MonitoringBoolGauge0;
TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge0* TFE_MonitoringNewBoolGauge0(
const char* name, TF_Status* out_status, const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge0(
TFE_MonitoringBoolGauge0* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
TFE_MonitoringGetCellBoolGauge0(TFE_MonitoringBoolGauge0* gauge);
typedef struct TFE_MonitoringBoolGauge1 TFE_MonitoringBoolGauge1;
TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge1* TFE_MonitoringNewBoolGauge1(
const char* name, TF_Status* out_status, const char* description,
const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge1(
TFE_MonitoringBoolGauge1* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
TFE_MonitoringGetCellBoolGauge1(TFE_MonitoringBoolGauge1* gauge,
const char* label1);
typedef struct TFE_MonitoringBoolGauge2 TFE_MonitoringBoolGauge2;
TF_CAPI_EXPORT extern TFE_MonitoringBoolGauge2* TFE_MonitoringNewBoolGauge2(
const char* name, TF_Status* out_status, const char* description,
const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBoolGauge2(
TFE_MonitoringBoolGauge2* gauge);
TF_CAPI_EXPORT extern TFE_MonitoringBoolGaugeCell*
TFE_MonitoringGetCellBoolGauge2(TFE_MonitoringBoolGauge2* gauge,
const char* label1, const char* label2);
typedef struct TFE_MonitoringSamplerCell TFE_MonitoringSamplerCell;
TF_CAPI_EXPORT extern void TFE_MonitoringSamplerCellAdd(
TFE_MonitoringSamplerCell* cell, double value);
TF_CAPI_EXPORT extern void TFE_MonitoringSamplerCellValue(
TFE_MonitoringSamplerCell* cell, TF_Buffer* buf);
typedef struct TFE_MonitoringBuckets TFE_MonitoringBuckets;
TF_CAPI_EXPORT extern TFE_MonitoringBuckets*
TFE_MonitoringNewExponentialBuckets(double scale, double growth_factor,
int bucket_count);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteBuckets(
TFE_MonitoringBuckets* buckets);
typedef struct TFE_MonitoringSampler0 TFE_MonitoringSampler0;
TF_CAPI_EXPORT extern TFE_MonitoringSampler0* TFE_MonitoringNewSampler0(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
const char* description);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler0(
TFE_MonitoringSampler0* sampler);
TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler0(
TFE_MonitoringSampler0* sampler);
typedef struct TFE_MonitoringSampler1 TFE_MonitoringSampler1;
TF_CAPI_EXPORT extern TFE_MonitoringSampler1* TFE_MonitoringNewSampler1(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
const char* description, const char* label1);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler1(
TFE_MonitoringSampler1* sampler);
TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler1(
TFE_MonitoringSampler1* sampler, const char* label1);
typedef struct TFE_MonitoringSampler2 TFE_MonitoringSampler2;
TF_CAPI_EXPORT extern TFE_MonitoringSampler2* TFE_MonitoringNewSampler2(
const char* name, TFE_MonitoringBuckets* buckets, TF_Status* out_status,
const char* description, const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_MonitoringDeleteSampler2(
TFE_MonitoringSampler2* sampler);
TF_CAPI_EXPORT extern TFE_MonitoringSamplerCell* TFE_MonitoringGetCellSampler2(
TFE_MonitoringSampler2* sampler, const char* label1, const char* label2);
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetTfrt(TFE_ContextOptions*,
bool use_tfrt);
TF_CAPI_EXPORT extern uint64_t TFE_GetContextId(TFE_Context* ctx);
typedef struct TFE_CancellationManager TFE_CancellationManager;
typedef int64_t TFE_CancellationToken;
typedef struct TFE_CancelCallback {
void (*callback)(void* context);
void* context;
} TFE_CancelCallback;
TF_CAPI_EXPORT extern TFE_CancellationManager* TFE_NewCancellationManager();
TF_CAPI_EXPORT extern bool TFE_CancellationManagerIsCancelled(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerIsCancelling(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern void TFE_CancellationManagerStartCancel(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern TFE_CancellationToken TFE_CancellationManagerGetToken(
TFE_CancellationManager*);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerRegisterCallback(
TFE_CancellationManager*, TFE_CancellationToken token,
const TFE_CancelCallback* c_callback, const char* callback_name);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerDeregisterCallback(
TFE_CancellationManager*, TFE_CancellationToken token);
TF_CAPI_EXPORT extern bool TFE_CancellationManagerTryDeregisterCallback(
TFE_CancellationManager*, TFE_CancellationToken token);
TF_CAPI_EXPORT extern void TFE_DeleteCancellationManager(
TFE_CancellationManager*);
typedef struct TFE_CancellationManager TFE_CancellationManager;
TF_CAPI_EXPORT extern void TFE_OpSetCancellationManager(
TFE_Op* op, TFE_CancellationManager* cancellation_manager,
TF_Status* status);
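// A minimal usage sketch; the callback body, `user_data`, and `op` are
// illustrative placeholders:
//
//   TFE_CancellationManager* mgr = TFE_NewCancellationManager();
//   TFE_CancellationToken token = TFE_CancellationManagerGetToken(mgr);
//   TFE_CancelCallback cb;
//   cb.context = user_data;  // handed back to the callback
//   cb.callback = [](void* context) { /* react to cancellation */ };
//   TFE_CancellationManagerRegisterCallback(mgr, token, &cb, "my_callback");
//   TFE_OpSetCancellationManager(op, mgr, status);  // optionally tie to an op
//   ...
//   TFE_CancellationManagerStartCancel(mgr);  // runs registered callbacks
//   TFE_DeleteCancellationManager(mgr);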
typedef struct TFE_Executor TFE_Executor;
TF_CAPI_EXPORT extern TFE_Executor* TFE_NewExecutor(
bool is_async, bool enable_streaming_enqueue, int in_flight_nodes_limit);
TF_CAPI_EXPORT extern void TFE_DeleteExecutor(TFE_Executor*);
TF_CAPI_EXPORT extern bool TFE_ExecutorIsAsync(TFE_Executor*);
TF_CAPI_EXPORT extern void TFE_ExecutorWaitForAllPendingNodes(
TFE_Executor*, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ExecutorClearError(TFE_Executor*);
TF_CAPI_EXPORT extern void TFE_ContextSetExecutorForThread(TFE_Context*,
TFE_Executor*);
TF_CAPI_EXPORT extern TFE_Executor* TFE_ContextGetExecutorForThread(
TFE_Context*);
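// A minimal sketch for running a thread's ops on an async executor (error
// handling omitted):
//
//   TFE_Executor* executor = TFE_NewExecutor(/*is_async=*/true,
//                                            /*enable_streaming_enqueue=*/true,
//                                            /*in_flight_nodes_limit=*/0);
//   TFE_ContextSetExecutorForThread(ctx, executor);
//   ... enqueue ops with TFE_Execute ...
//   TFE_ExecutorWaitForAllPendingNodes(executor, status);
//   TFE_DeleteExecutor(executor);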
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDef(TFE_Context* ctx,
int keep_alive_secs,
const void* proto,
size_t proto_len,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextUpdateServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeout(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, TF_Status* status,
bool clear_existing_contexts);
TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeoutAndRetries(
TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len,
int64_t init_timeout_in_ms, int retries, TF_Status* status,
bool clear_existing_contexts);
TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
const char* worker_name,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
TF_Status* status);
TF_CAPI_EXPORT extern void* TFE_TensorHandleDevicePointer(TFE_TensorHandle*,
TF_Status*);
TF_CAPI_EXPORT extern size_t TFE_TensorHandleDeviceMemorySize(TFE_TensorHandle*,
TF_Status*);
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromDeviceMemory(
TFE_Context* ctx, const char* device_name, TF_DataType, const int64_t* dims,
int num_dims, void* data, size_t len,
void (*deallocator)(void* data, size_t len, void* arg),
void* deallocator_arg, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_HostAddressSpace(TFE_Context* ctx,
TF_Buffer* buf);
typedef struct TFE_OpAttrs TFE_OpAttrs;
TF_CAPI_EXPORT extern const TFE_OpAttrs* TFE_OpGetAttrs(const TFE_Op* op);
TF_CAPI_EXPORT extern void TFE_OpAddAttrs(TFE_Op* op, const TFE_OpAttrs* attrs);
TF_CAPI_EXPORT extern void TFE_OpAttrsSerialize(const TFE_OpAttrs* attrs,
TF_Buffer* buf,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_OpSetAttrValueProto(const TFE_Op* op,
const char* attr_name,
const void* proto,
size_t proto_len,
TF_Status* status);
#define TFE_CUSTOM_DEVICE_VERSION 4
typedef struct TFE_CustomDevice {
int version = TFE_CUSTOM_DEVICE_VERSION;
TFE_TensorHandle* (*copy_tensor_to_device)(TFE_Context* context,
TFE_TensorHandle* tensor,
TF_Status* status,
void* device_info);
TFE_TensorHandle* (*copy_tensor_from_device)(TFE_Context* context,
TFE_TensorHandle* tensor,
const char* target_device_name,
TF_Status* status,
void* device_info);
void (*execute)(const TFE_Op* op, int* num_outputs,
TFE_TensorHandle** outputs, TF_Status* s, void* device_info);
void (*delete_device)(void* device_info);
TFE_TensorHandle* (*pack)(TFE_Context* context, TFE_TensorHandle** handles,
int num_handles, TF_Status* s,
void* device_info) = nullptr;
bool (*shall_pin_to_this_device)(const TFE_Op* op, TF_Status* s) = nullptr;
} TFE_CustomDevice;
TF_CAPI_EXPORT extern void TFE_RegisterCustomDevice(TFE_Context* ctx,
TFE_CustomDevice device,
const char* device_name,
void* device_info,
TF_Status* status);
TF_CAPI_EXPORT extern bool TFE_IsCustomDevice(TFE_Context* ctx,
const char* device_name);
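// A registration sketch. `MyDevice`, the callback names, and the device name
// below are illustrative; a real device must supply copy-in/copy-out and
// execute callbacks appropriate for its storage:
//
//   TFE_CustomDevice device;
//   device.copy_tensor_to_device = &CopyToDevice;      // user-provided
//   device.copy_tensor_from_device = &CopyFromDevice;  // user-provided
//   device.execute = &Execute;                         // user-provided
//   device.delete_device =
//       [](void* info) { delete static_cast<MyDevice*>(info); };
//   TFE_RegisterCustomDevice(ctx, device,
//                            "/job:localhost/replica:0/task:0/device:CUSTOM:0",
//                            new MyDevice, status);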
typedef struct TFE_CustomDeviceTensorHandleMethods {
int version = TFE_CUSTOM_DEVICE_VERSION;
int (*num_dims)(void* data, TF_Status* status);
int64_t (*dim)(void* data, int dim_index, TF_Status* status);
void (*deallocator)(void* data);
TF_Buffer* (*summarize)(void* data, TF_Status* status) = nullptr;
} TFE_CustomDeviceTensorHandle;
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewCustomDeviceTensorHandle(
TFE_Context*, const char* device_name, TF_DataType, void* data,
TFE_CustomDeviceTensorHandle methods, TF_Status* status);
TF_CAPI_EXPORT extern void TFE_ContextGetFunctionDef(TFE_Context* ctx,
const char* function_name,
TF_Buffer* buf,
TF_Status* status);
TF_CAPI_EXPORT extern void TFE_Contex |
#include "tensorflow/c/eager/c_api_experimental.h"
#include <string.h>
#include <string>
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
using tensorflow::string;
namespace tensorflow {
namespace {
static bool HasSubstr(absl::string_view base, absl::string_view substr) {
bool ok = absl::StrContains(base, substr);
EXPECT_TRUE(ok) << base << ", expected substring " << substr;
return ok;
}
TEST(CAPI, MonitoringCounter0) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0("test/counter", status, "description");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, 1);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 1);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/counter",
metrics->point_set_map.at("test/counter")->metric_name);
EXPECT_EQ(
1, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
TFE_MonitoringCounterCellIncrementBy(cell, 5);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell), 6);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(
6, metrics->point_set_map.at("test/counter")->points.at(0)->int64_value);
TFE_MonitoringDeleteCounter0(counter);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(metrics->point_set_map.end(),
metrics->point_set_map.find("test/counter"));
}
TEST(CAPI, MonitoringCounterMultiple) {
TF_Status* status = TF_NewStatus();
auto* counter1 = TFE_MonitoringNewCounter1("test/counter1", status,
"description", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellCounter1(counter1, "test");
TFE_MonitoringCounterCellIncrementBy(cell1, 1);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell1), 1);
auto* counter2 = TFE_MonitoringNewCounter2("test/counter2", status,
"description", "label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
auto* cell2 = TFE_MonitoringGetCellCounter2(counter2, "foo", "bar");
TFE_MonitoringCounterCellIncrementBy(cell2, 2);
EXPECT_EQ(TFE_MonitoringCounterCellValue(cell2), 2);
TFE_MonitoringDeleteCounter1(counter1);
TFE_MonitoringDeleteCounter2(counter2);
}
TEST(CAPI, MonitoringGauge0) {
TF_Status* status = TF_NewStatus();
auto* gauge = TFE_MonitoringNewIntGauge0("test/gauge", status, "test");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell = TFE_MonitoringGetCellIntGauge0(gauge);
TFE_MonitoringIntGaugeCellSet(cell, 1);
EXPECT_EQ(TFE_MonitoringIntGaugeCellValue(cell), 1);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/gauge", metrics->point_set_map.at("test/gauge")->metric_name);
EXPECT_EQ(1,
metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
TFE_MonitoringIntGaugeCellSet(cell, 5);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(5,
metrics->point_set_map.at("test/gauge")->points.at(0)->int64_value);
TFE_MonitoringDeleteIntGauge0(gauge);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringMultipleGauge) {
TF_Status* status = TF_NewStatus();
auto* gauge1 =
TFE_MonitoringNewBoolGauge1("test/gauge1", status, "test", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellBoolGauge1(gauge1, "foo");
TFE_MonitoringBoolGaugeCellSet(cell1, true);
EXPECT_TRUE(TFE_MonitoringBoolGaugeCellValue(cell1));
TFE_MonitoringDeleteBoolGauge1(gauge1);
auto* gauge2 = TFE_MonitoringNewStringGauge2("test/gauge2", status, "test",
"label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell2 = TFE_MonitoringGetCellStringGauge2(gauge2, "foo", "bar");
TFE_MonitoringStringGaugeCellSet(cell2, "str");
auto* buf = new TF_Buffer;
TFE_MonitoringStringGaugeCellValue(cell2, buf);
string data(static_cast<const char*>(buf->data), buf->length);
TF_DeleteBuffer(buf);
EXPECT_EQ(data, "str");
TFE_MonitoringDeleteStringGauge2(gauge2);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringSampler0) {
TF_Status* status = TF_NewStatus();
auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
auto* sampler =
TFE_MonitoringNewSampler0("test/sampler", buckets, status, "test");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell = TFE_MonitoringGetCellSampler0(sampler);
TFE_MonitoringSamplerCellAdd(cell, 1.0);
auto* collection_registry = monitoring::CollectionRegistry::Default();
monitoring::CollectionRegistry::CollectMetricsOptions options;
std::unique_ptr<monitoring::CollectedMetrics> metrics =
collection_registry->CollectMetrics(options);
EXPECT_EQ("test/sampler",
metrics->point_set_map.at("test/sampler")->metric_name);
EXPECT_EQ(1.0, metrics->point_set_map.at("test/sampler")
->points.at(0)
->histogram_value.sum());
TFE_MonitoringSamplerCellAdd(cell, 5.0);
metrics = collection_registry->CollectMetrics(options);
EXPECT_EQ(6.0, metrics->point_set_map.at("test/sampler")
->points.at(0)
->histogram_value.sum());
TFE_MonitoringDeleteBuckets(buckets);
TFE_MonitoringDeleteSampler0(sampler);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringMultipleSampler) {
TF_Status* status = TF_NewStatus();
auto* buckets = TFE_MonitoringNewExponentialBuckets(1.0, 2.0, 2);
auto* sampler1 = TFE_MonitoringNewSampler1("test/sampler1", buckets, status,
"test", "label1");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell1 = TFE_MonitoringGetCellSampler1(sampler1, "foo");
TFE_MonitoringSamplerCellAdd(cell1, 1.0);
TFE_MonitoringSamplerCellAdd(cell1, 2.0);
TF_Buffer* result1 = TF_NewBuffer();
TFE_MonitoringSamplerCellValue(cell1, result1);
tensorflow::HistogramProto histogram1;
EXPECT_TRUE(histogram1.ParseFromString(
{reinterpret_cast<const char*>(result1->data), result1->length}));
EXPECT_EQ(histogram1.sum(), 3.0);
TF_DeleteBuffer(result1);
TFE_MonitoringDeleteSampler1(sampler1);
auto* sampler2 = TFE_MonitoringNewSampler2("test/sampler2", buckets, status,
"test", "label1", "label2");
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
auto* cell2 = TFE_MonitoringGetCellSampler2(sampler2, "foo", "bar");
TFE_MonitoringSamplerCellAdd(cell2, 2.0);
TFE_MonitoringSamplerCellAdd(cell2, 3.0);
TF_Buffer* result2 = TF_NewBuffer();
TFE_MonitoringSamplerCellValue(cell2, result2);
tensorflow::HistogramProto histogram2;
EXPECT_TRUE(histogram2.ParseFromString(
{reinterpret_cast<const char*>(result2->data), result2->length}));
EXPECT_EQ(histogram2.sum(), 5.0);
TF_DeleteBuffer(result2);
TFE_MonitoringDeleteSampler2(sampler2);
TFE_MonitoringDeleteBuckets(buckets);
TF_DeleteStatus(status);
}
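// Registers three callbacks, deregisters two of them, and checks that only the
// remaining callback fires when cancellation starts.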
TEST(CAPI, CancellationManager) {
TFE_CancellationManager* c_mgr = TFE_NewCancellationManager();
EXPECT_FALSE(TFE_CancellationManagerIsCancelled(c_mgr));
TFE_CancelCallback callback1;
callback1.callback = [](void* context) {
ADD_FAILURE() << "Callback1 should be deregistered.";
};
TFE_CancellationToken token1 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token1, &callback1,
"callback1"));
TFE_CancelCallback callback2;
bool callback2_invoked = false;
callback2.context = &callback2_invoked;
callback2.callback = [](void* context) {
*reinterpret_cast<bool*>(context) = true;
};
TFE_CancellationToken token2 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token2, &callback2,
"callback2"));
TFE_CancellationToken token3 = TFE_CancellationManagerGetToken(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerRegisterCallback(c_mgr, token3, &callback1,
"callback3"));
EXPECT_TRUE(TFE_CancellationManagerDeregisterCallback(c_mgr, token1));
EXPECT_TRUE(TFE_CancellationManagerTryDeregisterCallback(c_mgr, token3));
TFE_CancellationManagerStartCancel(c_mgr);
EXPECT_TRUE(TFE_CancellationManagerIsCancelled(c_mgr));
EXPECT_TRUE(callback2_invoked);
TFE_DeleteCancellationManager(c_mgr);
}
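// Destroying the context before or after its per-thread executor must both be safe.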
TEST(CAPI, ExecutorContextDestructionOrder) {
TF_Status* status = TF_NewStatus();
{
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
    TFE_Executor* executor = TFE_NewExecutor(
        /*is_async=*/false, /*enable_streaming_enqueue=*/true,
        /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_DeleteContext(ctx);
TFE_DeleteExecutor(executor);
}
{
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
    TFE_Executor* executor = TFE_NewExecutor(
        /*is_async=*/false, /*enable_streaming_enqueue=*/true,
        /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TF_DeleteStatus(status);
}
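// Builds an "ident" function from a TF_Graph, registers it on an eager context,
// and executes it with both sync and async executors.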
TEST(CAPI, Function_ident_CPU) {
TF_Graph* function_graph = TF_NewGraph();
TF_OperationDescription* arg_descr =
TF_NewOperation(function_graph, "Placeholder", "arg");
TF_SetAttrType(arg_descr, "dtype", TF_INT32);
TF_Status* status = TF_NewStatus();
TF_Operation* arg = TF_FinishOperation(arg_descr, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_OperationDescription* id_descr =
TF_NewOperation(function_graph, "Identity", "id");
TF_SetAttrType(id_descr, "T", TF_INT32);
TF_AddInput(id_descr, {arg, 0});
TF_Operation* id = TF_FinishOperation(id_descr, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_Output input{arg, 0};
TF_Output output{id, 0};
TF_Function* fn =
TF_GraphToFunction(function_graph, "ident", 0, 1, &id, 1, &input, 1,
&output, nullptr, nullptr, "test", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteGraph(function_graph);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_ContextAddFunction(ctx, fn, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteFunction(fn);
for (bool async : {false, true, false}) {
TFE_Executor* old_executor = TFE_ContextGetExecutorForThread(ctx);
    TFE_Executor* executor = TFE_NewExecutor(
        /*is_async=*/async, /*enable_streaming_enqueue=*/true,
        /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* t =
TF_AllocateTensor(TF_INT32, nullptr, 0, 1 * sizeof(tensorflow::int32));
*reinterpret_cast<tensorflow::int32*>(TF_TensorData(t)) = 42;
TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteTensor(t);
TFE_Op* op = TFE_NewOp(ctx, "ident", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_OpAddInput(op, h, status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
std::vector<TFE_TensorHandle*> result;
result.push_back(nullptr);
int num_retvals = 1;
TFE_Execute(op, result.data(), &num_retvals, status);
TFE_DeleteOp(op);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
ASSERT_EQ(num_retvals, 1);
TF_Tensor* r = TFE_TensorHandleResolve(result[0], status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
EXPECT_EQ(*reinterpret_cast<tensorflow::int32*>(TF_TensorData(r)), 42);
TFE_ContextSetExecutorForThread(ctx, old_executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
TFE_DeleteExecutor(old_executor);
TFE_DeleteTensorHandle(h);
TF_DeleteTensor(r);
TFE_DeleteTensorHandle(result[0]);
}
TFE_ContextRemoveFunction(ctx, "ident", status);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TFE_DeleteContext(ctx);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteStatus(status);
}
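// Runs a 2x2 MatMul on a caller-installed executor and checks the expected
// product values.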
void Executor_MatMul_CPU(bool async) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_Executor* old_executor = TFE_ContextGetExecutorForThread(ctx);
  TFE_Executor* executor = TFE_NewExecutor(
      /*is_async=*/async, /*enable_streaming_enqueue=*/true,
      /*in_flight_nodes_limit=*/0);
TFE_ContextSetExecutorForThread(ctx, executor);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TFE_Op* matmul = MatMulOp(ctx, m, m);
TFE_TensorHandle* retvals[2] = {nullptr, nullptr};
int num_retvals = 2;
TFE_Execute(matmul, &retvals[0], &num_retvals, status);
EXPECT_EQ(1, num_retvals);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
TFE_ContextSetExecutorForThread(ctx, old_executor);
TFE_ExecutorWaitForAllPendingNodes(executor, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteExecutor(executor);
TFE_DeleteExecutor(old_executor);
TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
TF_DeleteTensor(t);
EXPECT_EQ(7, product[0]);
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
TF_DeleteStatus(status);
}
TEST(CAPI, Executor_MatMul_CPU) { Executor_MatMul_CPU(false); }
TEST(CAPI, Executor_MatMul_CPUAsync) { Executor_MatMul_CPU(true); }
void Deleter(void* data, size_t unused, void* tensor_handle) {
TFE_DeleteTensorHandle(static_cast<TFE_TensorHandle*>(tensor_handle));
}
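// For each device, aliases an existing handle's device memory via
// TFE_NewTensorHandleFromDeviceMemory (with a deleter that releases the copy)
// and checks that the data survives a round trip back to the host.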
TEST(CAPI, TensorHandleOnDeviceMemory) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
TF_Tensor* m_data = TFE_TensorHandleResolve(m, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float* m_float = static_cast<float*>(TF_TensorData(m_data));
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int num_devices = TF_DeviceListCount(devices);
for (int d = 0; d < num_devices; ++d) {
const char* name = TF_DeviceListName(devices, d, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* copy = TFE_TensorHandleCopyToDevice(m, ctx, name, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
void* data = TFE_TensorHandleDevicePointer(copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
size_t size = TFE_TensorHandleDeviceMemorySize(copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
int64_t dims[] = {2, 2};
TFE_TensorHandle* copy_aliased = TFE_NewTensorHandleFromDeviceMemory(
ctx, name, TF_FLOAT, dims, 2, data, size, &Deleter, copy, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_TensorHandle* on_host =
TFE_TensorHandleCopyToDevice(copy_aliased, ctx, "CPU:0", status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Tensor* resolved = TFE_TensorHandleResolve(on_host, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const float* resolved_data =
static_cast<const float*>(TF_TensorData(resolved));
EXPECT_EQ(0, memcmp(m_float, resolved_data, 4 * sizeof(float)));
TF_DeleteTensor(resolved);
TFE_DeleteTensorHandle(copy_aliased);
TFE_DeleteTensorHandle(on_host);
}
TF_DeleteDeviceList(devices);
TF_DeleteTensor(m_data);
TFE_DeleteTensorHandle(m);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
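// Device type and device ID queries on a null handle must fail with
// TF_INVALID_ARGUMENT.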
TEST(CAPI, TensorHandleNullptr) {
TFE_TensorHandle* h = nullptr;
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
const char* device_type = TFE_TensorHandleDeviceType(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_type, nullptr);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
TF_SetStatus(status.get(), TF_OK, "");
int device_id = TFE_TensorHandleDeviceID(h, status.get());
ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get()));
ASSERT_EQ(device_id, -1);
ASSERT_EQ("Invalid handle", string(TF_Message(status.get())));
}
TEST(CAPI, TensorHandleDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(ctx);
const char* device_type = TFE_TensorHandleDeviceType(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = TFE_TensorHandleDeviceID(hcpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
hcpu, ctx, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_Op* shape_op = ShapeOp(ctx, hgpu);
TFE_OpSetDevice(shape_op, gpu_device_name.c_str(), status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retvals[1];
int num_retvals = 1;
TFE_Execute(shape_op, &retvals[0], &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
device_type = TFE_TensorHandleDeviceType(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "GPU")) << device_type;
device_id = TFE_TensorHandleDeviceID(retvals[0], status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
TFE_DeleteOp(shape_op);
TFE_DeleteTensorHandle(retvals[0]);
TFE_DeleteTensorHandle(hgpu);
}
TFE_DeleteTensorHandle(hcpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleDefaults) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_TensorHandle* h_default = TestMatrixTensorHandle(ctx);
const char* device_type = TFE_TensorHandleDeviceType(h_default, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = TFE_TensorHandleDeviceID(h_default, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id) << device_id;
TFE_TensorHandle* h_cpu = TFE_TensorHandleCopyToDevice(
h_default, ctx, "/device:CPU:0", status.get());
const char* device_type_cpu = TFE_TensorHandleDeviceType(h_cpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_TRUE(absl::StrContains(device_type_cpu, "CPU")) << device_type_cpu;
int device_id_cpu = TFE_TensorHandleDeviceID(h_cpu, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
ASSERT_EQ(0, device_id_cpu) << device_id_cpu;
TFE_DeleteTensorHandle(h_default);
TFE_DeleteTensorHandle(h_cpu);
TFE_Executor* executor = TFE_ContextGetExecutorForThread(ctx);
TFE_ExecutorWaitForAllPendingNodes(executor, status.get());
ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
TFE_DeleteExecutor(executor);
TFE_DeleteContext(ctx);
}
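// Connects a context to a two-worker cluster via TFE_ContextSetServerDef, then
// resets it as a local-only context with TFE_InitializeLocalOnlyContext.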
TEST(CAPI, CreateLocalContextAsReset) {
tensorflow::ServerDef server_def = GetServerDef("worker", 2);
server_def.mutable_default_session_config()->set_isolate_session_state(false);
ServerFactory* factory;
ASSERT_TRUE(ServerFactory::GetFactory(server_def, &factory).ok());
server_def.set_job_name("worker");
server_def.set_task_index(0);
std::unique_ptr<tensorflow::ServerInterface> w0;
ASSERT_TRUE(
factory->NewServer(server_def, ServerFactory::Options(), &w0).ok());
ASSERT_TRUE(w0->Start().ok());
server_def.set_task_index(1);
std::unique_ptr<tensorflow::ServerInterface> w1;
ASSERT_TRUE(
factory->NewServer(server_def, ServerFactory::Options(), &w1).ok());
ASSERT_TRUE(w1->Start().ok());
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
opts->session_options.options.config.set_isolate_session_state(false);
TFE_ContextOptionsSetDevicePlacementPolicy(opts, TFE_DEVICE_PLACEMENT_SILENT);
TFE_Context* ctx = TFE_NewContext(opts, status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
server_def.set_task_index(0);
auto cluster = server_def.mutable_cluster();
auto client_job = cluster->add_job();
client_job->set_name("localhost");
int client_port = tensorflow::testing::PickUnusedPortOrDie();
client_job->mutable_tasks()->insert(
{0, strings::StrCat("localhost:", client_port)});
server_def.set_job_name("localhost");
auto serialized = server_def.SerializeAsString();
TFE_ContextSetServerDef(ctx, 0, serialized.data(), serialized.size(), status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
server_def.set_job_name("worker");
server_def.set_task_index(0);
tensorflow::ClusterDef* cluster_def = server_def.mutable_cluster();
tensorflow::JobDef* job_def = cluster_def->mutable_job(0);
int worker_port = tensorflow::testing::PickUnusedPortOrDie();
job_def->mutable_tasks()->at(0) =
tensorflow::strings::StrCat("localhost:", worker_port);
serialized = server_def.SerializeAsString();
TFE_InitializeLocalOnlyContext(ctx, 0, serialized.data(), serialized.size(),
status);
EXPECT_EQ(TF_GetCode(status), TF_OK) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
w0.release();
w1.release();
}
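// Creates a variable on a remote task, restarts one worker, updates both
// contexts' server defs with a timeout, and reads the variable back through the
// second context.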
TEST(CAPI, ShareVariableAcrossContextsAfterUpdateContextWorksWithTimeout) {
tensorflow::ServerDef server_def_0 = GetServerDef(3);
server_def_0.mutable_default_session_config()->set_isolate_session_state(
false);
tensorflow::ServerDef server_def_1 =
ReplaceTaskInServerDef(server_def_0, 0);
string serialized_server_def_0 = server_def_0.SerializeAsString();
string serialized_server_def_1 = server_def_1.SerializeAsString();
server_def_0.set_task_index(1);
std::unique_ptr<tensorflow::GrpcServer> worker_server1;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
server_def_0.set_task_index(2);
std::unique_ptr<tensorflow::GrpcServer> worker_server2;
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server2)
.ok());
ASSERT_TRUE(worker_server2->Start().ok());
int32_t init_timeout_in_ms = 300000;
  TFE_Context* ctx_0 = CreateContext(serialized_server_def_0,
                                     /*isolate_session_state=*/false,
                                     init_timeout_in_ms);
  TFE_Context* ctx_1 = CreateContext(serialized_server_def_1,
                                     /*isolate_session_state=*/false,
                                     init_timeout_in_ms);
const char remote_device[] = "/job:localhost/replica:0/task:2/device:CPU:0";
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_0);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
{
const std::vector<std::string>& device_names = ListDeviceNames(ctx_1);
ASSERT_TRUE(std::find(device_names.begin(), device_names.end(),
remote_device) != device_names.end());
}
TFE_TensorHandle* handle_0 =
CreateVariable(ctx_0, 1.2, remote_device, "var");
TF_Status* status = TF_NewStatus();
TFE_ContextAsyncWait(ctx_0, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
int port = tensorflow::testing::PickUnusedPortOrDie();
ReplaceTaskInServerDef(&server_def_0, 1, "localhost", port);
ReplaceTaskInServerDef(&server_def_1, 1, "localhost", port);
server_def_0.set_task_index(1);
worker_server1.release();
ASSERT_TRUE(tensorflow::GrpcServer::Create(
server_def_0, tensorflow::Env::Default(), &worker_server1)
.ok());
ASSERT_TRUE(worker_server1->Start().ok());
{
server_def_0.set_task_index(0);
string serialized_update = server_def_0.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDefWithTimeout(ctx_0, 0, serialized_update.data(),
serialized_update.size(),
init_timeout_in_ms, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
server_def_1.set_task_index(0);
string serialized_update = server_def_1.SerializeAsString();
TF_Status* status = TF_NewStatus();
TFE_ContextUpdateServerDefWithTimeout(ctx_1, 0, serialized_update.data(),
serialized_update.size(),
init_timeout_in_ms, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
{
TFE_TensorHandle* var_handle =
CreateVarHandle(ctx_1, remote_device, "var");
TFE_TensorHandle* handle_1 = nullptr;
int num_retvals = 1;
TF_Status* status = TF_NewStatus();
TFE_Op* op = TFE_NewOp(ctx_1, "ReadVariableOp", status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
TFE_OpAddInput(op, var_handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_Execute(op, &handle_1, &num_retvals, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteOp(op);
ASSERT_EQ(1, num_retvals);
EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(handle_1));
EXPECT_EQ(0, TFE_TensorHandleNumDims(handle_1, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float value = 0.0f;
TF_Tensor* t = TFE_TensorHandleResolve(handle_1, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
memcpy(&value, TF_TensorData(t), sizeof(float));
TF_DeleteTensor(t);
EXPECT_EQ(1.2f, value);
TFE_DeleteTensorHandle(handle_1);
TF_DeleteStatus(status);
TFE_DeleteTensorHandle(var_handle);
}
TFE_DeleteTensorHandle(handle_0);
TFE_DeleteContext(ctx_0);
TFE_DeleteContext(ctx_1);
worker_server1.release();
worker_server2.release();
}
}
} |
951 | cpp | tensorflow/tensorflow | c_api_opaque | tensorflow/lite/core/c/c_api_opaque.cc | tensorflow/lite/core/c/c_api_opaque_test.cc | #ifndef TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_
#define TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/c/operator.h"
#ifdef __cplusplus
extern "C" {
#endif
TFL_CAPI_EXPORT extern TfLiteType TfLiteOpaqueTensorType(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern int32_t TfLiteOpaqueTensorNumDims(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern int32_t TfLiteOpaqueTensorDim(
const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorGetNumDimsSignature(
const TfLiteOpaqueTensor* opaque_tensor, int32_t* num_dims);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorGetDimSignature(
const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index,
int32_t* dim_length);
TFL_CAPI_EXPORT extern int TfLiteOpaqueTensorIsVariable(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern size_t TfLiteOpaqueTensorByteSize(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern void* TfLiteOpaqueTensorData(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern TfLiteAllocationType TfLiteOpaqueTensorGetAllocationType(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern TfLiteAllocationStrategy
TfLiteOpaqueTensorGetAllocationStrategy(const TfLiteOpaqueTensor* t);
TFL_CAPI_EXPORT extern TfLiteRunStability
TfLiteOpaqueTensorGetBufferAddressStability(const TfLiteOpaqueTensor* t);
TFL_CAPI_EXPORT extern TfLiteRunStability TfLiteOpaqueTensorGetDataStability(
const TfLiteOpaqueTensor* t);
TFL_CAPI_EXPORT extern TfLiteRunStep TfLiteOpaqueTensorGetDataKnownStep(
const TfLiteOpaqueTensor* t);
TFL_CAPI_EXPORT extern TfLiteRunStep TfLiteOpaqueTensorGetShapeKnownStep(
const TfLiteOpaqueTensor* t);
TFL_CAPI_EXPORT extern const char* TfLiteOpaqueTensorName(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern TfLiteQuantization TfLiteOpaqueTensorGetQuantization(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern TfLiteQuantizationParams
TfLiteOpaqueTensorGetQuantizationParams(
const TfLiteOpaqueTensor* opaque_tensor);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorCopyFromBuffer(
TfLiteOpaqueTensor* opaque_tensor, const void* input_data,
size_t input_data_size);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorCopyToBuffer(
const TfLiteOpaqueTensor* opaque_tensor, void* output_data,
size_t output_data_size);
int TfLiteOpaqueTensorGetStringCount(const TfLiteOpaqueTensor* tensor);
TfLiteStatus TfLiteOpaqueTensorGetString(const TfLiteOpaqueTensor* tensor,
int index, const char** str, int* len);
TfLiteStatus TfLiteOpaqueTensorWriteStrings(TfLiteOpaqueTensor* tensor,
const char* const* str_array,
int str_array_len,
const int* str_n_len);
TfLiteStatus TfLiteOpaqueTensorWriteString(TfLiteOpaqueTensor* tensor,
const char* str, int len);
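// Builder used with TfLiteOpaqueContextAddTensor to describe a new tensor
// (type, data, allocation type, quantization) before it is added to the context.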
typedef struct TfLiteOpaqueTensorBuilder TfLiteOpaqueTensorBuilder;
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderCreate();
void TfLiteOpaqueTensorBuilderDelete(TfLiteOpaqueTensorBuilder* builder);
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetType(
TfLiteOpaqueTensorBuilder* builder, TfLiteType type);
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetData(
TfLiteOpaqueTensorBuilder* builder, void* data);
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetAllocationType(
TfLiteOpaqueTensorBuilder* builder, TfLiteAllocationType allocation_type);
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetQuantizationParams(
TfLiteOpaqueTensorBuilder* builder, TfLiteQuantizationParams params);
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetQuantization(
TfLiteOpaqueTensorBuilder* builder, TfLiteQuantization quantization);
void TfLiteOpaqueTensorSetAllocationTypeToDynamic(TfLiteOpaqueTensor* tensor);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor* TfLiteOpaqueNodeGetInput(
const TfLiteOpaqueContext* opaque_context,
const TfLiteOpaqueNode* opaque_node, int index);
TFL_CAPI_EXPORT extern TfLiteOpaqueTensor* TfLiteOpaqueNodeGetOutput(
TfLiteOpaqueContext* opaque_context, const TfLiteOpaqueNode* opaque_node,
int index);
TFL_CAPI_EXPORT int TfLiteOpaqueNodeNumberOfInputs(
const TfLiteOpaqueNode* opaque_node);
TFL_CAPI_EXPORT int TfLiteOpaqueNodeNumberOfOutputs(
const TfLiteOpaqueNode* opaque_node);
TFL_CAPI_EXPORT extern void* TfLiteOpaqueNodeGetUserData(
const TfLiteOpaqueNode* opaque_node);
TFL_CAPI_EXPORT extern void* TfLiteOpaqueNodeGetBuiltinData(
const TfLiteOpaqueNode* opaque_node);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueNodeGetCustomInitialData(
const TfLiteOpaqueNode* opaque_node, const void** init_data, int* size);
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeInputs(
const TfLiteOpaqueNode* opaque_node, const int** inputs, int* num_inputs);
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeOutputs(
const TfLiteOpaqueNode* opaque_node, const int** outputs, int* num_outputs);
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeSetTemporaries(
TfLiteOpaqueNode* opaque_node, const int* temporaries, int num_temporaries);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueNodeTemporaries(const TfLiteOpaqueNode* opaque_node,
const int** temporaries,
int* num_temporaries);
TFL_CAPI_EXPORT
int TfLiteOpaqueNodeGetInputTensorIndex(const TfLiteOpaqueNode* opaque_node,
int index_of_input);
TFL_CAPI_EXPORT
int TfLiteOpaqueNodeGetOutputTensorIndex(const TfLiteOpaqueNode* opaque_node,
int index_of_output);
typedef struct TfLiteIntArray TfLiteIntArray;
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueContextGetExecutionPlan(
TfLiteOpaqueContext* opaque_context, TfLiteIntArray** execution_plan);
TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueContextGetNodeAndRegistration(
struct TfLiteOpaqueContext* opaque_context, int node_index,
TfLiteOpaqueNode** node, TfLiteOperator** registration_external);
TFL_CAPI_EXPORT TfLiteStatus
TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
struct TfLiteOpaqueContext* opaque_context,
TfLiteOperator* registration_external,
const TfLiteIntArray* nodes_to_replace,
TfLiteOpaqueDelegate* opaque_delegate);
TFL_CAPI_EXPORT
TfLiteOpaqueTensor* TfLiteOpaqueContextGetOpaqueTensor(
const TfLiteOpaqueContext* opaque_context, int index);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextGetInputs(
const struct TfLiteOpaqueContext* opaque_context, const int** inputs,
int* num_inputs);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextGetOutputs(
const struct TfLiteOpaqueContext* opaque_context, const int** outputs,
int* num_outputs);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextGetVariables(
const struct TfLiteOpaqueContext* opaque_context, const int** variables,
int* num_variables);
TFL_CAPI_EXPORT
size_t TfLiteOpaqueContextGetNumNodes(
const struct TfLiteOpaqueContext* opaque_context);
TFL_CAPI_EXPORT
size_t TfLiteOpaqueContextGetNumTensors(
const struct TfLiteOpaqueContext* opaque_context);
TFL_CAPI_EXPORT
const char* TfLiteOpaqueContextGetName(
const struct TfLiteOpaqueContext* opaque_context);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextResizeTensor(TfLiteOpaqueContext* context,
TfLiteOpaqueTensor* tensor,
TfLiteIntArray* new_size);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextAcquireSubgraphContext(
struct TfLiteOpaqueContext* opaque_context, int subgraph_index,
TfLiteOpaqueContext** acquired_opaque_context);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextReleaseSubgraphContext(
struct TfLiteOpaqueContext* opaque_context, int subgraph_index);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
TfLiteOpaqueContext* opaque_context, int subgraph_index);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextGetNodeInitDataMmapInfo(
const TfLiteOpaqueContext* context, const TfLiteOpaqueNode* node, int* fd,
int64_t* custom_initial_data_offset_in_file,
int64_t* custom_initial_data_size);
TFL_CAPI_EXPORT
TfLiteStatus TfLiteOpaqueContextAddTensor(TfLiteOpaqueContext* context,
TfLiteOpaqueTensorBuilder* builder,
int* new_tensor_index);
TFL_CAPI_EXPORT
TfLiteSta | #include "tensorflow/lite/core/c/c_api_opaque.h"
#include <stddef.h>
#include <cstring>
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api.h"
namespace tflite {
namespace {
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithMemNoneBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithMmapRoBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithArenaRwBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithArenaRwPersistentBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithDynamicBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithPersistentRoBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithCustomBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithVariantObjectBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithMemNoneBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithMmapRoBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithArenaRwBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithArenaRwPersistentBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithDynamicBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithPersistentRoBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithCustomBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithVariantObjectBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorData, ValidInput) {
TfLiteTensor t;
char data[] = "data";
t.data.raw = data;
EXPECT_EQ(TfLiteOpaqueTensorData(reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
data);
}
TEST(TestTfLiteOpaqueTensorData, NullInput) {
EXPECT_EQ(TfLiteOpaqueTensorData(nullptr), nullptr);
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithMemNoneBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithMmapRoBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithArenaRwBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithArenaRwPersistentBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithDynamicBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithPersistentRoBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithCustomBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithVariantObjectBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithMemNoneBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithMmapRoBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithArenaRwBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithArenaRwPersistentBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithDynamicBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithPersistentRoBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithCustomBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithVariantObjectBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithMemNoneBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithMmapRoBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithArenaRwBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithArenaRwPersistentBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithDynamicBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithPersistentRoBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithCustomBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithVariantObjectBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueDelegate, CreateAndDelete) {
std::unique_ptr<TfLiteOpaqueDelegateBuilder> opaque_delegate_builder(
new TfLiteOpaqueDelegateBuilder{});
TfLiteOpaqueDelegate* opaque_delegate =
TfLiteOpaqueDelegateCreate(opaque_delegate_builder.get());
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
TEST(TestTfLiteOpaqueDelegate, Create_WithNull) {
EXPECT_EQ(nullptr, TfLiteOpaqueDelegateCreate(nullptr));
}
TEST(TestTfLiteOpaqueDelegate, Delete_WithNull) {
TfLiteOpaqueDelegateDelete(nullptr);
}
TEST(TestTfLiteOpaqueDelegate, GetData_WellFormedOpaqueDelegate) {
int delegate_data = 42;
TfLiteOpaqueDelegateBuilder builder{};
builder.data = &delegate_data;
TfLiteOpaqueDelegate* opaque_delegate = TfLiteOpaqueDelegateCreate(&builder);
EXPECT_EQ(&delegate_data, TfLiteOpaqueDelegateGetData(opaque_delegate));
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
TEST(TestTfLiteOpaqueDelegate,
GetData_NotConstructedWithTfLiteOpaqueDelegateCreate) {
int delegate_data = 42;
TfLiteDelegate non_opaque_delegate = TfLiteDelegateCreate();
non_opaque_delegate.data_ = &delegate_data;
auto* opaque_delegate =
reinterpret_cast<TfLiteOpaqueDelegate*>(&non_opaque_delegate);
EXPECT_EQ(&delegate_data, TfLiteOpaqueDelegateGetData(opaque_delegate));
}
TEST(TestTfLiteOpaqueDelegate, GetData_NoDataSetViaOpaqueDelegateBuilder) {
TfLiteOpaqueDelegateBuilder builder{};
TfLiteOpaqueDelegate* opaque_delegate = TfLiteOpaqueDelegateCreate(&builder);
EXPECT_EQ(nullptr, TfLiteOpaqueDelegateGetData(opaque_delegate));
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
namespace my_custom_op {
struct MyOpData {
int temp_tensor_index;
};
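// Test-only "Sinh" custom op that simply copies its input to its output through
// a temporary tensor allocated in Prepare.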
void* Init(TfLiteOpaqueContext* context, const char* buffer, size_t length) {
auto* op_data = new MyOpData{};
return op_data;
}
void Free(TfLiteOpaqueContext* context, void* buffer) {
delete reinterpret_cast<MyOpData*>(buffer);
}
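// Prepare first exercises the error path of TfLiteOpaqueNodeSetTemporaries,
// then adds one float32 arena temporary and resizes it to a single element.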
TfLiteStatus Prepare(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
auto* op_data =
reinterpret_cast<MyOpData*>(TfLiteOpaqueNodeGetUserData(node));
const int num_temporaries = 1;
int temporary_tensor_indices[num_temporaries];
  // A negative temporary count must be rejected; zero temporaries is valid.
  TfLiteStatus status = TfLiteOpaqueNodeSetTemporaries(
      node, temporary_tensor_indices, /*num_temporaries=*/-1);
  TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteError);
  status = TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
                                          /*num_temporaries=*/0);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
TfLiteOpaqueTensorBuilder* builder = TfLiteOpaqueTensorBuilderCreate();
TfLiteOpaqueTensorBuilderSetType(builder, kTfLiteFloat32);
TfLiteOpaqueTensorBuilderSetAllocationType(builder, kTfLiteArenaRw);
TfLiteOpaqueContextAddTensor(context, builder, &temporary_tensor_indices[0]);
TfLiteOpaqueTensorBuilderDelete(builder);
status = TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
num_temporaries);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
op_data->temp_tensor_index = temporary_tensor_indices[0];
TfLiteOpaqueTensor* temp_tensor =
TfLiteOpaqueContextGetOpaqueTensor(context, op_data->temp_tensor_index);
TfLiteIntArray* temp_size = TfLiteIntArrayCreate(1);
temp_size->data[0] = 1;
return TfLiteOpaqueContextResizeTensor(context, temp_tensor, temp_size);
}
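// Invoke checks the temporary registered in Prepare and copies
// input -> temporary -> output.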
TfLiteStatus Invoke(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
auto* op_data =
reinterpret_cast<MyOpData*>(TfLiteOpaqueNodeGetUserData(node));
const int* temporary_tensor_indices;
int num_temporaries;
TfLiteOpaqueNodeTemporaries(node, &temporary_tensor_indices,
&num_temporaries);
TF_LITE_OPAQUE_ENSURE(context, num_temporaries == 1);
TF_LITE_OPAQUE_ENSURE(
context, temporary_tensor_indices[0] == op_data->temp_tensor_index);
TfLiteOpaqueTensor* temp_tensor =
TfLiteOpaqueContextGetOpaqueTensor(context, op_data->temp_tensor_index);
TF_LITE_OPAQUE_ENSURE(context,
TfLiteOpaqueTensorType(temp_tensor) == kTfLiteFloat32);
TF_LITE_OPAQUE_ENSURE(context, TfLiteOpaqueTensorGetAllocationType(
temp_tensor) == kTfLiteArenaRw);
size_t temp_bytes = TfLiteOpaqueTensorByteSize(temp_tensor);
void* temp_data = TfLiteOpaqueTensorData(temp_tensor);
TF_LITE_OPAQUE_ENSURE(context, temp_bytes != 0);
TF_LITE_OPAQUE_ENSURE(context, temp_data != nullptr);
EXPECT_EQ(1, TfLiteOpaqueNodeNumberOfInputs(node));
const TfLiteOpaqueTensor* input = TfLiteOpaqueNodeGetInput(context, node, 0);
size_t input_bytes = TfLiteOpaqueTensorByteSize(input);
void* input_data = TfLiteOpaqueTensorData(input);
EXPECT_EQ(input_bytes, temp_bytes);
std::memcpy(temp_data, input_data, input_bytes);
EXPECT_EQ(1, TfLiteOpaqueNodeNumberOfOutputs(node));
TfLiteOpaqueTensor* output = TfLiteOpaqueNodeGetOutput(context, node, 0);
size_t output_bytes = TfLiteOpaqueTensorByteSize(output);
void* output_data = TfLiteOpaqueTensorData(output);
EXPECT_EQ(output_bytes, temp_bytes);
std::memcpy(output_data, temp_data, output_bytes);
return kTfLiteOk;
}
}
TEST(TestTfLiteOpaqueNode, CustomOpWithSetAndGetTemporaries) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
nullptr);
TfLiteOperatorSetPrepare(reg, my_custom_op::Prepare);
TfLiteOperatorSetInit(reg, my_custom_op::Init);
TfLiteOperatorSetFree(reg, my_custom_op::Free);
TfLiteOperatorSetInvoke(reg, my_custom_op::Invoke);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithLegacyCallbacks) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
nullptr);
TfLiteOperatorSetPrepare(reg, [](auto context, auto node) {
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInit(reg, [](auto context, auto buffer, auto length) {
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFree(
reg, [](auto context, auto data) { my_custom_op::Free(context, data); });
TfLiteOperatorSetInvoke(reg, [](auto context, auto node) {
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithNoUserData) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
nullptr);
TfLiteOperatorSetPrepareWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInitWithData(
reg, [](auto user_data, auto context, auto buffer, auto length) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFreeWithData(reg,
[](auto user_data, auto context, auto data) {
EXPECT_EQ(nullptr, user_data);
my_custom_op::Free(context, data);
});
TfLiteOperatorSetInvokeWithData(reg,
[](auto user_data, auto context, auto node) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithData) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
reinterpret_cast<void*>(345));
TfLiteOperatorSetPrepareWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInitWithData(
reg, [](auto user_data, auto context, auto buffer, auto length) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFreeWithData(
reg, [](auto user_data, auto context, auto data) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
my_custom_op::Free(context, data);
});
TfLiteOperatorSetInvokeWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
}
} |
952 | cpp | tensorflow/tensorflow | flatbuffer_conversions | tensorflow/lite/core/api/flatbuffer_conversions.cc | tensorflow/lite/core/api/flatbuffer_conversions_test.cc | #ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
#define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
#include <cstddef>
#include <new>
#include <type_traits>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
class BuiltinDataAllocator {
public:
virtual void* Allocate(size_t size, size_t alignment_hint) = 0;
virtual void Deallocate(void* data) = 0;
template <typename T>
T* AllocatePOD() {
static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
return new (allocated_memory) T();
}
virtual ~BuiltinDataAllocator() {}
};
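// Parses the builtin options of the given operator into an allocator-owned
// struct returned through builtin_data.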
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
ErrorReporter* error_reporter);
TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseAssignVariable(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBatchToSpaceNd(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBroadcastArgs(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBroadcastTo(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseConcatenation(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseDepthToSpace(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseElu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseEmbeddingLookup(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseExpandDims(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFullyConnected(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseGatherNd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseGreaterEqual(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePow(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseReadVariable(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseResizeBilinear(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSelectV2(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSlice(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSpaceToBatchNd(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSpaceToDepth(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSquaredDifference(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStridedSlice(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseTranspose(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseTransposeConv(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBitwiseXor(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseRightShift(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStablehloScatter(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStablehloGather(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStablehloReduceWindow(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStablehloPad(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseStablehloComposite(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
}
#endif
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/vector.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
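// RAII helper around BuiltinDataAllocator: Allocate() returns a unique_ptr
// whose deleter calls Deallocate(), so partially parsed data is not leaked
// when a parse function returns early.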
class SafeBuiltinDataAllocator {
public:
class BuiltinDataDeleter {
public:
explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
: allocator_(allocator) {}
void operator()(void* data) { allocator_->Deallocate(data); }
private:
BuiltinDataAllocator* allocator_;
};
template <typename T>
using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;
explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
: allocator_(allocator) {}
template <typename T>
BuiltinDataPtr<T> Allocate() {
return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
BuiltinDataDeleter(allocator_));
}
private:
BuiltinDataAllocator* allocator_;
};
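// DCHECKs that none of the pointer arguments passed to a parse function are
// null.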
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
TFLITE_DCHECK(op != nullptr);
TFLITE_DCHECK(error_reporter != nullptr);
TFLITE_DCHECK(allocator != nullptr);
TFLITE_DCHECK(builtin_data != nullptr);
}
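// Copies a flatbuffer int vector into `buffer`, reporting an error if the
// vector is missing or holds more elements than fit in `max_size_of_buffer`
// bytes.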
template <typename DataType = int32_t>
static TfLiteStatus FlatBufferIntVectorToArray(
int max_size_of_buffer, const flatbuffers::Vector<DataType>* flat_vector,
DataType* buffer, ErrorReporter* error_reporter, const char* op_name) {
if (!flat_vector) {
TF_LITE_REPORT_ERROR(error_reporter,
"Input array not provided for operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
size_t num_dimensions = flat_vector->size();
if (num_dimensions > max_size_of_buffer / sizeof(DataType)) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Found too many dimensions in the input array of operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
for (size_t i = 0; i < num_dimensions; ++i) {
buffer[i] = flat_vector->Get(i);
}
}
}
return kTfLiteOk;
}
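// Converts the flatbuffer activation enum to the TfLite fused-activation
// enum; unhandled values fall through to kTfLiteActNone.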
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
switch (activation) {
case ActivationFunctionType_NONE:
return kTfLiteActNone;
case ActivationFunctionType_RELU:
return kTfLiteActRelu;
case ActivationFunctionType_RELU_N1_TO_1:
return kTfLiteActReluN1To1;
case ActivationFunctionType_RELU6:
return kTfLiteActRelu6;
case ActivationFunctionType_TANH:
return kTfLiteActTanh;
case ActivationFunctionType_SIGN_BIT:
return kTfLiteActSignBit;
}
return kTfLiteActNone;
}
TfLitePadding ConvertPadding(Padding padding) {
switch (padding) {
case Padding_SAME:
return kTfLitePaddingSame;
case Padding_VALID:
return kTfLitePaddingValid;
}
return kTfLitePaddingUnknown;
}
TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
switch (padding) {
case MirrorPadMode_REFLECT:
return kTfLiteMirrorPaddingReflect;
case MirrorPadMode_SYMMETRIC:
return kTfLiteMirrorPaddingSymmetric;
}
return kTfLiteMirrorPaddingUnknown;
}
TfLiteRngAlgorithm ConvertRngAlgorithm(RngAlgorithm algorithm) {
switch (algorithm) {
case RngAlgorithm_THREEFRY:
return kTfLiteRngAlgorithmThreefry;
case RngAlgorithm_PHILOX:
return kTfLiteRngAlgorithmPhilox;
case RngAlgorithm_DEFAULT:
return kTfLiteRngAlgorithmDefault;
}
return kTfLiteRngAlgorithmUnknown;
}
#ifndef TF_LITE_STATIC_MEMORY
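// Full builtin-option parser used in non-static-memory builds: dispatches to
// the per-operator Parse* helpers based on `op_type`.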
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
auto parseLSHProjectionType = [](LSHProjectionType type) {
switch (type) {
case LSHProjectionType_SPARSE:
return kTfLiteLshProjectionSparse;
case LSHProjectionType_DENSE:
return kTfLiteLshProjectionDense;
default:
return kTfLiteLshProjectionUnknown;
}
};
auto parseCombinerType = [](CombinerType type) {
switch (type) {
case CombinerType_MEAN:
return kTfLiteCombinerTypeMean;
case CombinerType_SQRTN:
return kTfLiteCombinerTypeSqrtn;
case CombinerType_SUM:
default:
return kTfLiteCombinerTypeSum;
}
};
SafeBuiltinDataAllocator safe_allocator(allocator);
*builtin_data = nullptr;
switch (op_type) {
case BuiltinOperator_ABS: {
return ParseAbs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD: {
return ParseAdd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD_N: {
return ParseAddN(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MAX: {
return ParseArgMax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MIN: {
return ParseArgMin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ASSIGN_VARIABLE: {
return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_AVERAGE_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_MATMUL: {
return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_TO_SPACE_ND: {
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BROADCAST_ARGS: {
return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BROADCAST_TO: {
return ParseBroadcastTo(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CALL_ONCE: {
return ParseCallOnce(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONCATENATION: {
return ParseConcatenation(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONV_2D: {
return ParseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CUMSUM: {
return ParseCumsum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTH_TO_SPACE: {
return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEQUANTIZE: {
return ParseDequantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DIV: {
return ParseDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ELU: {
return ParseElu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EMBEDDING_LOOKUP: {
return ParseEmbeddingLookup(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXP: {
return ParseExp(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXPAND_DIMS: {
return ParseExpandDims(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FILL: {
return ParseFill(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR: {
return ParseFloor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_DIV: {
return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_MOD: {
return ParseFloorMod(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FULLY_CONNECTED: {
return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GATHER_ND: {
return ParseGatherNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER: {
return ParseGreater(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER_EQUAL: {
return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_HARD_SWISH: {
return ParseHardSwish(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_NORMALIZATION: {
return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LEAKY_RELU: {
return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS: {
return ParseLess(op, error_reporter, allo | #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
using testing::AllOf;
using testing::Each;
using testing::ElementsAre;
using testing::Eq;
using testing::HasSubstr;
using testing::StrEq;
namespace tflite {
namespace {
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ += vsnprintf(buffer_ + buffer_size_,
kBufferSize - buffer_size_, format, args);
return buffer_size_;
}
const char* GetBuffer() const { return buffer_; }
int GetBufferSize() const { return buffer_size_; }
bool IsEmpty() const { return !buffer_size_; }
string GetString() const { return string(buffer_, buffer_size_); }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
class MockDataAllocator : public BuiltinDataAllocator {
public:
MockDataAllocator() : is_allocated_(false) {}
void* Allocate(size_t size, size_t alignment_hint) override {
EXPECT_FALSE(is_allocated_);
const int max_size = kBufferSize;
EXPECT_LE(size, max_size);
is_allocated_ = true;
return buffer_;
}
void Deallocate(void* data) override { is_allocated_ = false; }
private:
static constexpr int kBufferSize = 1024;
char buffer_[kBufferSize];
bool is_allocated_;
};
}
class FlatbufferConversionsTest : public ::testing::Test {
public:
const Operator* BuildTestOperator(BuiltinOptions op_type,
flatbuffers::Offset<void> options) {
flatbuffers::Offset<Operator> offset =
CreateOperatorDirect(builder_, 0, nullptr, nullptr, op_type, options,
nullptr, CustomOptionsFormat_FLEXBUFFERS, nullptr);
builder_.Finish(offset);
void* pointer = builder_.GetBufferPointer();
return flatbuffers::GetRoot<Operator>(pointer);
}
const Operator* BuildTestOperator(BuiltinOptions2 op_type,
flatbuffers::Offset<void> options) {
flatbuffers::Offset<Operator> offset = CreateOperatorDirect(
builder_, 0, nullptr, nullptr,
tflite::BuiltinOptions_NONE,
0, nullptr,
tflite::CustomOptionsFormat_FLEXBUFFERS,
nullptr, nullptr,
0, 0,
op_type,
options);
builder_.Finish(offset);
void* pointer = builder_.GetBufferPointer();
return flatbuffers::GetRoot<Operator>(pointer);
}
protected:
MockErrorReporter mock_reporter_;
MockDataAllocator mock_allocator_;
flatbuffers::FlatBufferBuilder builder_;
};
TEST_F(FlatbufferConversionsTest, ParseSqueezeAll) {
const Operator* op = BuildTestOperator(
BuiltinOptions_SqueezeOptions, CreateSqueezeOptions(builder_).Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_SQUEEZE, &mock_reporter_,
&mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, ParseDynamicReshape) {
const Operator* op = BuildTestOperator(
BuiltinOptions_ReshapeOptions, CreateReshapeOptions(builder_).Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_RESHAPE, &mock_reporter_,
&mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, TestParseOpDataConv) {
const Operator* conv_op =
BuildTestOperator(BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(builder_, Padding_SAME, 1, 2,
ActivationFunctionType_RELU, 3, 4)
.Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk,
ParseOpData(conv_op, BuiltinOperator_CONV_2D, &mock_reporter_,
&mock_allocator_, &output_data));
EXPECT_NE(nullptr, output_data);
TfLiteConvParams* params = reinterpret_cast<TfLiteConvParams*>(output_data);
EXPECT_EQ(kTfLitePaddingSame, params->padding);
EXPECT_EQ(1, params->stride_width);
EXPECT_EQ(2, params->stride_height);
EXPECT_EQ(kTfLiteActRelu, params->activation);
EXPECT_EQ(3, params->dilation_width_factor);
EXPECT_EQ(4, params->dilation_height_factor);
}
TEST_F(FlatbufferConversionsTest, ParseBadFullyConnected) {
const Operator* conv_op = BuildTestOperator(
BuiltinOptions_FullyConnectedOptions,
CreateFullyConnectedOptions(
builder_, ActivationFunctionType_RELU,
static_cast<FullyConnectedOptionsWeightsFormat>(-1), true)
.Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteError,
ParseOpData(conv_op, BuiltinOperator_FULLY_CONNECTED,
&mock_reporter_, &mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, TestParseOpDataCustom) {
const Operator* custom_op =
BuildTestOperator(BuiltinOptions_NONE, flatbuffers::Offset<void>());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk,
ParseOpData(custom_op, BuiltinOperator_CUSTOM, &mock_reporter_,
&mock_allocator_, &output_data));
EXPECT_EQ(nullptr, output_data);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorType) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_FLOAT32, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteFloat32, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeFloat16) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_FLOAT16, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteFloat16, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeBFloat16) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_BFLOAT16, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteBFloat16, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeInt4) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_INT4, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteInt4, type);
}
class StablehloReduceWindowFlatbufferConversionsTest
: public FlatbufferConversionsTest {
public:
static constexpr int kMaxDims =
TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT;
static constexpr int64_t kValidValue = 5;
auto ValidAttr() {
return builder_.CreateVector(std::vector<int64_t>(kMaxDims, kValidValue));
}
auto InvalidAttr() {
return builder_.CreateVector(
std::vector<int64_t>(kMaxDims + 1, kValidValue));
}
auto ValidPaddingAttr() {
return builder_.CreateVector(
std::vector<int64_t>(2 * kMaxDims, kValidValue));
}
auto InvalidPaddingAttr() {
return builder_.CreateVector(
std::vector<int64_t>(2 * kMaxDims + 1, kValidValue));
}
auto EmptyAttr() { return builder_.CreateVector<int64_t>({}); }
};
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, Succeeds) {
const Operator* stablehlo_reduce_window_op = BuildTestOperator(
BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
builder_.CreateVector<int64_t>({1, 2}),
builder_.CreateVector<int64_t>({3, 4}),
builder_.CreateVector<int64_t>({5, 6}),
builder_.CreateVector<int64_t>({7, 8}),
builder_.CreateVector<int64_t>({9, 10, 11, 12}),
13)
.Union());
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, 2),
ElementsAre(1, 2));
EXPECT_THAT(std::make_tuple(output_data->window_strides, 2),
ElementsAre(3, 4));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, 2),
ElementsAre(5, 6));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, 2),
ElementsAre(7, 8));
EXPECT_THAT(std::make_tuple(output_data->padding, 4),
ElementsAre(9, 10, 11, 12));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWithNoWindowDimensions) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
0,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoWindowStrides) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
0,
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoBaseDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
0,
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoWindowDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
0,
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(1));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, SucceedsWithNoPadding) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
0,
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWithEmptyWindowDimensions) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
EmptyAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyWindowStrides) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
EmptyAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyBaseDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
EmptyAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyWindowDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
EmptyAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(1));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyPadding) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
EmptyAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithParamsAtMaxDims) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowDimensionsHasMoreThanMaxDims) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
InvalidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
AllOf(HasSubstr("Found too many dimensions in the input array of "
"operation 'stablehlo.reduce_window'."),
HasSubstr("Check the 'window_dimensions' attribute.")));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowStridesHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
InvalidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr("'window_strides' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenBaseDilationsHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
InvalidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr("'base_dilations' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowDilationsHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
InvalidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr(
"'window_dilations' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenPaddingHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
InvalidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'padding' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, FailsWithWrongOptions) {
const Operator* stablehlo_reduce_window_op =
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions, 0);
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data), |
953 | cpp | tensorflow/tensorflow | error_reporter | tensorflow/compiler/mlir/lite/core/api/error_reporter.cc | tensorflow/compiler/mlir/lite/core/api/error_reporter_test.cc | #ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
#define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
#include <cstdarg>
namespace tflite {
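// Abstract error-reporting interface. Subclasses implement the va_list
// overload of Report(); the variadic overloads forward to it printf-style.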
class ErrorReporter {
public:
virtual ~ErrorReporter() = default;
virtual int Report(const char* format, va_list args) = 0;
int Report(const char* format, ...);
int ReportError(void*, const char* format, ...);
};
}
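// Reports an error through `reporter` unless error strings are compiled out
// via TF_LITE_STRIP_ERROR_STRINGS, in which case the macro expands to nothing.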
#ifndef TF_LITE_STRIP_ERROR_STRINGS
#define TF_LITE_REPORT_ERROR(reporter, ...) \
do { \
static_cast<::tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \
} while (false)
#else
#define TF_LITE_REPORT_ERROR(reporter, ...)
#endif
#endif
#include "tensorflow/lite/core/api/error_reporter.h"
#include <cstdarg>
namespace tflite {
int ErrorReporter::Report(const char* format, ...) {
va_list args;
va_start(args, format);
int code = Report(format, args);
va_end(args);
return code;
}
int ErrorReporter::ReportError(void*, const char* format, ...) {
va_list args;
va_start(args, format);
int code = Report(format, args);
va_end(args);
return code;
}
} | #include "tensorflow/lite/core/api/error_reporter.h"
#include <cstdio>
#include <gtest/gtest.h>
namespace tflite {
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() { buffer_[0] = 0; }
int Report(const char* format, va_list args) override {
vsnprintf(buffer_, kBufferSize, format, args);
return 0;
}
char* GetBuffer() { return buffer_; }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
};
TEST(ErrorReporter, TestReport) {
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
reporter->Report("Error: %d", 23);
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
}
TEST(ErrorReporter, TestReportMacro) {
MockErrorReporter mock_reporter;
#ifndef TF_LITE_STRIP_ERROR_STRINGS
ErrorReporter* reporter = &mock_reporter;
#endif
TF_LITE_REPORT_ERROR(reporter, "Error: %d", 23);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
#else
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), ""));
#endif
}
} |
954 | cpp | tensorflow/tensorflow | op_resolver | tensorflow/lite/core/api/op_resolver.cc | tensorflow/lite/core/api/op_resolver_test.cc | #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
#ifndef DOXYGEN_SKIP
class OpResolverInternal;
class Subgraph;
namespace internal {
class CommonOpaqueConversionUtil;
class OperatorsCache;
}
#endif
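// Abstract interface that maps builtin op codes and custom op names to their
// TfLiteRegistration implementations.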
class OpResolver {
public:
virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
int version) const = 0;
virtual const TfLiteRegistration* FindOp(const char* op,
int version) const = 0;
using TfLiteDelegatePtrVector =
std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
return {};
}
using TfLiteDelegateCreator =
std::function<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
TfLiteContext* )>;
using TfLiteDelegateCreators = std::vector<TfLiteDelegateCreator>;
virtual TfLiteDelegateCreators GetDelegateCreators() const { return {}; }
using TfLiteOpaqueDelegatePtr =
std::unique_ptr<TfLiteOpaqueDelegate, void (*)(TfLiteOpaqueDelegate*)>;
using TfLiteOpaqueDelegateCreator =
std::function<TfLiteOpaqueDelegatePtr(int )>;
using TfLiteOpaqueDelegateCreators = std::vector<TfLiteOpaqueDelegateCreator>;
virtual TfLiteOpaqueDelegateCreators GetOpaqueDelegateCreators() const {
return {};
}
virtual ~OpResolver() = default;
OpResolver() = default;
OpResolver(const OpResolver& other) = default;
private:
virtual bool MayContainUserDefinedOps() const { return true; }
#ifndef DOXYGEN_SKIP
friend class OpResolverInternal;
friend class Subgraph;
friend class tflite::internal::CommonOpaqueConversionUtil;
friend class tflite::internal::OperatorsCache;
#endif
struct OpId {
int builtin_code;
const char* custom_name;
int version;
bool operator==(const OpId& other) const {
return builtin_code == other.builtin_code &&
custom_name == other.custom_name && version == other.version;
}
struct Hasher {
size_t operator()(const OpId& op_id) const {
size_t hash_builtin_code = std::hash<int>()(op_id.builtin_code);
size_t hash_custom_name =
op_id.custom_name != nullptr
? std::hash<std::string>()(std::string(op_id.custom_name))
: 0;
size_t hash_version = std::hash<int>()(op_id.version);
return Combine(hash_builtin_code,
Combine(hash_custom_name, hash_version));
}
private:
static size_t Combine(size_t hash1, size_t hash2) {
constexpr int num_bits_to_rotate_left = 21;
constexpr int num_bits_to_rotate_right =
std::numeric_limits<size_t>::digits - num_bits_to_rotate_left;
size_t hash1_rotated = (hash1 << num_bits_to_rotate_left) |
(hash1 >> num_bits_to_rotate_right);
return hash1_rotated + hash2;
}
};
};
mutable std::shared_ptr<internal::OperatorsCache>
registration_externals_cache_;
};
#ifndef DOXYGEN_SKIP
namespace internal {
class OperatorsCache
: private std::unordered_map<OpResolver::OpId,
std::unique_ptr<TfLiteOperator>,
OpResolver::OpId::Hasher> {
friend class ::tflite::Subgraph;
friend class ::tflite::internal::CommonOpaqueConversionUtil;
};
}
#endif
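// Looks up the registration for `opcode` in `op_resolver`, reporting an error
// and returning kTfLiteError if the op code is out of range or no matching
// registration is found.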
TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
const OpResolver& op_resolver,
ErrorReporter* error_reporter,
const TfLiteRegistration** registration);
}
#endif
#include "tensorflow/lite/core/api/op_resolver.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
TfLiteStatus GetRegistrationFromOpCode(
const OperatorCode* opcode, const OpResolver& op_resolver,
ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
TfLiteStatus status = kTfLiteOk;
*registration = nullptr;
auto builtin_code = GetBuiltinCode(opcode);
int version = opcode->version();
if (builtin_code > BuiltinOperator_MAX) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Op builtin_code out of range: %d. Are you using old TFLite binary "
"with newer model?",
builtin_code);
status = kTfLiteError;
} else if (builtin_code != BuiltinOperator_CUSTOM) {
*registration = op_resolver.FindOp(builtin_code, version);
if (*registration == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Didn't find op for builtin opcode '%s' version '%d'. "
"An older version of this builtin might be supported. "
"Are you using an old TFLite binary with a newer model?\n",
EnumNameBuiltinOperator(builtin_code), version);
status = kTfLiteError;
}
} else if (!opcode->custom_code()) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Operator with CUSTOM builtin_code has no custom_code.\n");
status = kTfLiteError;
} else {
const char* name = opcode->custom_code()->c_str();
*registration = op_resolver.FindOp(name, version);
if (*registration == nullptr) {
status = kTfLiteError;
}
}
return status;
}
} | #include "tensorflow/lite/core/api/op_resolver.h"
#include <cstring>
#include <gtest/gtest.h>
#include "tensorflow/lite/schema/schema_conversion_utils.h"
namespace tflite {
namespace {
void* MockInit(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void MockFree(TfLiteContext* context, void* buffer) {
}
TfLiteStatus MockPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus MockInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
class MockOpResolver : public OpResolver {
public:
const TfLiteRegistration* FindOp(BuiltinOperator op,
int version) const override {
if (op == BuiltinOperator_CONV_2D) {
static TfLiteRegistration r = {MockInit, MockFree, MockPrepare,
MockInvoke};
return &r;
} else {
return nullptr;
}
}
const TfLiteRegistration* FindOp(const char* op, int version) const override {
if (strcmp(op, "mock_custom") == 0) {
static TfLiteRegistration r = {MockInit, MockFree, MockPrepare,
MockInvoke};
return &r;
} else {
return nullptr;
}
}
};
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
return buffer_size_;
}
char* GetBuffer() { return buffer_; }
int GetBufferSize() { return buffer_size_; }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
}
TEST(OpResolver, TestResolver) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
const TfLiteRegistration* registration =
resolver->FindOp(BuiltinOperator_CONV_2D, 0);
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
registration = resolver->FindOp(BuiltinOperator_CAST, 0);
EXPECT_EQ(nullptr, registration);
registration = resolver->FindOp("mock_custom", 0);
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
registration = resolver->FindOp("nonexistent_custom", 0);
EXPECT_EQ(nullptr, registration);
}
TEST(OpResolver, TestGetRegistrationFromOpCodeConv) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset =
CreateOperatorCodeDirect(builder, BuiltinOperator_CONV_2D, nullptr, 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteOk, GetRegistrationFromOpCode(conv_code, *resolver, reporter,
®istration));
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeCast) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset =
CreateOperatorCodeDirect(builder, BuiltinOperator_CAST, nullptr, 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteError, GetRegistrationFromOpCode(conv_code, *resolver,
reporter, ®istration));
EXPECT_EQ(nullptr, registration);
EXPECT_NE(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeCustom) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset = CreateOperatorCodeDirect(
builder, BuiltinOperator_CUSTOM, "mock_custom", 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteOk, GetRegistrationFromOpCode(conv_code, *resolver, reporter,
®istration));
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeNonexistentCustom) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset = CreateOperatorCodeDirect(
builder, BuiltinOperator_CUSTOM, "nonexistent_custom", 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteError, GetRegistrationFromOpCode(conv_code, *resolver,
reporter, ®istration));
EXPECT_EQ(nullptr, registration);
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
} |
955 | cpp | tensorflow/tensorflow | task_internal | tensorflow/lite/core/async/task_internal.cc | tensorflow/lite/core/async/task_internal_test.cc | #ifndef TENSORFLOW_LITE_CORE_ASYNC_TASK_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_ASYNC_TASK_INTERNAL_H_
#include <atomic>
#include <map>
#include <memory>
#include <string>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite::async {
class ExecutionTask;
}
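// Opaque struct exposed through the C API; owns the C++ ExecutionTask state.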
struct TfLiteExecutionTask {
TfLiteExecutionTask();
std::unique_ptr<tflite::async::ExecutionTask> task;
};
namespace tflite {
namespace async {
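// Per-invocation state for asynchronous execution: buffer handles and
// synchronization objects keyed by tensor index, plus scheduling status and
// delegate-specific execution data.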
class ExecutionTask {
public:
TfLiteBufferHandle GetBufferHandle(TfLiteIoType io_type,
const char* name) const;
TfLiteBufferHandle GetBufferHandle(int tensor_index) const;
TfLiteStatus SetBufferHandle(TfLiteIoType io_type, const char* name,
TfLiteBufferHandle handle);
TfLiteStatus SetBufferHandle(int tensor_index, TfLiteBufferHandle handle);
TfLiteSynchronization* GetSynchronization(TfLiteIoType io_type,
const char* name) const;
TfLiteSynchronization* GetSynchronization(int tensor_index) const;
TfLiteStatus SetSynchronization(TfLiteIoType io_type, const char* name,
TfLiteSynchronization* sync);
TfLiteStatus SetSynchronization(int tensor_index,
TfLiteSynchronization* sync);
using TensorNameMapT = std::map<std::string, uint32_t>;
void SetInputNameMap(const TensorNameMapT* input_name_to_idx) {
input_name_to_idx_ = input_name_to_idx;
}
void SetOutputNameMap(const TensorNameMapT* output_name_to_idx) {
output_name_to_idx_ = output_name_to_idx;
}
bool Scheduled() const { return scheduled_.load(); }
bool SetScheduled(bool scheduled) { return scheduled_.exchange(scheduled); }
TfLiteStatus Status() const { return status_.load(); }
void SetStatus(TfLiteStatus status) { status_.store(status); }
void SetDelegateExecutionData(TfLiteAsyncKernel* kernel, void* data) {
data_ = data;
}
void* GetDelegateExecutionData(TfLiteAsyncKernel* kernel) const {
return data_;
}
private:
struct IOData {
TfLiteBufferHandle buf = kTfLiteNullBufferHandle;
TfLiteSynchronization* sync = nullptr;
};
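// Resolves a tensor name to its index using the input or output name map.
// Returns false if the map is not set or the name is not found.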
bool GetTensorIdx(TfLiteIoType io_type, const char* name, int* idx) const;
std::map<int, IOData> io_data_;
std::atomic_bool scheduled_ = false;
std::atomic<TfLiteStatus> status_ = kTfLiteOk;
const TensorNameMapT* input_name_to_idx_ = nullptr;
const TensorNameMapT* output_name_to_idx_ = nullptr;
void* data_ = nullptr;
};
}
}
#endif
#include "tensorflow/lite/core/async/task_internal.h"
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace async {
bool ExecutionTask::GetTensorIdx(TfLiteIoType io_type, const char* name,
int* idx) const {
const std::map<std::string, uint32_t>* map = nullptr;
if (io_type == kTfLiteIoTypeInput) {
map = input_name_to_idx_;
} else {
map = output_name_to_idx_;
}
if (!map) return false;
if (auto it_idx = map->find(name); it_idx != map->end()) {
*idx = it_idx->second;
return true;
}
return false;
}
TfLiteBufferHandle ExecutionTask::GetBufferHandle(TfLiteIoType io_type,
const char* name) const {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteNullBufferHandle;
}
return GetBufferHandle(index);
}
TfLiteBufferHandle ExecutionTask::GetBufferHandle(int tensor_index) const {
if (auto it = io_data_.find(tensor_index); it != io_data_.end()) {
return it->second.buf;
}
return kTfLiteNullBufferHandle;
}
TfLiteStatus ExecutionTask::SetBufferHandle(TfLiteIoType io_type,
const char* name,
TfLiteBufferHandle handle) {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteError;
}
return SetBufferHandle(index, handle);
}
TfLiteStatus ExecutionTask::SetBufferHandle(int tensor_index,
TfLiteBufferHandle handle) {
io_data_[tensor_index].buf = handle;
return kTfLiteOk;
}
TfLiteSynchronization* ExecutionTask::GetSynchronization(
TfLiteIoType io_type, const char* name) const {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return nullptr;
}
return GetSynchronization(index);
}
TfLiteSynchronization* ExecutionTask::GetSynchronization(
int tensor_index) const {
if (auto it = io_data_.find(tensor_index); it != io_data_.end()) {
return it->second.sync;
}
return nullptr;
}
TfLiteStatus ExecutionTask::SetSynchronization(TfLiteIoType io_type,
const char* name,
TfLiteSynchronization* sync) {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteError;
}
return SetSynchronization(index, sync);
}
TfLiteStatus ExecutionTask::SetSynchronization(int tensor_index,
TfLiteSynchronization* sync) {
io_data_[tensor_index].sync = sync;
return kTfLiteOk;
}
}
}
TfLiteExecutionTask::TfLiteExecutionTask() {
task = std::make_unique<tflite::async::ExecutionTask>();
} | #include "tensorflow/lite/core/async/task_internal.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite::async {
TEST(TfLiteExecutionTaskTest, BasicTest) {
tflite::async::ExecutionTask task;
tflite::async::ExecutionTask::TensorNameMapT input_names;
input_names["x"] = 1;
input_names["y"] = 2;
tflite::async::ExecutionTask::TensorNameMapT output_names;
output_names["a"] = 3;
task.SetInputNameMap(&input_names);
task.SetOutputNameMap(&output_names);
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk, task.SetSynchronization(kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(42, task.GetBufferHandle(kTfLiteIoTypeInput, "x"));
EXPECT_EQ(43, task.GetBufferHandle(kTfLiteIoTypeInput, "y"));
EXPECT_EQ(44, task.GetBufferHandle(kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(sync, task.GetSynchronization(kTfLiteIoTypeInput, "x"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "y"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "a"));
TfLiteSynchronizationDelete(sync);
}
TEST(TfLiteExecutionTaskTest, NameMapUninitialized) {
tflite::async::ExecutionTask task;
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeInput, "foo"));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeOutput, "foo"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "foo"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "foo"));
}
TEST(TfLiteExecutionTaskTest, NoMatchingName) {
tflite::async::ExecutionTask task;
tflite::async::ExecutionTask::TensorNameMapT input_names;
input_names["x"] = 1;
input_names["y"] = 2;
tflite::async::ExecutionTask::TensorNameMapT output_names;
output_names["a"] = 3;
task.SetInputNameMap(&input_names);
task.SetOutputNameMap(&output_names);
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteError, task.SetBufferHandle(kTfLiteIoTypeInput, "xx", 42));
EXPECT_EQ(kTfLiteError, task.SetBufferHandle(kTfLiteIoTypeOutput, "aa", 44));
EXPECT_EQ(kTfLiteError,
task.SetSynchronization(kTfLiteIoTypeInput, "xx", sync));
EXPECT_EQ(kTfLiteError,
task.SetSynchronization(kTfLiteIoTypeOutput, "aa", sync));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeInput, "xx"));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeOutput, "aa"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "xx"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "aa"));
TfLiteSynchronizationDelete(sync);
}
TEST(TfLiteExecutionTaskTest, DelegateData) {
TfLiteAsyncKernel kernel{};
int data = 0;
tflite::async::ExecutionTask task;
EXPECT_EQ(nullptr, task.GetDelegateExecutionData(&kernel));
task.SetDelegateExecutionData(&kernel, &data);
EXPECT_EQ(&data, task.GetDelegateExecutionData(&kernel));
}
} |
956 | cpp | tensorflow/tensorflow | async_subgraph | tensorflow/lite/core/async/async_subgraph.cc | tensorflow/lite/core/async/async_subgraph_test.cc | #ifndef TENSORFLOW_LITE_CORE_ASYNC_ASYNC_SUBGRAPH_H_
#define TENSORFLOW_LITE_CORE_ASYNC_ASYNC_SUBGRAPH_H_
#include <atomic>
#include <map>
#include <vector>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
namespace tflite {
namespace async {
class AsyncSubgraphTestPeer;
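// Async execution wrapper around a Subgraph that is fully delegated to a
// single backend. All calls are forwarded to the backend's TfLiteAsyncKernel.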
class AsyncSubgraph {
public:
explicit AsyncSubgraph(Subgraph* subgraph);
Subgraph* subgraph() const;
TfLiteContext* context() const;
TfLiteStatus RegisterBuffer(TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle);
TfLiteStatus RegisterBufferSlice(TfLiteBufferHandle buffer_pool,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle);
TfLiteStatus UnregisterBuffer(TfLiteBufferHandle handle);
const std::vector<const char*>& SupportedBufferTypes(
TfLiteIoType io_type) const;
const std::vector<const char*>& SupportedSynchronizations(
TfLiteIoType io_type) const;
bool ReconcileRestrictions(int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged,
TfLiteAttributeMap* conflict) const;
TfLiteStatus SetAttributes(int tensor_index, const TfLiteAttributeMap* attrs);
TfLiteStatus SetBufferAttributes(const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs);
TfLiteStatus GetBufferAttributes(const TfLiteBackendBuffer* buffer,
TfLiteAttributeMap* attrs);
TfLiteStatus Prepare();
TfLiteExecutionTask* CreateTask();
TfLiteStatus InvokeAsync(TfLiteExecutionTask* task);
TfLiteStatus Wait(TfLiteExecutionTask* task);
TfLiteStatus Finish(TfLiteExecutionTask* task);
private:
friend class AsyncSubgraphTestPeer;
bool IsFullyDelegated() const;
TfLiteOpaqueContext* opaque_context() const;
TfLiteAsyncKernel* async_kernel() const;
Subgraph* subgraph_ = nullptr;
std::atomic<TfLiteBufferHandle> next_buffer_handle_ = {0};
std::map<TfLiteIoType, std::vector<const char*>> supported_buffer_types_;
std::map<TfLiteIoType, std::vector<const char*>> supported_synchronizations_;
mutable TfLiteAsyncKernel* async_kernel_ = nullptr;
TfLiteOpaqueNode* opaque_node_ = nullptr;
};
}
}
#endif
#include "tensorflow/lite/core/async/async_subgraph.h"
#include <vector>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace async {
namespace {
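// Returns the async kernel for the delegate node, preferring the external
// (opaque) registration's async_kernel_with_data, then its async_kernel, and
// finally the legacy TfLiteRegistration::async_kernel.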
TfLiteAsyncKernel* GetAsyncKernel(TfLiteContext* context,
const TfLiteRegistration& op_reg,
TfLiteNode& node) {
if (op_reg.registration_external) {
auto* context_ = reinterpret_cast<TfLiteOpaqueContext*>(context);
auto* node_ = reinterpret_cast<TfLiteOpaqueNode*>(&node);
if (op_reg.registration_external->async_kernel_with_data) {
auto user_data = op_reg.registration_external->user_data;
return op_reg.registration_external->async_kernel_with_data(
user_data, context_, node_);
} else if (op_reg.registration_external->async_kernel) {
return op_reg.registration_external->async_kernel(context_, node_);
}
}
if (op_reg.async_kernel) {
return op_reg.async_kernel(context, &node);
}
return nullptr;
}
}
Subgraph* AsyncSubgraph::subgraph() const { return subgraph_; }
TfLiteContext* AsyncSubgraph::context() const { return subgraph_->context(); }
TfLiteOpaqueContext* AsyncSubgraph::opaque_context() const {
return reinterpret_cast<TfLiteOpaqueContext*>(context());
}
TfLiteAsyncKernel* AsyncSubgraph::async_kernel() const { return async_kernel_; }
AsyncSubgraph::AsyncSubgraph(Subgraph* subgraph) : subgraph_(subgraph) {
if (!IsFullyDelegated()) {
subgraph->ReportError("Model is not fully delegated by 1 backend.");
return;
}
auto node_index = subgraph_->execution_plan()[0];
TfLiteNode& node = subgraph_->nodes_and_registration_[node_index].first;
const TfLiteRegistration& registration =
subgraph_->nodes_and_registration_[node_index].second;
async_kernel_ = GetAsyncKernel(context(), registration, node);
if (!async_kernel_) {
subgraph->ReportError("Backend does not support asynchronous execution.");
return;
}
opaque_node_ =
reinterpret_cast<TfLiteOpaqueNode*>(const_cast<TfLiteNode*>(&node));
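  // Query the backend kernel once for the buffer and synchronization type
  // names it supports on inputs and outputs, and cache them per I/O type.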
#define POPULATE_VECTOR(io_type, accessor, dest) \
{ \
const char* const* types = nullptr; \
size_t n_types = 0; \
(*async_kernel_->accessor)(async_kernel_, io_type, &types, &n_types); \
dest[io_type] = std::vector<const char*>(types, types + n_types); \
}
POPULATE_VECTOR(kTfLiteIoTypeInput, supported_buffer_types,
supported_buffer_types_);
POPULATE_VECTOR(kTfLiteIoTypeOutput, supported_buffer_types,
supported_buffer_types_);
POPULATE_VECTOR(kTfLiteIoTypeInput, supported_synchronizations,
supported_synchronizations_);
POPULATE_VECTOR(kTfLiteIoTypeOutput, supported_synchronizations,
supported_synchronizations_);
#undef POPULATE_VECTOR
}
bool AsyncSubgraph::IsFullyDelegated() const {
if (subgraph_->execution_plan().size() != 1) return false;
const TfLiteNode& node =
subgraph_->nodes_and_registration()[subgraph_->execution_plan()[0]].first;
if (node.delegate == nullptr) return false;
return true;
}
TfLiteStatus AsyncSubgraph::RegisterBuffer(TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (buffer == nullptr || attrs == nullptr || handle == nullptr ||
async_kernel() == nullptr) {
return kTfLiteError;
}
*handle = next_buffer_handle_.fetch_add(1, std::memory_order_relaxed);
return (*async_kernel_->register_buffer)(
async_kernel_, reinterpret_cast<TfLiteOpaqueContext*>(context()), io_type,
buffer, attrs, *handle);
}
TfLiteStatus AsyncSubgraph::RegisterBufferSlice(TfLiteBufferHandle buffer_pool,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (attrs == nullptr || handle == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
*handle = next_buffer_handle_.fetch_add(1, std::memory_order_relaxed);
return (*async_kernel_->register_buffer_slice)(
async_kernel_, opaque_context(), buffer_pool, attrs, *handle);
}
TfLiteStatus AsyncSubgraph::UnregisterBuffer(TfLiteBufferHandle handle) {
if (async_kernel() == nullptr) return kTfLiteError;
return (*async_kernel_->unregister_buffer)(async_kernel_, opaque_context(),
handle);
}
const std::vector<const char*>& AsyncSubgraph::SupportedBufferTypes(
TfLiteIoType io_type) const {
return supported_buffer_types_.at(io_type);
}
const std::vector<const char*>& AsyncSubgraph::SupportedSynchronizations(
TfLiteIoType io_type) const {
return supported_synchronizations_.at(io_type);
}
bool AsyncSubgraph::ReconcileRestrictions(
int tensor_index, const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) const {
if (user_provided_attributes == nullptr || merged == nullptr ||
async_kernel() == nullptr) {
return false;
}
if (tensor_index < 0 || tensor_index >= subgraph_->tensors_size()) {
return false;
}
return (*async_kernel_->reconcile_restrictions)(
async_kernel_, opaque_context(), opaque_node_, tensor_index,
user_provided_attributes, merged, conflict);
}
TfLiteStatus AsyncSubgraph::SetAttributes(int tensor_index,
const TfLiteAttributeMap* attrs) {
if (attrs == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
if (tensor_index < 0 || tensor_index >= subgraph_->tensors_size()) {
return kTfLiteError;
}
return (*async_kernel_->set_attributes)(async_kernel_, opaque_context(),
opaque_node_, tensor_index, attrs);
}
TfLiteStatus AsyncSubgraph::SetBufferAttributes(
const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs) {
return (*async_kernel_->set_buffer_attributes)(async_kernel_, buffer, attrs);
}
TfLiteStatus AsyncSubgraph::GetBufferAttributes(
const TfLiteBackendBuffer* buffer, TfLiteAttributeMap* attrs) {
return (*async_kernel_->get_buffer_attributes)(async_kernel_, buffer, attrs);
}
TfLiteStatus AsyncSubgraph::Prepare() {
if (async_kernel() == nullptr) return kTfLiteError;
return (*async_kernel_->prepare)(async_kernel_, opaque_context(),
opaque_node_);
}
TfLiteExecutionTask* AsyncSubgraph::CreateTask() {
return new TfLiteExecutionTask;
}
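// Schedules the task on the backend. A task may only be scheduled once at a
// time; it must be waited on (Wait) before it can be scheduled again.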
TfLiteStatus AsyncSubgraph::InvokeAsync(TfLiteExecutionTask* task) {
if (task == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
if (task->task->SetScheduled(true)) {
TFLITE_LOG(tflite::TFLITE_LOG_ERROR,
"The task has already been scheduled for execution.");
return kTfLiteError;
}
auto ret = (*async_kernel_->eval)(async_kernel_, opaque_context(),
opaque_node_, task);
task->task->SetStatus(ret);
return ret;
}
TfLiteStatus AsyncSubgraph::Wait(TfLiteExecutionTask* task) {
if (task == nullptr || async_kernel() == nullptr) {
return kTfLiteError;
}
if (!task->task->Scheduled()) {
return task->task->Status();
}
auto ret = (*async_kernel_->wait)(async_kernel_, opaque_context(), task);
task->task->SetStatus(ret);
task->task->SetScheduled(false);
return ret;
}
TfLiteStatus AsyncSubgraph::Finish(TfLiteExecutionTask* task) {
if (async_kernel() == nullptr) return kTfLiteError;
auto ret = (*async_kernel_->finish)(async_kernel_, opaque_context(), task);
if (ret != kTfLiteOk) {
subgraph_->ReportError("Failed to finish task.");
}
delete task;
return ret;
}
}
} | #include "tensorflow/lite/core/async/async_subgraph.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/backend_async_kernel_interface.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
#include "tensorflow/lite/core/async/testing/test_backend.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
using ::testing::_;
namespace tflite {
namespace async {
class AsyncSubgraphTestPeer {
public:
explicit AsyncSubgraphTestPeer(AsyncSubgraph* subgraph)
: subgraph_(subgraph) {}
bool IsFullyDelegated() const { return subgraph_->IsFullyDelegated(); }
private:
AsyncSubgraph* subgraph_;
};
class AsyncSubgraphTest : public ::testing::Test {
protected:
void SetUp() override {
kernel_ = std::make_unique<testing::MockAsyncKernel>();
backend_ = std::make_unique<testing::TestBackend>(kernel_->kernel());
interpreter_ = std::make_unique<Interpreter>();
interpreter_->AddTensors(5);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({3, 4});
TfLiteQuantizationParams quant;
interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
void* builtin_data_2 = malloc(sizeof(int));
void* builtin_data_3 = malloc(sizeof(int));
interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data_1,
reg);
interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data_2,
reg);
interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data_3,
reg);
}
void BuildAsyncSubgraph() {
interpreter_->ModifyGraphWithDelegate(backend_->get_delegate());
subgraph_ = std::make_unique<AsyncSubgraph>(interpreter_->subgraph(0));
}
void TearDown() override { subgraph_.reset(); }
protected:
std::unique_ptr<testing::MockAsyncKernel> kernel_;
std::unique_ptr<testing::TestBackend> backend_;
std::unique_ptr<Interpreter> interpreter_;
std::unique_ptr<AsyncSubgraph> subgraph_;
};
TEST_F(AsyncSubgraphTest, FullyDelegated) {
BuildAsyncSubgraph();
EXPECT_TRUE(AsyncSubgraphTestPeer(subgraph_.get()).IsFullyDelegated());
}
TEST_F(AsyncSubgraphTest, NotFullyDelegated) {
backend_->SetMinPartitionedNodes(42);
BuildAsyncSubgraph();
EXPECT_FALSE(AsyncSubgraphTestPeer(subgraph_.get()).IsFullyDelegated());
}
TEST_F(AsyncSubgraphTest, BasicTest) {
BuildAsyncSubgraph();
EXPECT_CALL(*kernel_, RegisterBuffer(_, _, _, _, _));
EXPECT_CALL(*kernel_, RegisterBufferSlice(_, _, _, _));
EXPECT_CALL(*kernel_, UnregisterBuffer(_, _));
EXPECT_CALL(*kernel_, ReconcileRestrictions(_, _, _, _, _, _));
EXPECT_CALL(*kernel_, SetAttributes(_, _, _, _));
EXPECT_CALL(*kernel_, Prepare(_, _));
EXPECT_CALL(*kernel_, Eval(_, _, _));
EXPECT_CALL(*kernel_, Wait(_, _));
EXPECT_CALL(*kernel_, Finish(_, _));
auto* buffer = TfLiteBackendBufferCreate();
auto* attrs = new TfLiteAttributeMap(kTfLiteAttrMapTypeBuffer);
TfLiteBufferHandle handle = 1;
TfLiteBufferHandle another_handle = 1;
auto* task = new TfLiteExecutionTask;
EXPECT_FALSE(task->task->Scheduled());
subgraph_->RegisterBuffer(kTfLiteIoTypeInput, buffer, attrs, &handle);
subgraph_->RegisterBufferSlice(handle, attrs, &another_handle);
subgraph_->UnregisterBuffer(handle);
subgraph_->ReconcileRestrictions(0, attrs, attrs, attrs);
subgraph_->SetAttributes(0, attrs);
subgraph_->Prepare();
EXPECT_EQ(kTfLiteOk, subgraph_->InvokeAsync(task));
EXPECT_TRUE(task->task->Scheduled());
EXPECT_EQ(kTfLiteError, subgraph_->InvokeAsync(task));
EXPECT_TRUE(task->task->Scheduled());
EXPECT_EQ(kTfLiteOk, task->task->Status());
EXPECT_EQ(kTfLiteOk, subgraph_->Wait(task));
task->task->SetStatus(kTfLiteError);
EXPECT_EQ(kTfLiteError, subgraph_->Wait(task));
EXPECT_EQ(kTfLiteError, subgraph_->Wait(task));
EXPECT_FALSE(task->task->Scheduled());
subgraph_->Finish(task);
TfLiteBackendBufferDelete(buffer);
delete attrs;
EXPECT_NE(handle, another_handle);
}
TEST_F(AsyncSubgraphTest, OutOfBoundTest) {
BuildAsyncSubgraph();
auto* attrs = new TfLiteAttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_FALSE(subgraph_->ReconcileRestrictions(42, attrs, attrs, attrs));
EXPECT_EQ(kTfLiteError, subgraph_->SetAttributes(42, attrs));
delete attrs;
}
}
} |
957 | cpp | tensorflow/tensorflow | async_signature_runner | tensorflow/lite/core/async/c/async_signature_runner.cc | tensorflow/lite/core/async/c/async_signature_runner_test.cc | #ifndef TENSORFLOW_LITE_CORE_ASYNC_C_ASYNC_SIGNATURE_RUNNER_H_
#define TENSORFLOW_LITE_CORE_ASYNC_C_ASYNC_SIGNATURE_RUNNER_H_
#include <stdbool.h>
#include <stdint.h>
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TfLiteAsyncSignatureRunner TfLiteAsyncSignatureRunner;
TFL_CAPI_EXPORT extern TfLiteAsyncSignatureRunner*
TfLiteInterpreterGetAsyncSignatureRunner(const TfLiteInterpreter* interpreter,
const char* signature_key);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerRegisterBufferSlice(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle buffer_pool, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerUnregisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle handle);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types);
TFL_CAPI_EXPORT extern bool TfLiteAsyncSignatureRunnerReconcileRestrictions(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* name,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict);
TFL_CAPI_EXPORT extern bool
TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
const TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributes(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const char* name, const TfLiteAttributeMap* attrs);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerSetAttributesByIndex(
TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* attrs);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerPrepareBackends(
TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern TfLiteExecutionTask*
TfLiteAsyncSignatureRunnerCreateTask(
TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerInvokeAsync(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerWait(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerFinish(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task);
TFL_CAPI_EXPORT extern size_t TfLiteAsyncSignatureRunnerGetInputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const char* TfLiteAsyncSignatureRunnerGetInputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t input_index);
TFL_CAPI_EXPORT extern size_t TfLiteAsyncSignatureRunnerGetOutputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const char* TfLiteAsyncSignatureRunnerGetOutputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t output_index);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor*
TfLiteAsyncSignatureRunnerGetInputTensor(
TfLiteAsyncSignatureRunner* async_signature_runner, const char* input_name);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor*
TfLiteAsyncSignatureRunnerGetOutputTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner,
const char* output_name);
TFL_CAPI_EXPORT extern void TfLiteAsyncSignatureRunnerDelete(
TfLiteAsyncSignatureRunner* signature_runner);
TFL_CAPI_EXPORT extern const int* TfLiteAsyncSignatureRunnerInputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const int* TfLiteAsyncSignatureRunnerOutputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor*
TfLiteAsyncSignatureRunnerGetTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner, int index);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/async/c/async_signature_runner.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/async/async_signature_runner.h"
#include "tensorflow/lite/core/async/c/internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
TfLiteAsyncSignatureRunner* TfLiteInterpreterGetAsyncSignatureRunner(
const TfLiteInterpreter* interpreter, const char* signature_key) {
if (!interpreter) return nullptr;
tflite::async::AsyncSignatureRunner* runner =
interpreter->impl->GetAsyncSignatureRunner(signature_key);
if (!runner) return nullptr;
return new TfLiteAsyncSignatureRunner{runner};
}
TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->RegisterBuffer(io_type, buffer, attrs,
handle);
}
TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBufferSlice(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle buffer_pool, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->RegisterBufferSlice(buffer_pool, attrs,
handle);
}
TfLiteStatus TfLiteAsyncSignatureRunnerUnregisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle handle) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->UnregisterBuffer(handle);
}
TfLiteStatus TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types) {
if (async_signature_runner == nullptr || types == nullptr ||
num_types == nullptr)
return kTfLiteError;
const auto& buffer_types =
async_signature_runner->impl->SupportedBufferTypes(io_type);
*types = buffer_types.data();
*num_types = buffer_types.size();
return kTfLiteOk;
}
TfLiteStatus TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types) {
if (async_signature_runner == nullptr || types == nullptr ||
num_types == nullptr)
return kTfLiteError;
const auto& synchronization_types =
async_signature_runner->impl->SupportedSynchronizations(io_type);
*types = synchronization_types.data();
*num_types = synchronization_types.size();
return kTfLiteOk;
}
bool TfLiteAsyncSignatureRunnerReconcileRestrictions(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* name,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) {
if (!async_signature_runner) return false;
return async_signature_runner->impl->ReconcileRestrictions(
io_type, name, user_provided_attributes, merged, conflict);
}
bool TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
const TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) {
if (!async_signature_runner) return false;
return async_signature_runner->impl->ReconcileRestrictions(
tensor_index, user_provided_attributes, merged, conflict);
}
TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributes(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const char* name, const TfLiteAttributeMap* attrs) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->SetAttributes(io_type, name, attrs);
}
TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributesByIndex(
TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* attrs) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->SetAttributes(tensor_index, attrs);
}
TfLiteStatus TfLiteAsyncSignatureRunnerPrepareBackends(
TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->PrepareBackends();
}
TfLiteExecutionTask* TfLiteAsyncSignatureRunnerCreateTask(
TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->CreateTask();
}
TfLiteStatus TfLiteAsyncSignatureRunnerInvokeAsync(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->InvokeAsync(task);
}
TfLiteStatus TfLiteAsyncSignatureRunnerWait(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->Wait(task);
}
TfLiteStatus TfLiteAsyncSignatureRunnerFinish(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task) {
if (!async_signature_runner) return kTfLiteError;
return async_signature_runner->impl->Finish(task);
}
size_t TfLiteAsyncSignatureRunnerGetInputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return 0;
return async_signature_runner->impl->input_size();
}
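// Returns the signature name of the input tensor at the given position, or
// nullptr when the index is out of range or the runner was created without a
// signature (in which case the name list is empty). GetOutputName below
// behaves the same way for outputs.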
const char* TfLiteAsyncSignatureRunnerGetInputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t input_index) {
if (!async_signature_runner) return nullptr;
size_t count =
TfLiteAsyncSignatureRunnerGetInputCount(async_signature_runner);
if (input_index < 0 || input_index >= count) {
return nullptr;
}
const auto& input_names = async_signature_runner->impl->input_names();
if (input_index >= input_names.size()) {
return nullptr;
}
return input_names[input_index];
}
size_t TfLiteAsyncSignatureRunnerGetOutputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return 0;
return async_signature_runner->impl->output_size();
}
const char* TfLiteAsyncSignatureRunnerGetOutputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t output_index) {
if (!async_signature_runner) return nullptr;
size_t count =
TfLiteAsyncSignatureRunnerGetOutputCount(async_signature_runner);
if (output_index < 0 || output_index >= count) {
return nullptr;
}
const auto& output_names = async_signature_runner->impl->output_names();
if (output_index >= output_names.size()) {
return nullptr;
}
  return output_names[output_index];
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetInputTensor(
TfLiteAsyncSignatureRunner* async_signature_runner,
const char* input_name) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->input_tensor(input_name);
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetOutputTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner,
const char* output_name) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->output_tensor(output_name);
}
void TfLiteAsyncSignatureRunnerDelete(
TfLiteAsyncSignatureRunner* signature_runner) {
delete signature_runner;
}
const int* TfLiteAsyncSignatureRunnerInputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->inputs().data();
}
const int* TfLiteAsyncSignatureRunnerOutputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->outputs().data();
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner, int index) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->tensor(index);
} | #include "tensorflow/lite/core/async/c/async_signature_runner.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/backend_async_kernel_interface.h"
#include "tensorflow/lite/core/async/c/internal.h"
#include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
#include "tensorflow/lite/core/async/testing/test_backend.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/interpreter_test_util.h"
using ::testing::_;
using ::testing::Return;
namespace tflite {
namespace async {
class AsyncSignatureRunnerTest : public InterpreterTest,
public ::testing::WithParamInterface<bool> {
protected:
void SetUp() override {
kernel_ =
std::make_unique<::testing::StrictMock<testing::MockAsyncKernel>>();
backend_ = std::make_unique<testing::TestBackend>(kernel_->kernel());
auto interpreter = std::make_unique<Interpreter>();
interpreter->AddTensors(2);
interpreter->SetInputs({0});
interpreter->SetOutputs({1});
TfLiteQuantizationParams quant;
interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "x", {3},
quant);
interpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "a", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
interpreter->AddNodeWithParameters({0, 0}, {1}, nullptr, 0, builtin_data_1,
reg);
tflite_interpreter_.impl = std::move(interpreter);
}
void BuildRunner(bool has_signature) {
auto* interpreter = tflite_interpreter_.impl.get();
if (has_signature) {
const char kSignatureKey[] = "serving_default";
BuildSignature(interpreter, kSignatureKey, {{"input", 0}},
{{"output", 1}});
interpreter->ModifyGraphWithDelegate(backend_->get_delegate());
runner_ = TfLiteInterpreterGetAsyncSignatureRunner(&tflite_interpreter_,
kSignatureKey);
} else {
interpreter->ModifyGraphWithDelegate(backend_->get_delegate());
runner_ = TfLiteInterpreterGetAsyncSignatureRunner(&tflite_interpreter_,
nullptr);
}
ASSERT_NE(nullptr, runner_);
}
void TearDown() override { TfLiteAsyncSignatureRunnerDelete(runner_); }
protected:
TfLiteAsyncSignatureRunner* runner_ = nullptr;
std::unique_ptr<::testing::StrictMock<testing::MockAsyncKernel>> kernel_;
std::unique_ptr<testing::TestBackend> backend_;
internal::SignatureDef signature_def_;
TfLiteInterpreter tflite_interpreter_{};
};
INSTANTIATE_TEST_SUITE_P(AsyncSignatureRunnerTest, AsyncSignatureRunnerTest,
::testing::Bool());
TEST_P(AsyncSignatureRunnerTest, RegisterBufferTest) {
BuildRunner(GetParam());
EXPECT_CALL(*kernel_, RegisterBuffer(_, _, _, _, _))
.WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, RegisterBufferSlice(_, _, _, _))
.WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, UnregisterBuffer(_, _)).WillOnce(Return(kTfLiteOk));
TfLiteBufferHandle handle;
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
auto* buf = TfLiteBackendBufferCreate();
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerRegisterBuffer(
runner_, kTfLiteIoTypeInput, buf, attr, &handle));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerRegisterBufferSlice(
runner_, handle, attr, &handle));
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerUnregisterBuffer(runner_, handle));
TfLiteAttributeMapDelete(attr);
TfLiteBackendBufferDelete(buf);
}
TEST_P(AsyncSignatureRunnerTest, SupportedTypesTest) {
BuildRunner(GetParam());
const char* const* buffer_types = nullptr;
size_t num_buffer_types = 0;
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
runner_, kTfLiteIoTypeInput, &buffer_types, &num_buffer_types));
EXPECT_EQ(1, num_buffer_types);
EXPECT_STREQ("buffer_type", buffer_types[0]);
const char* const* sync_types = nullptr;
size_t num_sync_types = 0;
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
runner_, kTfLiteIoTypeInput, &sync_types, &num_sync_types));
EXPECT_EQ(1, num_sync_types);
EXPECT_STREQ("sync_type", sync_types[0]);
}
TEST_P(AsyncSignatureRunnerTest, ReconcileTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_CALL(*kernel_, ReconcileRestrictions(_, _, _, _, _, _))
.WillOnce(Return(true));
EXPECT_CALL(*kernel_, SetAttributes(_, _, _, _)).WillOnce(Return(kTfLiteOk));
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
if (has_signature) {
EXPECT_TRUE(TfLiteAsyncSignatureRunnerReconcileRestrictions(
runner_, kTfLiteIoTypeInput, "input", attr, attr, nullptr));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerSetAttributes(
runner_, kTfLiteIoTypeInput, "input", attr));
} else {
EXPECT_TRUE(TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
runner_, 0, attr, attr, nullptr));
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerSetAttributesByIndex(runner_, 0, attr));
}
TfLiteAttributeMapDelete(attr);
}
TEST_P(AsyncSignatureRunnerTest, ExecutionTest) {
BuildRunner(GetParam());
EXPECT_CALL(*kernel_, Prepare(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Eval(_, _, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Wait(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Finish(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerPrepareBackends(runner_));
auto* task = TfLiteAsyncSignatureRunnerCreateTask(runner_);
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerInvokeAsync(runner_, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerWait(runner_, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerFinish(runner_, task));
}
TEST_P(AsyncSignatureRunnerTest, InputsTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetInputCount(runner_));
if (has_signature) {
EXPECT_STREQ("input", TfLiteAsyncSignatureRunnerGetInputName(runner_, 0));
EXPECT_STREQ(
"x", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetInputTensor(runner_, "input")));
} else {
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetInputName(runner_, 0));
EXPECT_EQ(nullptr,
TfLiteAsyncSignatureRunnerGetInputTensor(runner_, "input"));
}
}
TEST_P(AsyncSignatureRunnerTest, OutputsTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetOutputCount(runner_));
if (has_signature) {
EXPECT_STREQ("output", TfLiteAsyncSignatureRunnerGetOutputName(runner_, 0));
EXPECT_STREQ(
"a", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetOutputTensor(runner_, "output")));
} else {
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetOutputName(runner_, 0));
EXPECT_EQ(nullptr,
TfLiteAsyncSignatureRunnerGetOutputTensor(runner_, "output"));
}
}
TEST_P(AsyncSignatureRunnerTest, InputByIndexTest) {
BuildRunner(GetParam());
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetInputCount(runner_));
auto* indices = TfLiteAsyncSignatureRunnerInputTensorIndices(runner_);
EXPECT_NE(nullptr, indices);
auto indice = indices[0];
EXPECT_STREQ("x", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetTensor(runner_, indice)));
}
TEST_P(AsyncSignatureRunnerTest, OutputsByIndexTest) {
BuildRunner(GetParam());
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetOutputCount(runner_));
auto* indices = TfLiteAsyncSignatureRunnerOutputTensorIndices(runner_);
EXPECT_NE(nullptr, indices);
auto indice = indices[0];
EXPECT_STREQ("a", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetTensor(runner_, indice)));
}
TEST_P(AsyncSignatureRunnerTest, IndexOutOfBound) {
BuildRunner(GetParam());
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetTensor(runner_, 42));
}
}
} |
958 | cpp | tensorflow/tensorflow | task | tensorflow/lite/core/async/c/task.cc | tensorflow/lite/core/async/c/task_test.cc | #ifndef XLA_SERVICE_CPU_RUNTIME_TASK_H_
#define XLA_SERVICE_CPU_RUNTIME_TASK_H_
#include <memory>
#include <utility>
#include "absl/functional/any_invocable.h"
namespace xla::cpu {
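// Wraps a move-only absl::AnyInvocable task into a copyable callable by
// sharing ownership through a std::shared_ptr, so it can be handed to APIs
// that require copyable function objects.
// Illustrative use (assuming a thread pool that accepts std::function<void()>):
//   absl::AnyInvocable<void()> work = ...;
//   pool->Schedule(ToCopyableTask(std::move(work)));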
inline auto ToCopyableTask(absl::AnyInvocable<void()> task) {
return [shared_task = std::make_shared<decltype(task)>(std::move(task))] {
(*shared_task)();
};
}
}
#endif
#include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
extern "C" {
TfLiteStatus TfLiteExecutionTaskSetBuffer(TfLiteExecutionTask* task,
TfLiteIoType io_type,
const char* tensor_signature_name,
TfLiteBufferHandle handle) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteError;
return task->task->SetBufferHandle(io_type, tensor_signature_name, handle);
}
TfLiteStatus TfLiteExecutionTaskSetBufferByIndex(TfLiteExecutionTask* task,
int tensor_index,
TfLiteBufferHandle handle) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->SetBufferHandle(tensor_index, handle);
}
TfLiteStatus TfLiteExecutionTaskSetSync(TfLiteExecutionTask* task,
TfLiteIoType io_type,
const char* tensor_signature_name,
TfLiteSynchronization* sync) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteError;
return task->task->SetSynchronization(io_type, tensor_signature_name, sync);
}
TfLiteStatus TfLiteExecutionTaskSetSyncByIndex(TfLiteExecutionTask* task,
int tensor_index,
TfLiteSynchronization* sync) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->SetSynchronization(tensor_index, sync);
}
TfLiteBufferHandle TfLiteExecutionTaskGetBufferByName(
const TfLiteExecutionTask* task, TfLiteIoType io_type,
const char* tensor_signature_name) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteNullBufferHandle;
return task->task->GetBufferHandle(io_type, tensor_signature_name);
}
TfLiteSynchronization* TfLiteExecutionTaskGetSyncByName(
const TfLiteExecutionTask* task, TfLiteIoType io_type,
const char* tensor_signature_name) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return nullptr;
return task->task->GetSynchronization(io_type, tensor_signature_name);
}
TfLiteBufferHandle TfLiteExecutionTaskGetBufferByIndex(
const TfLiteExecutionTask* task, int tensor_index) {
if (task == nullptr || task->task == nullptr) return kTfLiteNullBufferHandle;
return task->task->GetBufferHandle(tensor_index);
}
TfLiteSynchronization* TfLiteExecutionTaskGetSyncByIndex(
const TfLiteExecutionTask* task, int tensor_index) {
if (task == nullptr || task->task == nullptr) return nullptr;
return task->task->GetSynchronization(tensor_index);
}
void* TfLiteExecutionTaskGetDelegateExecutionData(
const TfLiteExecutionTask* task, TfLiteAsyncKernel* kernel) {
if (task == nullptr || task->task == nullptr) return nullptr;
return task->task->GetDelegateExecutionData(kernel);
}
void TfLiteExecutionTaskSetDelegateExecutionData(TfLiteExecutionTask* task,
TfLiteAsyncKernel* kernel,
void* data) {
if (task == nullptr || task->task == nullptr) return;
task->task->SetDelegateExecutionData(kernel, data);
}
TfLiteStatus TfLiteExecutionTaskGetStatus(const TfLiteExecutionTask* task) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->Status();
}
void TfLiteExecutionTaskSetStatus(TfLiteExecutionTask* task,
TfLiteStatus status) {
if (task == nullptr || task->task == nullptr) return;
task->task->SetStatus(status);
}
} | #include "tensorflow/lite/core/async/c/task.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/common.h"
namespace {
class TfLiteExecutionTaskTest : public ::testing::Test {
protected:
void SetUp() override {
input_names_["x"] = 1;
input_names_["y"] = 2;
output_names_["a"] = 3;
task_.task->SetInputNameMap(&input_names_);
task_.task->SetOutputNameMap(&output_names_);
}
TfLiteExecutionTask* task() { return &task_; }
protected:
tflite::async::ExecutionTask::TensorNameMapT input_names_;
tflite::async::ExecutionTask::TensorNameMapT output_names_;
TfLiteExecutionTask task_;
};
TEST_F(TfLiteExecutionTaskTest, BasicTest) {
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetSync(task(), kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(
42, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeInput, "x"));
EXPECT_EQ(
43, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeInput, "y"));
EXPECT_EQ(
44, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(sync,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeInput, "x"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeInput, "y"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeOutput, "a"));
TfLiteSynchronizationDelete(sync);
}
TEST_F(TfLiteExecutionTaskTest, BasicTestByTensorIndex) {
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetSync(task(), kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(42, TfLiteExecutionTaskGetBufferByIndex(task(), 1));
EXPECT_EQ(43, TfLiteExecutionTaskGetBufferByIndex(task(), 2));
EXPECT_EQ(44, TfLiteExecutionTaskGetBufferByIndex(task(), 3));
EXPECT_EQ(sync, TfLiteExecutionTaskGetSyncByIndex(task(), 1));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(task(), 2));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(task(), 3));
TfLiteSynchronizationDelete(sync);
}
TEST_F(TfLiteExecutionTaskTest, NullTest) {
EXPECT_EQ(kTfLiteError,
TfLiteExecutionTaskSetBuffer(nullptr, kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskSetSync(
nullptr, kTfLiteIoTypeInput, "x", nullptr));
EXPECT_EQ(kTfLiteNullBufferHandle, TfLiteExecutionTaskGetBufferByName(
nullptr, kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(nullptr, kTfLiteIoTypeInput, "x"));
EXPECT_EQ(kTfLiteNullBufferHandle,
TfLiteExecutionTaskGetBufferByIndex(nullptr, 3));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(nullptr, 3));
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskGetStatus(nullptr));
TfLiteExecutionTaskSetStatus(nullptr, kTfLiteOk);
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskSetBufferByIndex(nullptr, 0, 0));
EXPECT_EQ(kTfLiteError,
TfLiteExecutionTaskSetSyncByIndex(nullptr, 0, nullptr));
}
TEST_F(TfLiteExecutionTaskTest, StatusTest) {
EXPECT_EQ(kTfLiteOk, TfLiteExecutionTaskGetStatus(task()));
TfLiteExecutionTaskSetStatus(task(), kTfLiteError);
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskGetStatus(task()));
}
} |
959 | cpp | tensorflow/tensorflow | reconcile_fns | tensorflow/lite/core/async/interop/reconcile_fns.cc | tensorflow/lite/core/async/interop/reconcile_fns_test.cc | #ifndef TENSORFLOW_LITE_CORE_ASYNC_INTEROP_RECONCILE_FNS_H_
#define TENSORFLOW_LITE_CORE_ASYNC_INTEROP_RECONCILE_FNS_H_
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
bool ReconcileGeneralAttributeKeys(TfLiteAttrMapType type,
const AttributeMap::ContainerT* lhs,
const AttributeMap::ContainerT* rhs,
AttributeMap::ContainerT* merged,
AttributeMap::ContainerT* conflict);
bool CheckGeneralAttributeKeysCoverage(TfLiteAttrMapType type,
const AttributeMap::ContainerT* lhs,
const AttributeMap::ContainerT* rhs,
AttributeMap::ContainerT* conflict);
}
}
#endif
#include "tensorflow/lite/core/async/interop/reconcile_fns.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
namespace {
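// Greatest-common-divisor / least-common-multiple helpers used to reconcile
// alignment and padding requirements from two attribute maps.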
template <typename T>
T gcd(T x, T y) {
while (y) {
auto m = x % y;
x = y;
y = m;
}
return x;
}
template <typename T>
T lcm(T x, T y) {
return x / gcd(x, y) * y;
}
void ReconcileAlignment(size_t l, size_t r, AttributeMap::ContainerT* merged) {
merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeyAlignment),
lcm(l, r));
}
void ReconcilePadding(size_t l, size_t r, AttributeMap::ContainerT* merged) {
merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeyPadding),
lcm(l, r));
}
bool CheckMultiples(size_t l, size_t r) { return l % r == 0; }
void ReconcileSize(size_t l, size_t r, AttributeMap::ContainerT* merged) {
merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeySize),
std::max(l, r));
}
bool CheckSize(size_t l, size_t r) { return l >= r; }
}
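// Merges two attribute maps over the union of their keys. A key present on
// only one side is copied as-is. For buffer maps, size reconciles to the
// maximum and alignment/padding to the least common multiple; any other key
// (and every key of a sync map) must match exactly, otherwise the mismatching
// value is recorded in `conflict` and the function returns false.
// For example, alignments 8 and 3 reconcile to lcm(8, 3) = 24, while sizes
// 8 and 3 reconcile to max = 8 (see the parameterized tests in the unit-test
// column).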
bool ReconcileGeneralAttributeKeys(TfLiteAttrMapType type,
const AttributeMap::ContainerT* lhs,
const AttributeMap::ContainerT* rhs,
AttributeMap::ContainerT* merged,
AttributeMap::ContainerT* conflict) {
if (lhs == nullptr || rhs == nullptr || merged == nullptr) return false;
bool ret = true;
std::set<uint32_t> keys;
std::transform(lhs->begin(), lhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
std::transform(rhs->begin(), rhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
for (auto k : keys) {
const auto l = lhs->find(k);
const auto r = rhs->find(k);
if (l == lhs->end() || l->second.GetPtr() == nullptr) {
merged->insert_or_assign(k, r->second);
continue;
}
if (r == rhs->end() || r->second.GetPtr() == nullptr) {
merged->insert_or_assign(k, l->second);
continue;
}
if (type == kTfLiteAttrMapTypeBuffer) {
switch (static_cast<TfLiteBufferAttrKey>(k)) {
case kTfLiteBufferAttrKeySize:
ReconcileSize(*l->second.Get<size_t>(), *r->second.Get<size_t>(),
merged);
break;
case kTfLiteBufferAttrKeyAlignment:
ReconcileAlignment(*l->second.Get<size_t>(), *r->second.Get<size_t>(),
merged);
break;
case kTfLiteBufferAttrKeyPadding:
ReconcilePadding(*l->second.Get<size_t>(), *r->second.Get<size_t>(),
merged);
break;
default:
if (l->second == r->second) {
merged->insert_or_assign(k, l->second);
} else {
ret = false;
if (conflict) conflict->insert_or_assign(k, r->second);
}
}
} else {
if (l->second == r->second) {
merged->insert_or_assign(k, l->second);
} else {
ret = false;
if (conflict) conflict->insert_or_assign(k, r->second);
}
}
}
return ret;
}
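// Checks, without merging, that the attributes in `lhs` cover the
// requirements in `rhs`: buffer size and padding must be at least the
// requested values, alignment must be a multiple of the requested alignment,
// and all other keys must match exactly. Unsatisfied keys are recorded in
// `conflict`.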
bool CheckGeneralAttributeKeysCoverage(TfLiteAttrMapType type,
const AttributeMap::ContainerT* lhs,
const AttributeMap::ContainerT* rhs,
AttributeMap::ContainerT* conflict) {
if (lhs == nullptr || rhs == nullptr) return false;
bool ret = true;
std::set<uint32_t> keys;
std::transform(lhs->begin(), lhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
std::transform(rhs->begin(), rhs->end(), std::inserter(keys, keys.end()),
[](auto pair) { return pair.first; });
for (auto k : keys) {
bool has_conflict = false;
const auto l = lhs->find(k);
const auto r = rhs->find(k);
if (r == rhs->end() || r->second.GetPtr() == nullptr) {
continue;
} else if (l == lhs->end() || l->second.GetPtr() == nullptr) {
has_conflict = true;
} else {
if (type == kTfLiteAttrMapTypeBuffer) {
switch (static_cast<TfLiteBufferAttrKey>(k)) {
case kTfLiteBufferAttrKeySize:
has_conflict |=
!CheckSize(*l->second.Get<size_t>(), *r->second.Get<size_t>());
break;
case kTfLiteBufferAttrKeyAlignment:
has_conflict |= !CheckMultiples(*l->second.Get<size_t>(),
*r->second.Get<size_t>());
break;
case kTfLiteBufferAttrKeyPadding:
has_conflict |=
!CheckSize(*l->second.Get<size_t>(), *r->second.Get<size_t>());
break;
default:
if (l->second != r->second) {
has_conflict = true;
}
}
} else {
if (l->second != r->second) {
has_conflict = true;
}
}
}
if (has_conflict) {
if (conflict != nullptr) conflict->insert_or_assign(k, r->second);
ret = false;
}
}
return ret;
}
}
} | #include "tensorflow/lite/core/async/interop/reconcile_fns.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <tuple>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite::interop {
namespace {
using ContainerT = AttributeMap::ContainerT;
template <typename ValT, typename KeyT>
void SetAttr(ContainerT* c, KeyT k, ValT v) {
c->insert_or_assign(static_cast<uint32_t>(k), v);
}
template <typename ValT, typename KeyT>
ValT GetAttr(const ContainerT& c, KeyT k) {
return *(c.at(static_cast<uint32_t>(k)).Get<ValT>());
}
TEST(ReconcileTest, NullCheck) {
ContainerT m1, m2;
EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &m1, &m2,
nullptr,
nullptr));
EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer,
nullptr, &m1, &m2,
nullptr));
EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &m1,
nullptr, &m2,
nullptr));
EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer,
nullptr, &m1, &m2));
EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &m1,
nullptr, &m2));
}
TEST(ReconcileTest, MissingAttributeTest) {
{
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(4, GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment));
}
{
ContainerT lhs, rhs, merged;
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(4, GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment));
}
{
ContainerT lhs, rhs, merged;
const char value[] = "string";
SetAttr(&rhs, kTfLiteSynchronizationAttrKeyObjectTypeName, value);
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeSync, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(value, GetAttr<const char*>(
merged, kTfLiteSynchronizationAttrKeyObjectTypeName));
}
}
TEST(CheckCoverageTest, MissingAttributeTest) {
{
ContainerT lhs, rhs;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer,
&lhs, &rhs, nullptr));
}
{
ContainerT lhs, rhs, merged;
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, size_t(4));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer,
&lhs, &rhs, nullptr));
}
}
class ReconcileAlignmentTest
: public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {};
TEST_P(ReconcileAlignmentTest, Test) {
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, std::get<1>(GetParam()));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(std::get<2>(GetParam()),
GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment));
}
INSTANTIATE_TEST_SUITE_P(ReconcileAlignmentTest, ReconcileAlignmentTest,
testing::Values(std::make_tuple(4, 4, 4),
std::make_tuple(1, 4, 4),
std::make_tuple(8, 4, 8),
std::make_tuple(8, 3, 24)));
class CheckAlignmentTest
: public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {};
TEST_P(CheckAlignmentTest, Test) {
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, std::get<1>(GetParam()));
EXPECT_EQ(std::get<2>(GetParam()),
CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &conflict));
EXPECT_EQ(
!std::get<2>(GetParam()),
conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeyAlignment)));
}
INSTANTIATE_TEST_SUITE_P(CheckAlignmentTest, CheckAlignmentTest,
testing::Values(std::make_tuple(4, 4, true),
std::make_tuple(4, 1, true),
std::make_tuple(1, 4, false)));
class ReconcilePaddingTest
: public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {};
TEST_P(ReconcilePaddingTest, Test) {
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeyPadding, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyPadding, std::get<1>(GetParam()));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(std::get<2>(GetParam()),
GetAttr<size_t>(merged, kTfLiteBufferAttrKeyPadding));
}
INSTANTIATE_TEST_SUITE_P(ReconcilePaddingTest, ReconcilePaddingTest,
testing::Values(std::make_tuple(4, 4, 4),
std::make_tuple(1, 4, 4),
std::make_tuple(8, 4, 8),
std::make_tuple(8, 3, 24)));
class CheckPaddingTest
: public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {};
TEST_P(CheckPaddingTest, Test) {
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, kTfLiteBufferAttrKeyPadding, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeyPadding, std::get<1>(GetParam()));
EXPECT_EQ(std::get<2>(GetParam()),
CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &conflict));
EXPECT_EQ(!std::get<2>(GetParam()),
conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeyPadding)));
}
INSTANTIATE_TEST_SUITE_P(CheckPaddingTest, CheckPaddingTest,
testing::Values(std::make_tuple(4, 4, true),
std::make_tuple(4, 1, true),
std::make_tuple(1, 4, false)));
class ReconcileSizeTest
: public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {};
TEST_P(ReconcileSizeTest, Test) {
ContainerT lhs, rhs, merged;
SetAttr(&lhs, kTfLiteBufferAttrKeySize, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeySize, std::get<1>(GetParam()));
EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(std::get<2>(GetParam()),
GetAttr<size_t>(merged, kTfLiteBufferAttrKeySize));
}
INSTANTIATE_TEST_SUITE_P(ReconcileSizeTest, ReconcileSizeTest,
testing::Values(std::make_tuple(4, 4, 4),
std::make_tuple(1, 4, 4),
std::make_tuple(8, 4, 8),
std::make_tuple(8, 3, 8)));
class CheckSizeTest
: public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {};
TEST_P(CheckSizeTest, Test) {
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, kTfLiteBufferAttrKeySize, std::get<0>(GetParam()));
SetAttr(&rhs, kTfLiteBufferAttrKeySize, std::get<1>(GetParam()));
EXPECT_EQ(std::get<2>(GetParam()),
CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs,
&rhs, &conflict));
EXPECT_EQ(!std::get<2>(GetParam()),
conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeySize)));
}
INSTANTIATE_TEST_SUITE_P(CheckSizeTest, CheckSizeTest,
testing::Values(std::make_tuple(4, 4, true),
std::make_tuple(4, 1, true),
std::make_tuple(1, 4, false)));
class ReconcileNameTest
: public testing::TestWithParam<std::tuple<TfLiteAttrMapType, uint32_t>> {};
TEST_P(ReconcileNameTest, Test) {
constexpr char name_string1[] = "string1";
std::string name_string1_1 = "string1";
constexpr char name_string2[] = "string2";
{
ContainerT lhs, rhs, merged;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string1_1.c_str());
EXPECT_TRUE(ReconcileGeneralAttributeKeys(std::get<0>(GetParam()), &lhs,
&rhs, &merged, nullptr));
EXPECT_EQ(0, strcmp(GetAttr<const char*>(merged, std::get<1>(GetParam())),
name_string1));
}
{
ContainerT lhs, rhs, merged, conflict;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string2);
EXPECT_FALSE(ReconcileGeneralAttributeKeys(std::get<0>(GetParam()), &lhs,
&rhs, &merged, &conflict));
EXPECT_TRUE(conflict.count(std::get<1>(GetParam())));
}
}
INSTANTIATE_TEST_SUITE_P(
ReconcileNameTest, ReconcileNameTest,
testing::Values(
std::make_tuple(
kTfLiteAttrMapTypeBuffer,
static_cast<uint32_t>(kTfLiteBufferAttrKeyResourceTypeName)),
std::make_tuple(kTfLiteAttrMapTypeSync,
static_cast<uint32_t>(
kTfLiteSynchronizationAttrKeyObjectTypeName))));
class CheckNameTest
: public testing::TestWithParam<std::tuple<TfLiteAttrMapType, uint32_t>> {};
TEST_P(CheckNameTest, Test) {
constexpr char name_string1[] = "string1";
std::string name_string1_1 = "string1";
constexpr char name_string2[] = "string2";
{
ContainerT lhs, rhs;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string1_1.c_str());
EXPECT_TRUE(CheckGeneralAttributeKeysCoverage(std::get<0>(GetParam()), &lhs,
&rhs, nullptr));
}
{
ContainerT lhs, rhs, conflict;
SetAttr(&lhs, std::get<1>(GetParam()), name_string1);
SetAttr(&rhs, std::get<1>(GetParam()), name_string2);
EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(std::get<0>(GetParam()),
&lhs, &rhs, &conflict));
EXPECT_TRUE(conflict.count(std::get<1>(GetParam())));
}
}
INSTANTIATE_TEST_SUITE_P(
CheckNameTest, CheckNameTest,
testing::Values(
std::make_tuple(
kTfLiteAttrMapTypeBuffer,
static_cast<uint32_t>(kTfLiteBufferAttrKeyResourceTypeName)),
std::make_tuple(kTfLiteAttrMapTypeSync,
static_cast<uint32_t>(
kTfLiteSynchronizationAttrKeyObjectTypeName))));
}
} |
960 | cpp | tensorflow/tensorflow | attribute_map_internal | tensorflow/lite/core/async/interop/attribute_map_internal.cc | tensorflow/lite/core/async/interop/attribute_map_internal_test.cc | #ifndef TENSORFLOW_LITE_CORE_ASYNC_INTEROP_ATTRIBUTE_MAP_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_ASYNC_INTEROP_ATTRIBUTE_MAP_INTERNAL_H_
#include <cstdint>
#include <map>
#include <string>
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/interop/variant.h"
namespace tflite {
namespace interop {
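// Type-erased attribute container, tagged as either a buffer or a sync map.
// Enum-keyed entries live in attrs_, string-keyed entries in custom_attrs_;
// values are stored as tflite::interop::Variant.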
class AttributeMap {
public:
explicit AttributeMap(TfLiteAttrMapType type) : type_(type) {}
using KeyT = uint32_t;
using CustomKeyT = std::string;
using ValueT = tflite::interop::Variant;
using ContainerT = std::map<KeyT, ValueT>;
using CustomContainerT = std::map<CustomKeyT, ValueT>;
bool IsBufferAttributeMap() const {
return type_ == kTfLiteAttrMapTypeBuffer;
}
bool IsSyncAttributeMap() const { return type_ == kTfLiteAttrMapTypeSync; }
bool ReconcileAttributes(const AttributeMap* other, AttributeMap* merged,
AttributeMap* conflict) const;
bool CheckAttributeCoverage(const AttributeMap* other,
AttributeMap* conflict) const;
template <typename AttrKeyT, typename ValueT>
bool GetAttr(AttrKeyT key, ValueT* value) const {
if (auto it = attrs_.find(static_cast<uint32_t>(key)); it != attrs_.end()) {
if (auto* v = it->second.Get<ValueT>(); v != nullptr) {
*value = *v;
return true;
}
}
return false;
}
template <typename AttrKeyT, typename ValueT>
void SetAttr(AttrKeyT key, ValueT value) {
attrs_.insert_or_assign(static_cast<KeyT>(key), value);
}
template <typename ValueT>
bool GetCustomAttr(CustomKeyT key, ValueT* value) const {
if (auto it = custom_attrs_.find(key); it != custom_attrs_.end()) {
if (auto* v = it->second.Get<ValueT>(); v != nullptr) {
*value = *v;
return true;
}
}
return false;
}
template <typename ValueT>
void SetCustomAttr(CustomKeyT key, ValueT value) {
custom_attrs_.insert_or_assign(key, value);
}
private:
TfLiteAttrMapType type_;
ContainerT attrs_;
CustomContainerT custom_attrs_;
};
}
}
struct TfLiteAttributeMap {
explicit TfLiteAttributeMap(TfLiteAttrMapType type) : impl(type) {}
tflite::interop::AttributeMap impl;
};
#endif
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/reconcile_fns.h"
namespace tflite {
namespace interop {
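// Reconciliation and coverage checks require both maps to share the same
// TfLiteAttrMapType; the per-key work is delegated to the helpers declared in
// reconcile_fns.h.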
bool AttributeMap::ReconcileAttributes(const AttributeMap* other,
AttributeMap* merged,
AttributeMap* conflict) const {
if (other == nullptr || merged == nullptr) return false;
if (type_ != other->type_) return false;
merged->type_ = type_;
if (conflict) conflict->type_ = type_;
return tflite::interop::ReconcileGeneralAttributeKeys(
type_, &attrs_, &other->attrs_, &merged->attrs_,
conflict ? &conflict->attrs_ : nullptr);
}
bool AttributeMap::CheckAttributeCoverage(const AttributeMap* other,
AttributeMap* conflict) const {
if (other == nullptr) return false;
if (type_ != other->type_) return false;
if (conflict) conflict->type_ = type_;
return tflite::interop::CheckGeneralAttributeKeysCoverage(
type_, &attrs_, &other->attrs_, conflict ? &conflict->attrs_ : nullptr);
}
}
} | #include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
namespace {
TEST(AttributeMapTest, TypeTest) {
{
auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_TRUE(attrs.IsBufferAttributeMap());
EXPECT_FALSE(attrs.IsSyncAttributeMap());
}
{
auto attrs = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs.IsSyncAttributeMap());
EXPECT_FALSE(attrs.IsBufferAttributeMap());
}
}
TEST(AttributeMapTest, AccessorTest) {
auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
{
attrs.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
size_t result;
EXPECT_TRUE(attrs.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(8, result);
}
{
attrs.SetCustomAttr("Foo", 12);
int result;
EXPECT_FALSE(attrs.GetCustomAttr("Bar", &result));
EXPECT_TRUE(attrs.GetCustomAttr("Foo", &result));
EXPECT_EQ(12, result);
}
}
TEST(AttributeMapTest, ReconcileFailDifferentTypes) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, &attrs3, nullptr));
EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
}
TEST(AttributeMapTest, NullptrTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(nullptr, &attrs2, nullptr));
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, nullptr, nullptr));
  EXPECT_FALSE(attrs1.CheckAttributeCoverage(nullptr, nullptr));
}
TEST(AttributeMapTest, ReconcileDifferentTypes) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, &attrs3, nullptr));
}
TEST(AttributeMapTest, ReconcileTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs4 = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs1.ReconcileAttributes(&attrs2, &attrs3, &attrs4));
EXPECT_TRUE(attrs3.IsBufferAttributeMap());
EXPECT_TRUE(attrs4.IsBufferAttributeMap());
size_t result;
EXPECT_TRUE(attrs3.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(8, result);
}
TEST(AttributeMapTest, CoverageTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
EXPECT_TRUE(attrs3.IsBufferAttributeMap());
}
TEST(AttributeMapTest, CoverageFailedTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(10));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto conflict = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &conflict));
EXPECT_TRUE(conflict.IsBufferAttributeMap());
size_t result;
EXPECT_TRUE(conflict.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(4, result);
}
}
}
} |
961 | cpp | tensorflow/tensorflow | variant | tensorflow/lite/core/async/interop/variant.cc | tensorflow/lite/core/async/interop/variant_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_VARIANT_H_
#define TENSORFLOW_CORE_FRAMEWORK_VARIANT_H_
#include <functional>
#include <iostream>
#include <memory>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
template <typename T>
std::string TypeNameVariant(const T& value);
template <typename T>
std::string DebugStringVariant(const T& value);
template <typename T>
bool DecodeVariant(VariantTensorData* data, T* value);
template <typename T>
bool DecodeVariant(std::string* buf, T* value);
template <typename T>
void EncodeVariant(const T& value, VariantTensorData* data);
template <typename T>
void EncodeVariant(const T& value, std::string* buf);
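// Type-erased, copyable holder for a single value. Values whose Value<T>
// wrapper fits in the 56-byte inline buffer (with compatible alignment) are
// stored in place; larger values are heap-allocated behind a unique_ptr.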
class Variant {
public:
Variant() noexcept : heap_value_(nullptr), is_inline_(false) {}
~Variant();
Variant(const Variant& other);
Variant(Variant&& other) noexcept;
template <typename T, typename VT = typename std::decay<T>::type,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_move_constructible<VT>::value,
void>::type* = nullptr>
Variant(T&& value);
template <typename T, typename VT = typename std::decay<T>::type,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_copy_constructible<VT>::value,
void>::type* = nullptr>
Variant(const T& value);
template <typename T, typename VT = typename std::decay<T>::type,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_copy_constructible<VT>::value,
void>::type* = nullptr>
Variant& operator=(const T& value);
template <typename T, typename VT = typename std::decay<T>::type,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_move_constructible<VT>::value,
void>::type* = nullptr>
Variant& operator=(T&& value);
Variant& operator=(const Variant& rhs) {
if (&rhs == this) return *this;
Variant(rhs).swap(*this);
return *this;
}
Variant& operator=(Variant&& rhs) noexcept {
if (&rhs == this) return *this;
Variant(std::move(rhs)).swap(*this);
return *this;
}
template <typename T, class... Args>
T& emplace(Args&&... args) {
ResetMemory();
is_inline_ = CanInlineType<T>();
if (is_inline_) {
new (&inline_value_)
InlineValue(InlineValue::Tag<T>{}, std::forward<Args>(args)...);
return static_cast<Variant::Value<T>*>(inline_value_.AsValueInterface())
->value;
} else {
new (&heap_value_) HeapValue(
absl::make_unique<Value<T>>(InPlace(), std::forward<Args>(args)...));
return static_cast<Variant::Value<T>*>(heap_value_.get())->value;
}
}
bool is_empty() const { return GetValue() == nullptr; }
void clear() noexcept;
void swap(Variant& other) noexcept;
TypeIndex TypeId() const {
const TypeIndex VoidTypeIndex = TypeIndex::Make<void>();
if (is_empty()) {
return VoidTypeIndex;
}
return GetValue()->TypeId();
}
std::string DebugString() const {
return strings::StrCat("Variant<type: ", TypeName(),
" value: ", SummarizeValue(), ">");
}
std::string SummarizeValue() const {
return is_empty() ? "[empty]" : GetValue()->DebugString();
}
template <typename T>
T* get() {
const TypeIndex TTypeIndex = TypeIndex::Make<T>();
if (is_empty() || (TTypeIndex != TypeId())) return nullptr;
return std::addressof(static_cast<Variant::Value<T>*>(GetValue())->value);
}
template <typename T>
const T* get() const {
const TypeIndex TTypeIndex = TypeIndex::Make<T>();
if (is_empty() || (TTypeIndex != TypeId())) return nullptr;
return std::addressof(
static_cast<const Variant::Value<T>*>(GetValue())->value);
}
std::string TypeName() const {
if (is_empty()) {
return "";
}
return GetValue()->TypeName();
}
void Encode(VariantTensorData* data) const {
if (!is_empty()) {
GetValue()->Encode(data);
}
}
bool Decode(VariantTensorData data);
void Encode(std::string* buf) const {
if (!is_empty()) {
GetValue()->Encode(buf);
}
}
bool Decode(std::string buf) {
if (!is_empty()) {
return GetValue()->Decode(std::move(buf));
}
return true;
}
template <typename VT>
static constexpr bool CanInlineType() {
return ((sizeof(Value<VT>) <= InlineValue::kMaxValueSize) &&
(alignof(Value<VT>) <= kMaxInlineValueAlignSize));
}
private:
struct in_place_t {};
static constexpr in_place_t InPlace() { return in_place_t{}; }
struct ValueInterface {
virtual ~ValueInterface() = default;
virtual TypeIndex TypeId() const = 0;
virtual void* RawPtr() = 0;
virtual const void* RawPtr() const = 0;
virtual std::unique_ptr<ValueInterface> Clone() const = 0;
virtual void CloneInto(ValueInterface* memory) const = 0;
virtual void MoveAssign(ValueInterface* memory) = 0;
virtual void MoveInto(ValueInterface* memory) = 0;
virtual std::string TypeName() const = 0;
virtual std::string DebugString() const = 0;
virtual void Encode(VariantTensorData* data) const = 0;
virtual bool Decode(VariantTensorData data) = 0;
virtual void Encode(std::string* buf) const = 0;
virtual bool Decode(std::string data) = 0;
};
template <typename T>
struct Value final : ValueInterface {
template <class... Args>
explicit Value(in_place_t , Args&&... args)
: value(std::forward<Args>(args)...) {}
~Value() final = default;
TypeIndex TypeId() const final {
const TypeIndex value_type_index =
TypeIndex::Make<typename std::decay<T>::type>();
return value_type_index;
}
void* RawPtr() final { return &value; }
const void* RawPtr() const final { return &value; }
std::unique_ptr<ValueInterface> Clone() const final {
return absl::make_unique<Value>(InPlace(), value);
}
void MoveAssign(ValueInterface* memory) final {
CHECK(TypeId() == memory->TypeId())
<< TypeId().name() << " vs. " << memory->TypeId().name();
static_cast<Value*>(memory)->value = std::move(value);
}
void CloneInto(ValueInterface* memory) const final {
new (memory) Value(InPlace(), value);
}
void MoveInto(ValueInterface* memory) final {
new (memory) Value(InPlace(), std::move(value));
}
std::string TypeName() const final { return TypeNameVariant(value); }
std::string DebugString() const final { return DebugStringVariant(value); }
void Encode(VariantTensorData* data) const final {
EncodeVariant(value, data);
}
bool Decode(VariantTensorData data) final {
return DecodeVariant(&data, &value);
}
void Encode(std::string* buf) const final { EncodeVariant(value, buf); }
bool Decode(std::string buf) final { return DecodeVariant(&buf, &value); }
T value;
};
static constexpr int kMaxInlineValueAlignSize = alignof(Value<void*>);
using HeapValue = std::unique_ptr<ValueInterface>;
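  // In-place storage used when CanInlineType<VT>() holds; the value is
  // constructed directly inside value_data, avoiding a heap allocation.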
struct InlineValue {
static constexpr int kMaxValueSize = (64 - 8);
typedef char ValueDataArray[kMaxValueSize];
alignas(kMaxInlineValueAlignSize) ValueDataArray value_data;
template <typename VT>
struct Tag {};
template <typename VT, class... Args>
explicit InlineValue(Tag<VT> , Args&&... args) noexcept {
Value<VT>* inline_value_data = reinterpret_cast<Value<VT>*>(value_data);
new (inline_value_data) Value<VT>(InPlace(), std::forward<Args>(args)...);
}
InlineValue(const InlineValue& other) noexcept {
other.AsValueInterface()->CloneInto(AsValueInterface());
}
InlineValue(InlineValue&& other) noexcept {
other.AsValueInterface()->MoveInto(AsValueInterface());
}
void ResetMemory() { AsValueInterface()->~ValueInterface(); }
InlineValue& operator=(const InlineValue& other) {
if (&other == this) return *this;
ResetMemory();
other.AsValueInterface()->CloneInto(AsValueInterface());
return *this;
}
InlineValue& operator=(InlineValue&& other) {
if (&other == this) return *this;
if (AsValueInterface()->TypeId() == other.AsValueInterface()->TypeId()) {
other.AsValueInterface()->MoveAssign(AsValueInterface());
} else {
ResetMemory();
other.AsValueInterface()->MoveInto(AsValueInterface());
}
return *this;
}
ValueInterface* AsValueInterface() {
return reinterpret_cast<ValueInterface*>(value_data);
}
const ValueInterface* AsValueInterface() const {
return reinterpret_cast<const ValueInterface*>(value_data);
}
~InlineValue() { ResetMemory(); }
};
union {
HeapValue heap_value_;
InlineValue inline_value_;
};
bool is_inline_;
bool IsInlineValue() const { return is_inline_; }
void ResetMemory() {
if (IsInlineValue()) {
inline_value_.~InlineValue();
} else {
heap_value_.~HeapValue();
}
}
template <typename... Args>
void ResetAndSetInline(Args&&... args) noexcept {
ResetMemory();
new (&inline_value_) InlineValue(std::forward<Args>(args)...);
is_inline_ = true;
}
template <typename... Args>
void ResetAndSetHeap(Args&&... args) noexcept {
ResetMemory();
new (&heap_value_) HeapValue(std::forward<Args>(args)...);
is_inline_ = false;
}
ValueInterface* GetValue() {
if (IsInlineValue()) {
return inline_value_.AsValueInterface();
} else {
return heap_value_.get();
}
}
const ValueInterface* GetValue() const {
if (IsInlineValue()) {
return inline_value_.AsValueInterface();
} else {
return heap_value_.get();
}
}
template <typename VT, typename T>
void InsertValue(T&& value) {
if (IsInlineValue()) {
new (&inline_value_)
InlineValue(InlineValue::Tag<VT>{}, std::forward<T>(value));
} else {
new (&heap_value_) HeapValue(
absl::make_unique<Value<VT>>(InPlace(), std::forward<T>(value)));
}
}
};
static_assert(sizeof(Variant) <= 64,
"Expected internal representation to be 64 bytes.");
inline Variant::Variant(const Variant& other)
: is_inline_(other.IsInlineValue()) {
if (IsInlineValue()) {
new (&inline_value_) InlineValue(other.inline_value_);
} else {
new (&heap_value_)
HeapValue(other.heap_value_ ? other.heap_value_->Clone() : nullptr);
}
}
inline Variant::Variant(Variant&& other) noexcept
: is_inline_(other.IsInlineValue()) {
if (IsInlineValue()) {
new (&inline_value_) InlineValue(std::move(other.inline_value_));
} else {
new (&heap_value_) HeapValue(std::move(other.heap_value_));
}
}
template <typename T, typename VT,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_move_constructible<VT>::value,
void>::type*>
inline Variant::Variant(T&& value) : is_inline_(CanInlineType<VT>()) {
InsertValue<VT>(std::forward<T>(value));
}
template <typename T, typename VT,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_copy_constructible<VT>::value,
void>::type*>
inline Variant::Variant(const T& value) : is_inline_(CanInlineType<VT>()) {
InsertValue<VT>(value);
}
template <typename T, typename VT,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_move_constructible<VT>::value,
void>::type*>
inline Variant& Variant::operator=(T&& value) {
ResetMemory();
is_inline_ = CanInlineType<VT>();
InsertValue<VT>(std::forward<T>(value));
return *this;
}
template <typename T, typename VT,
typename std::enable_if<!std::is_same<Variant, VT>::value &&
std::is_copy_constructible<VT>::value,
void>::type*>
inline Variant& Variant::operator=(const T& value) {
ResetMemory();
is_inline_ = CanInlineType<VT>();
InsertValue<VT>(value);
return *this;
}
inline void Variant::clear() noexcept {
ResetAndSetHeap(nullptr);
}
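// Swap handles every combination of inline and heap storage; the mixed cases
// route one side through a temporary HeapValue.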
inline void Variant::swap(Variant& other) noexcept {
if (is_empty()) {
if (other.IsInlineValue()) {
ResetAndSetInline(std::move(other.inline_value_));
} else {
ResetAndSetHeap(std::move(other.heap_value_));
}
other.clear();
} else if (other.is_empty()) {
if (IsInlineValue()) {
other.ResetAndSetInline(std::move(inline_value_));
} else {
other.ResetAndSetHeap(std::move(heap_value_));
}
clear();
} else {
if (other.IsInlineValue() && IsInlineValue()) {
std::swap(inline_value_, other.inline_value_);
} else if (!other.IsInlineValue() && !IsInlineValue()) {
std::swap(heap_value_, other.heap_value_);
} else if (other.IsInlineValue() && !IsInlineValue()) {
HeapValue v = std::move(heap_value_);
ResetAndSetInline(std::move(other.inline_value_));
other.ResetAndSetHeap(std::move(v));
} else {
HeapValue v = std::move(other.heap_value_);
other.ResetAndSetInline(std::move(inline_value_));
ResetAndSetHeap(std::move(v));
}
}
}
template <>
void* Variant::get();
template <>
const void* Variant::get() const;
}
#endif
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_op_registry.h"
namespace tensorflow {
Variant::~Variant() { ResetMemory(); }
bool Variant::Decode(VariantTensorData data) {
if (!is_empty()) {
return GetValue()->Decode(std::move(data));
}
return true;
}
template <>
void* Variant::get() {
if (is_empty()) {
return nullptr;
}
return GetValue()->RawPtr();
}
template <>
const void* Variant::get() const {
if (is_empty()) {
return nullptr;
}
return GetValue()->RawPtr();
}
template <>
string TypeNameVariant(const VariantTensorDataProto& value) {
return value.type_name();
}
template <>
void EncodeVariant(const VariantTensorDataProto& value,
VariantTensorData* data) {
data->FromConstProto(value);
}
template <>
bool DecodeVariant(VariantTensorData* data, VariantTensorDataProto* value) {
data->ToProto(value);
return true;
}
template <>
void EncodeVariant(const VariantTensorDataProto& value, string* buf) {
value.SerializeToString(buf);
}
template <>
bool DecodeVariant(string* buf, VariantTensorDataProto* value) {
return value->ParseFromString(*buf);
}
void EncodeVariantList(const Variant* variant_array, int64_t n,
std::unique_ptr<port::StringListEncoder> e) {
for (int i = 0; i < n; ++i) {
string s;
variant_array[i].Encode(&s);
e->Append(s);
}
e->Finalize();
}
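// Decodes `n` serialized variants: empty slots are seeded with a
// VariantTensorDataProto, then each entry is decoded and dispatched through
// DecodeUnaryVariant.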
bool DecodeVariantList(std::unique_ptr<port::StringListDecoder> d,
Variant* variant_array, int64_t n) {
std::vector<uint32> sizes(n);
if (!d->ReadSizes(&sizes)) return false;
for (int i = 0; i < n; ++i) {
if (variant_array[i].is_empty()) {
variant_array[i] = VariantTensorDataProto();
}
string str(d->Data(sizes[i]), sizes[i]);
if (!variant_array[i].Decode(std::move(str))) return false;
if (!DecodeUnaryVariant(&variant_array[i])) {
LOG(ERROR) << "Could not decode variant with type_name: \""
<< variant_array[i].TypeName()
<< "\". Perhaps you forgot to register a "
"decoder via REGISTER_UNARY_VARIANT_DECODE_FUNCTION?";
return false;
}
}
return true;
}
} | #include "tensorflow/core/framework/variant.h"
#include <cstddef>
#if defined(__x86_64__)
#include <xmmintrin.h>
#endif
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
template <typename T, bool BIG>
struct Wrapper {
T value;
char big[BIG ? 256 : 1];
string TypeName() const { return "POD"; }
};
template <bool BIG>
using Int = Wrapper<int, BIG>;
template <bool BIG>
using Float = Wrapper<float, BIG>;
template <bool BIG>
class MaybeAlive {
public:
MaybeAlive() : alive_(false) {}
explicit MaybeAlive(bool alive) : alive_(alive) {
if (alive) ++live_counter_;
}
~MaybeAlive() {
if (alive_) --live_counter_;
}
MaybeAlive(const MaybeAlive& rhs) : alive_(rhs.alive_) {
if (alive_) ++live_counter_;
}
MaybeAlive& operator=(const MaybeAlive& rhs) {
if (this == &rhs) return *this;
if (alive_) --live_counter_;
alive_ = rhs.alive_;
if (alive_) ++live_counter_;
return *this;
}
MaybeAlive(MaybeAlive&& rhs) : alive_(false) {
alive_ = std::move(rhs.alive_);
if (alive_) ++live_counter_;
}
MaybeAlive& operator=(MaybeAlive&& rhs) {
if (this == &rhs) return *this;
if (alive_) --live_counter_;
alive_ = std::move(rhs.alive_);
if (alive_) ++live_counter_;
return *this;
}
static int LiveCounter() { return live_counter_; }
string TypeName() const { return "MaybeAlive"; }
void Encode(VariantTensorData* data) const {}
bool Decode(VariantTensorData data) { return false; }
private:
bool alive_;
char big_[BIG ? 256 : 1];
static int live_counter_;
};
template <>
int MaybeAlive<false>::live_counter_ = 0;
template <>
int MaybeAlive<true>::live_counter_ = 0;
template <bool BIG>
class DeleteCounter {
public:
DeleteCounter() : big_{}, counter_(nullptr) {}
explicit DeleteCounter(int* counter) : big_{}, counter_(counter) {}
~DeleteCounter() {
if (counter_) ++*counter_;
}
DeleteCounter& operator=(const DeleteCounter& rhs) = default;
DeleteCounter& operator=(DeleteCounter&& rhs) {
if (this == &rhs) return *this;
counter_ = rhs.counter_;
rhs.counter_ = nullptr;
return *this;
}
DeleteCounter(DeleteCounter&& rhs) {
counter_ = rhs.counter_;
rhs.counter_ = nullptr;
}
DeleteCounter(const DeleteCounter& rhs) = default;
char big_[BIG ? 256 : 1];
int* counter_;
string TypeName() const { return "DeleteCounter"; }
void Encode(VariantTensorData* data) const {}
bool Decode(VariantTensorData data) { return false; }
};
}
TEST(VariantTest, MoveAndCopyBetweenBigAndSmall) {
Variant x;
int deleted_big = 0;
int deleted_small = 0;
x = DeleteCounter<true>(&deleted_big);
EXPECT_EQ(deleted_big, 0);
x = DeleteCounter<false>(&deleted_small);
EXPECT_EQ(deleted_big, 1);
EXPECT_EQ(deleted_small, 0);
x = DeleteCounter<true>(&deleted_big);
EXPECT_EQ(deleted_big, 1);
EXPECT_EQ(deleted_small, 1);
x.clear();
EXPECT_EQ(deleted_big, 2);
EXPECT_EQ(deleted_small, 1);
DeleteCounter<true> big(&deleted_big);
DeleteCounter<false> small(&deleted_small);
EXPECT_EQ(deleted_big, 2);
EXPECT_EQ(deleted_small, 1);
x = big;
EXPECT_EQ(deleted_big, 2);
EXPECT_EQ(deleted_small, 1);
x = small;
EXPECT_EQ(deleted_big, 3);
EXPECT_EQ(deleted_small, 1);
x = std::move(big);
EXPECT_EQ(deleted_big, 3);
EXPECT_EQ(deleted_small, 2);
x = std::move(small);
EXPECT_EQ(deleted_big, 4);
EXPECT_EQ(deleted_small, 2);
x.clear();
EXPECT_EQ(deleted_big, 4);
EXPECT_EQ(deleted_small, 3);
}
TEST(VariantTest, MoveAndCopyBetweenBigAndSmallVariants) {
int deleted_big = 0;
int deleted_small = 0;
{
Variant x = DeleteCounter<true>(&deleted_big);
Variant y = DeleteCounter<false>(&deleted_small);
EXPECT_EQ(deleted_big, 0);
EXPECT_EQ(deleted_small, 0);
x = y;
EXPECT_EQ(deleted_big, 1);
EXPECT_EQ(deleted_small, 0);
x = x;
EXPECT_EQ(deleted_big, 1);
EXPECT_EQ(deleted_small, 0);
EXPECT_NE(x.get<DeleteCounter<false>>(), nullptr);
EXPECT_NE(y.get<DeleteCounter<false>>(), nullptr);
x = std::move(y);
EXPECT_EQ(deleted_small, 1);
EXPECT_NE(x.get<DeleteCounter<false>>(), nullptr);
}
EXPECT_EQ(deleted_big, 1);
EXPECT_EQ(deleted_small, 2);
deleted_big = 0;
deleted_small = 0;
{
Variant x = DeleteCounter<false>(&deleted_small);
Variant y = DeleteCounter<true>(&deleted_big);
EXPECT_EQ(deleted_big, 0);
EXPECT_EQ(deleted_small, 0);
x = y;
EXPECT_EQ(deleted_big, 0);
EXPECT_EQ(deleted_small, 1);
x = x;
EXPECT_EQ(deleted_big, 0);
EXPECT_EQ(deleted_small, 1);
EXPECT_NE(x.get<DeleteCounter<true>>(), nullptr);
EXPECT_NE(y.get<DeleteCounter<true>>(), nullptr);
x = std::move(y);
EXPECT_EQ(deleted_big, 1);
EXPECT_NE(x.get<DeleteCounter<true>>(), nullptr);
}
EXPECT_EQ(deleted_big, 2);
EXPECT_EQ(deleted_small, 1);
}
namespace {
template <bool BIG>
class MoveAndCopyCounter {
public:
MoveAndCopyCounter()
: big_{}, move_counter_(nullptr), copy_counter_(nullptr) {}
explicit MoveAndCopyCounter(int* move_counter, int* copy_counter)
: big_{}, move_counter_(move_counter), copy_counter_(copy_counter) {}
MoveAndCopyCounter& operator=(const MoveAndCopyCounter& rhs) {
copy_counter_ = rhs.copy_counter_;
if (copy_counter_) ++*copy_counter_;
return *this;
}
MoveAndCopyCounter& operator=(MoveAndCopyCounter&& rhs) {
move_counter_ = rhs.move_counter_;
if (move_counter_) ++*move_counter_;
return *this;
}
MoveAndCopyCounter(MoveAndCopyCounter&& rhs) {
move_counter_ = rhs.move_counter_;
if (move_counter_) ++*move_counter_;
}
MoveAndCopyCounter(const MoveAndCopyCounter& rhs) {
copy_counter_ = rhs.copy_counter_;
if (copy_counter_) ++*copy_counter_;
}
char big_[BIG ? 256 : 1];
int* move_counter_;
int* copy_counter_;
string TypeName() const { return "MoveAndCopyCounter"; }
void Encode(VariantTensorData* data) const {}
bool Decode(VariantTensorData data) { return false; }
};
}
TEST(VariantTest, EmplaceBigAndSmallVariants) {
{
int moved_big = 0;
int moved_small = 0;
int copied_big = 0;
int copied_small = 0;
Variant x = MoveAndCopyCounter<true>(&moved_big, &copied_big);
EXPECT_EQ(moved_big, 1);
EXPECT_EQ(copied_big, 0);
Variant y = MoveAndCopyCounter<false>(&moved_small, &copied_small);
EXPECT_EQ(moved_small, 1);
EXPECT_EQ(copied_small, 0);
}
{
int moved_big = 0;
int moved_small = 0;
int copied_big = 0;
int copied_small = 0;
Variant x(MoveAndCopyCounter<true>(&moved_big, &copied_big));
EXPECT_EQ(moved_big, 1);
EXPECT_EQ(copied_big, 0);
Variant y(MoveAndCopyCounter<false>(&moved_small, &copied_small));
EXPECT_EQ(moved_small, 1);
EXPECT_EQ(copied_small, 0);
}
{
int moved_big = 0;
int moved_small = 0;
int copied_big = 0;
int copied_small = 0;
Variant x;
x.emplace<MoveAndCopyCounter<true>>(&moved_big, &copied_big);
EXPECT_EQ(moved_big, 0);
EXPECT_EQ(copied_big, 0);
Variant y;
y.emplace<MoveAndCopyCounter<false>>(&moved_small, &copied_small);
EXPECT_EQ(moved_small, 0);
EXPECT_EQ(copied_small, 0);
}
}
template <bool BIG>
void TestDestructOnVariantMove() {
CHECK_EQ(MaybeAlive<BIG>::LiveCounter(), 0);
{
Variant a = MaybeAlive<BIG>(true);
Variant b = std::move(a);
}
EXPECT_EQ(MaybeAlive<BIG>::LiveCounter(), 0);
}
TEST(VariantTest, RHSDestructOnVariantMoveBig) {
TestDestructOnVariantMove<true>();
}
TEST(VariantTest, RHSDestructOnVariantMoveSmall) {
TestDestructOnVariantMove<false>();
}
TEST(VariantTest, Int) {
Variant x;
EXPECT_EQ(x.get<void>(), nullptr);
x = 3;
EXPECT_NE(x.get<void>(), nullptr);
EXPECT_EQ(*x.get<int>(), 3);
EXPECT_EQ(x.TypeName(), "int");
}
#if defined(__x86_64__)
struct MayCreateAlignmentDifficulties {
int a;
__m128 b;
};
bool M128AllEqual(const __m128& a, const __m128& b) {
return _mm_movemask_ps(_mm_cmpeq_ps(a, b)) == 0xf;
}
TEST(VariantTest, NotAlignable) {
Variant x;
EXPECT_EQ(x.get<void>(), nullptr);
__m128 v = _mm_set_ps(1.0, 2.0, 3.0, 4.0);
x = MayCreateAlignmentDifficulties{-1, v};
EXPECT_NE(x.get<void>(), nullptr);
auto* x_val = x.get<MayCreateAlignmentDifficulties>();
Variant y = x;
EXPECT_EQ(x_val->a, -1);
EXPECT_TRUE(M128AllEqual(x_val->b, v));
auto* y_val = y.get<MayCreateAlignmentDifficulties>();
EXPECT_EQ(y_val->a, -1);
EXPECT_TRUE(M128AllEqual(y_val->b, v));
Variant z = std::move(y);
auto* z_val = z.get<MayCreateAlignmentDifficulties>();
EXPECT_EQ(z_val->a, -1);
EXPECT_TRUE(M128AllEqual(z_val->b, v));
}
#endif
template <bool BIG>
void TestBasic() {
Variant x;
EXPECT_EQ(x.get<void>(), nullptr);
x = Int<BIG>{42};
EXPECT_NE(x.get<void>(), nullptr);
EXPECT_NE(x.get<Int<BIG>>(), nullptr);
EXPECT_EQ(x.get<Int<BIG>>()->value, 42);
EXPECT_EQ(x.TypeName(), "POD");
}
TEST(VariantTest, Basic) { TestBasic<false>(); }
TEST(VariantTest, BasicBig) { TestBasic<true>(); }
template <bool BIG>
void TestConstGet() {
Variant x;
EXPECT_EQ(x.get<void>(), nullptr);
x = Int<BIG>{42};
const Variant y = x;
EXPECT_NE(y.get<void>(), nullptr);
EXPECT_NE(y.get<Int<BIG>>(), nullptr);
EXPECT_EQ(y.get<Int<BIG>>()->value, 42);
}
TEST(VariantTest, ConstGet) { TestConstGet<false>(); }
TEST(VariantTest, ConstGetBig) { TestConstGet<true>(); }
template <bool BIG>
void TestClear() {
Variant x;
EXPECT_EQ(x.get<void>(), nullptr);
x = Int<BIG>{42};
EXPECT_NE(x.get<void>(), nullptr);
EXPECT_NE(x.get<Int<BIG>>(), nullptr);
EXPECT_EQ(x.get<Int<BIG>>()->value, 42);
x.clear();
EXPECT_EQ(x.get<void>(), nullptr);
}
TEST(VariantTest, Clear) { TestClear<false>(); }
TEST(VariantTest, ClearBig) { TestClear<true>(); }
template <bool BIG>
void TestClearDeletes() {
Variant x;
EXPECT_EQ(x.get<void>(), nullptr);
int deleted_count = 0;
using DC = DeleteCounter<BIG>;
DC dc(&deleted_count);
EXPECT_EQ(deleted_count, 0);
x = dc;
EXPECT_EQ(deleted_count, 0);
EXPECT_NE(x.get<void>(), nullptr);
EXPECT_NE(x.get<DC>(), nullptr);
x.clear();
EXPECT_EQ(x.get<void>(), nullptr);
EXPECT_EQ(deleted_count, 1);
x = dc;
EXPECT_EQ(deleted_count, 1);
Variant y = x;
EXPECT_EQ(deleted_count, 1);
x.clear();
EXPECT_EQ(deleted_count, 2);
y.clear();
EXPECT_EQ(deleted_count, 3);
}
TEST(VariantTest, ClearDeletesOnHeap) { TestClearDeletes<true>(); }
TEST(VariantTest, ClearDeletesOnStack) { TestClearDeletes<false>(); }
TEST(VariantTest, Tensor) {
Variant x;
Tensor t(DT_FLOAT, {});
t.flat<float>()(0) = 42.0f;
x = t;
EXPECT_NE(x.get<Tensor>(), nullptr);
EXPECT_EQ(x.get<Tensor>()->flat<float>()(0), 42.0f);
x.get<Tensor>()->flat<float>()(0) += 1.0f;
EXPECT_EQ(x.get<Tensor>()->flat<float>()(0), 43.0f);
EXPECT_EQ(x.TypeName(), "tensorflow::Tensor");
Tensor& foo_t = x.emplace<Tensor>("foo");
EXPECT_NE(x.get<Tensor>(), nullptr);
EXPECT_EQ(x.get<Tensor>()->scalar<tstring>()(), "foo");
EXPECT_EQ(&foo_t, x.get<Tensor>());
EXPECT_EQ(x.TypeName(), "tensorflow::Tensor");
Tensor& bar_t = x.emplace<Tensor>(DT_INT64, TensorShape({1}));
EXPECT_EQ(&bar_t, x.get<Tensor>());
bar_t.vec<int64_t>()(0) = 17;
EXPECT_EQ(x.get<Tensor>()->vec<int64_t>()(0), 17);
bar_t.vec<int64_t>()(0) += 1;
EXPECT_EQ(x.get<Tensor>()->vec<int64_t>()(0), 18);
}
TEST(VariantTest, NontrivialTensorVariantCopy) {
Tensor variants(DT_VARIANT, {});
Tensor t(true);
test::FillValues<Variant>(&variants, absl::Span<const Variant>({t}));
const Tensor* t_c = variants.flat<Variant>()(0).get<Tensor>();
EXPECT_EQ(t_c->dtype(), t.dtype());
EXPECT_EQ(t_c->shape(), t.shape());
EXPECT_EQ(t_c->scalar<bool>()(), t.scalar<bool>()());
}
TEST(VariantTest, TensorProto) {
Variant x;
TensorProto t;
t.set_dtype(DT_FLOAT);
t.mutable_tensor_shape()->set_unknown_rank(true);
x = t;
EXPECT_EQ(x.TypeName(), "tensorflow.TensorProto");
EXPECT_NE(x.get<TensorProto>(), nullptr);
EXPECT_EQ(x.get<TensorProto>()->dtype(), DT_FLOAT);
EXPECT_EQ(x.get<TensorProto>()->tensor_shape().unknown_rank(), true);
}
template <bool BIG>
void TestCopyValue() {
Variant x, y;
x = Int<BIG>{10};
y = x;
EXPECT_EQ(x.get<Int<BIG>>()->value, 10);
EXPECT_EQ(x.get<Int<BIG>>()->value, y.get<Int<BIG>>()->value);
}
TEST(VariantTest, CopyValue) { TestCopyValue<false>(); }
TEST(VariantTest, CopyValueBig) { TestCopyValue<true>(); }
template <bool BIG>
void TestMoveValue() {
Variant x;
x = []() -> Variant {
Variant y;
y = Int<BIG>{10};
return y;
}();
EXPECT_EQ(x.get<Int<BIG>>()->value, 10);
}
TEST(VariantTest, MoveValue) { TestMoveValue<false>(); }
TEST(VariantTest, MoveValueBig) { TestMoveValue<true>(); }
TEST(VariantTest, TypeMismatch) {
Variant x;
x = Int<false>{10};
EXPECT_EQ(x.get<float>(), nullptr);
EXPECT_EQ(x.get<int>(), nullptr);
EXPECT_NE(x.get<Int<false>>(), nullptr);
}
struct TensorList {
void Encode(VariantTensorData* data) const { data->tensors_ = vec; }
bool Decode(VariantTensorData data) {
vec = std::move(data.tensors_);
return true;
}
string TypeName() const { return "TensorList"; }
std::vector<Tensor> vec;
};
TEST(VariantTest, TensorListTest) {
Variant x;
TensorList vec;
for (int i = 0; i < 4; ++i) {
Tensor elem(DT_INT32, {1});
elem.flat<int>()(0) = i;
vec.vec.push_back(elem);
}
for (int i = 0; i < 4; ++i) {
Tensor elem(DT_FLOAT, {1});
elem.flat<float>()(0) = 2 * i;
vec.vec.push_back(elem);
}
x = vec;
EXPECT_EQ(x.TypeName(), "TensorList");
EXPECT_EQ(x.DebugString(), "Variant<type: TensorList value: ?>");
const TensorList& stored_vec = *x.get<TensorList>();
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(stored_vec.vec[i].flat<int>()(0), i);
}
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(stored_vec.vec[i + 4].flat<float>()(0), 2 * i);
}
VariantTensorData serialized;
x.Encode(&serialized);
Variant y = TensorList();
y.Decode(serialized);
const TensorList& decoded_vec = *y.get<TensorList>();
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(decoded_vec.vec[i].flat<int>()(0), i);
}
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(decoded_vec.vec[i + 4].flat<float>()(0), 2 * i);
}
VariantTensorDataProto data;
serialized.ToProto(&data);
const Variant y_unknown = data;
EXPECT_EQ(y_unknown.TypeName(), "TensorList");
EXPECT_EQ(y_unknown.TypeId(), TypeIndex::Make<VariantTensorDataProto>());
EXPECT_EQ(y_unknown.DebugString(),
strings::StrCat(
"Variant<type: TensorList value: ", data.DebugString(), ">"));
}
template <bool BIG>
void TestVariantArray() {
Variant x[2];
x[0] = Int<BIG>{2};
x[1] = Float<BIG>{2.0f};
EXPECT_EQ(x[0].get<Int<BIG>>()->value, 2);
EXPECT_EQ(x[1].get<Float<BIG>>()->value, 2.0f);
}
TEST(VariantTest, VariantArray) { TestVariantArray<false>(); }
TEST(VariantTest, VariantArrayBig) { TestVariantArray<true>(); }
template <bool BIG>
void PodUpdateTest() {
struct Pod {
int x;
float y;
char big[BIG ? 256 : 1];
string TypeName() const { return "POD"; }
};
Variant x = Pod{10, 20.f};
EXPECT_NE(x.get<Pod>(), nullptr);
EXPECT_EQ(x.TypeName(), "POD");
EXPECT_EQ(x.DebugString(), "Variant<type: POD value: ?>");
x.get<Pod>()->x += x.get<Pod>()->y;
EXPECT_EQ(x.get<Pod>()->x, 30);
}
TEST(VariantTest, PodUpdate) { PodUpdateTest<false>(); }
TEST(VariantTest, PodUpdateBig) { PodUpdateTest<true>(); }
template <bool BIG>
void TestEncodeDecodePod() {
struct Pod {
int x;
float y;
char big[BIG ? 256 : 1];
string TypeName() const { return "POD"; }
};
Variant x;
Pod p{10, 20.0f};
x = p;
VariantTensorData serialized;
x.Encode(&serialized);
Variant y = Pod{};
y.Decode(serialized);
EXPECT_EQ(p.x, y.get<Pod>()->x);
EXPECT_EQ(p.y, y.get<Pod>()->y);
}
TEST(VariantTest, EncodeDecodePod) { TestEncodeDecodePod<false>(); }
TEST(VariantTest, EncodeDecodePodBig) { TestEncodeDecodePod<true>(); }
TEST(VariantTest, EncodeDecodeTensor) {
Variant x;
Tensor t(DT_INT32, {});
t.flat<int>()(0) = 42;
x = t;
VariantTensorData serialized;
x.Encode(&serialized);
Variant y = Tensor();
y.Decode(serialized);
EXPECT_EQ(y.DebugString(),
"Variant<type: tensorflow::Tensor value: Tensor<type: int32 shape: "
"[] values: 42>>");
EXPECT_EQ(x.get<Tensor>()->flat<int>()(0), y.get<Tensor>()->flat<int>()(0));
}
TEST(BoolVariantTest, DecodeNonBool) {
Tensor parsed(DT_VARIANT);
TensorProto tensor_proto;
tensor_proto.set_dtype(DT_VARIANT);
VariantTensorDataProto* variant = tensor_proto.add_variant_val();
variant->set_type_name("bool");
variant->set_metadata("-");
EXPECT_TRUE(parsed.FromProto(tensor_proto));
EXPECT_EQ(parsed.NumElements(), 1);
EXPECT_TRUE(parsed.flat<Variant>()(0).get<bool>());
}
} |
962 | cpp | tensorflow/tensorflow | attribute_map | third_party/xla/xla/ffi/attribute_map.cc | third_party/xla/xla/python/ifrt/attribute_map_test.cc | #ifndef XLA_FFI_ATTRIBUTE_MAP_H_
#define XLA_FFI_ATTRIBUTE_MAP_H_
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "xla/ffi/call_frame.h"
namespace xla::ffi {
absl::StatusOr<CallFrameBuilder::FlatAttributesMap> BuildAttributesMap(
mlir::DictionaryAttr dict);
}
#endif
#include "xla/ffi/attribute_map.h"
#include <cstdint>
#include <string_view>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/TypeSwitch.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "xla/ffi/call_frame.h"
#include "tsl/platform/errors.h"
using FlatAttribute = xla::ffi::CallFrameBuilder::FlatAttribute;
using FlatAttributesMap = xla::ffi::CallFrameBuilder::FlatAttributesMap;
namespace xla::ffi {
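// Flattens an MLIR dictionary attribute into a CallFrameBuilder attributes
// map. Supports bool, 8/16/32/64-bit signed and unsigned integers, f32/f64
// floats, dense integer/float arrays, and strings; any other attribute kind
// is rejected with an InvalidArgument error.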
absl::StatusOr<FlatAttributesMap> BuildAttributesMap(
mlir::DictionaryAttr dict) {
FlatAttributesMap attributes;
for (auto& kv : dict) {
std::string_view name = kv.getName().strref();
auto boolean = [&](mlir::BoolAttr boolean) {
attributes[name] = static_cast<bool>(boolean.getValue());
return absl::OkStatus();
};
auto integer = [&](mlir::IntegerAttr integer) {
if (integer.getType().isUnsignedInteger()) {
switch (integer.getType().getIntOrFloatBitWidth()) {
case 8:
attributes[name] = static_cast<uint8_t>(integer.getUInt());
return absl::OkStatus();
case 16:
attributes[name] = static_cast<uint16_t>(integer.getUInt());
return absl::OkStatus();
case 32:
attributes[name] = static_cast<uint32_t>(integer.getUInt());
return absl::OkStatus();
case 64:
attributes[name] = static_cast<uint64_t>(integer.getUInt());
return absl::OkStatus();
default:
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported integer attribute bit width for attribute: ",
name));
}
} else {
switch (integer.getType().getIntOrFloatBitWidth()) {
case 8:
attributes[name] = static_cast<int8_t>(integer.getInt());
return absl::OkStatus();
case 16:
attributes[name] = static_cast<int16_t>(integer.getInt());
return absl::OkStatus();
case 32:
attributes[name] = static_cast<int32_t>(integer.getInt());
return absl::OkStatus();
case 64:
attributes[name] = static_cast<int64_t>(integer.getInt());
return absl::OkStatus();
default:
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported integer attribute bit width for attribute: ",
name));
}
}
};
auto fp = [&](mlir::FloatAttr fp) {
switch (fp.getType().getIntOrFloatBitWidth()) {
case 32:
attributes[name] = static_cast<float>(fp.getValue().convertToFloat());
return absl::OkStatus();
case 64:
attributes[name] =
static_cast<double>(fp.getValue().convertToDouble());
return absl::OkStatus();
default:
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported float attribute bit width for attribute: ", name));
}
};
auto arr = [&](mlir::DenseArrayAttr arr) {
if (auto dense = mlir::dyn_cast<mlir::DenseI8ArrayAttr>(arr)) {
attributes[name] = dense.asArrayRef().vec();
return absl::OkStatus();
} else if (auto dense = mlir::dyn_cast<mlir::DenseI16ArrayAttr>(arr)) {
attributes[name] = dense.asArrayRef().vec();
return absl::OkStatus();
} else if (auto dense = mlir::dyn_cast<mlir::DenseI32ArrayAttr>(arr)) {
attributes[name] = dense.asArrayRef().vec();
return absl::OkStatus();
} else if (auto dense = mlir::dyn_cast<mlir::DenseI64ArrayAttr>(arr)) {
attributes[name] = dense.asArrayRef().vec();
return absl::OkStatus();
} else if (auto dense = mlir::dyn_cast<mlir::DenseF32ArrayAttr>(arr)) {
attributes[name] = dense.asArrayRef().vec();
return absl::OkStatus();
} else if (auto dense = mlir::dyn_cast<mlir::DenseF64ArrayAttr>(arr)) {
attributes[name] = dense.asArrayRef().vec();
return absl::OkStatus();
} else {
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported array element type for attribute: ", name));
}
};
auto str = [&](mlir::StringAttr str) {
attributes[name] = str.getValue().str();
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(
llvm::TypeSwitch<mlir::Attribute, absl::Status>(kv.getValue())
.Case<mlir::BoolAttr>(boolean)
.Case<mlir::IntegerAttr>(integer)
.Case<mlir::FloatAttr>(fp)
.Case<mlir::DenseArrayAttr>(arr)
.Case<mlir::StringAttr>(str)
.Default([&](mlir::Attribute) {
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported attribute type for attribute: ", name));
}));
}
return attributes;
}
} | #include "xla/python/ifrt/attribute_map.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
TEST(AttributeMapTest, MapElements) {
AttributeMap map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list", AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
});
EXPECT_EQ(map.map(), AttributeMap::Map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list", AttributeMap::Int64ListValue(
{int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
}))
<< map.DebugString();
}
TEST(AttributeMapTest, ToFromProto) {
AttributeMap map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list", AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
});
TF_ASSERT_OK_AND_ASSIGN(auto map_copy,
AttributeMap::FromProto(map.ToProto()));
EXPECT_EQ(map_copy.map(), map.map()) << map_copy.DebugString();
}
}
}
} |
963 | cpp | tensorflow/tensorflow | constants | third_party/xla/xla/client/lib/constants.cc | third_party/xla/xla/client/lib/constants_test.cc | #ifndef XLA_CLIENT_LIB_CONSTANTS_H_
#define XLA_CLIENT_LIB_CONSTANTS_H_
#include <type_traits>
#include "xla/client/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
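// Builds a scalar constant of the requested primitive type from `value`.
// Floating-point values may only target floating-point or complex types, and
// complex values only complex types; violations are reported as builder
// errors.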
template <typename T>
XlaOp ConstantR0WithType(XlaBuilder* builder, PrimitiveType type, T value) {
if (std::is_floating_point<T>::value &&
!(primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type))) {
return builder->ReportError(InvalidArgument(
"Invalid cast from floating point type to %s in ConstantR0WithType.",
PrimitiveType_Name(type)));
}
if (std::is_same<T, complex64>::value &&
!primitive_util::IsComplexType(type)) {
return builder->ReportError(InvalidArgument(
"Invalid cast from complex type to %s in ConstantR0WithType.",
PrimitiveType_Name(type)));
}
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return ConstantR0<NativeT>(builder, static_cast<NativeT>(value));
}
return builder->ReportError(
InvalidArgument("Invalid type for ConstantR0WithType (%s).",
PrimitiveType_Name(type)));
},
type);
}
template <typename T>
XlaOp ScalarLike(XlaOp prototype, T value) {
XlaBuilder* builder = prototype.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(prototype));
return ConstantR0WithType(builder, shape.element_type(), value);
});
}
template <typename T>
XlaOp FullLike(XlaOp prototype, T value) {
XlaBuilder* builder = prototype.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(prototype));
if (ShapeUtil::IsScalar(shape) || shape.IsArray()) {
return Broadcast(ScalarLike(prototype, value), shape.dimensions());
} else {
return InvalidArgument(
"Prototype shape for BroadcastConstantLike must be a scalar or "
"array, but was %s",
shape.ToString());
}
});
}
XlaOp Zero(XlaBuilder* builder, PrimitiveType type);
XlaOp Zeros(XlaBuilder* builder, const Shape& shape);
XlaOp ZerosLike(XlaOp prototype);
XlaOp One(XlaBuilder* builder, PrimitiveType type);
XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type);
XlaOp MinValue(XlaBuilder* builder, PrimitiveType type);
XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type);
XlaOp MinPositiveNormalValue(XlaBuilder* builder, PrimitiveType type);
XlaOp MaxValue(XlaBuilder* builder, PrimitiveType type);
XlaOp MaxFiniteValue(XlaBuilder* builder, PrimitiveType type);
XlaOp NanValue(XlaBuilder* builder, PrimitiveType type);
}
#endif
#include "xla/client/lib/constants.h"
#include <limits>
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
XlaOp Zero(XlaBuilder* builder, PrimitiveType type) {
return ConstantLiteral(builder, LiteralUtil::Zero(type));
}
XlaOp Zeros(XlaBuilder* builder, const Shape& shape) {
return Broadcast(Zero(builder, shape.element_type()), shape.dimensions());
}
XlaOp ZerosLike(XlaOp prototype) {
XlaBuilder* builder = prototype.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(prototype));
return Zeros(builder, shape);
});
}
XlaOp One(XlaBuilder* builder, PrimitiveType type) {
return ConstantLiteral(builder, LiteralUtil::One(type));
}
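// Returns std::numeric_limits<NativeT>::epsilon() for floating-point types;
// any other type is reported as an invalid argument.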
XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type) {
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return ConstantR0<NativeT>(builder,
std::numeric_limits<NativeT>::epsilon());
}
return builder->ReportError(InvalidArgument(
"Invalid type for Epsilon (%s).", PrimitiveType_Name(type)));
},
type);
}
XlaOp MinValue(XlaBuilder* builder, PrimitiveType type) {
return ConstantLiteral(builder, LiteralUtil::MinValue(type));
}
XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type) {
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return ConstantR0<NativeT>(builder,
std::numeric_limits<NativeT>::lowest());
}
return MinValue(builder, type);
},
type);
}
XlaOp MinPositiveNormalValue(XlaBuilder* builder, PrimitiveType type) {
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return ConstantR0<NativeT>(builder,
std::numeric_limits<NativeT>::min());
}
return builder->ReportError(
InvalidArgument("Invalid type for MinPositiveNormalValue (%s).",
PrimitiveType_Name(type)));
},
type);
}
XlaOp MaxValue(XlaBuilder* builder, PrimitiveType type) {
return ConstantLiteral(builder, LiteralUtil::MaxValue(type));
}
XlaOp MaxFiniteValue(XlaBuilder* builder, PrimitiveType type) {
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return ConstantR0<NativeT>(builder,
std::numeric_limits<NativeT>::max());
}
return MaxValue(builder, type);
},
type);
}
XlaOp NanValue(XlaBuilder* builder, PrimitiveType type) {
return primitive_util::PrimitiveTypeSwitch<XlaOp>(
[&](auto primitive_type_constant) -> XlaOp {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return ConstantR0<NativeT>(builder,
std::numeric_limits<NativeT>::quiet_NaN());
}
return builder->ReportError(InvalidArgument(
"Invalid type for NanValue (%s).", PrimitiveType_Name(type)));
},
type);
}
} | #include "xla/client/lib/constants.h"
#include <limits>
#include "xla/client/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ConstantsTest = ClientLibraryTestBase;
using ::testing::HasSubstr;
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::S32, 4);
ComputeAndCompareR0<int32_t>(&builder, 4, {});
}
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32DoesNotAcceptFloats) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::S32, 4.5);
auto statusor = builder.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("Invalid cast"));
}
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeF32) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::F32, -7);
ComputeAndCompareR0<float>(&builder, -7, {});
ConstantR0WithType(&builder, xla::F32, 0.5);
ComputeAndCompareR0<float>(&builder, 0.5, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeS32) {
XlaBuilder builder(TestName());
ScalarLike(ConstantR0<int32_t>(&builder, 42), -3);
ComputeAndCompareR0<int32_t>(&builder, -3, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeF32) {
XlaBuilder builder(TestName());
ScalarLike(ConstantR0<float>(&builder, 42.75), -3.2);
ComputeAndCompareR0<float>(&builder, -3.2, {});
}
XLA_TEST_F(ConstantsTest, ZeroS32) {
XlaBuilder builder(TestName());
Zero(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 0, {});
}
XLA_TEST_F(ConstantsTest, ZeroF32) {
XlaBuilder builder(TestName());
Zero(&builder, F32);
ComputeAndCompareR0<float>(&builder, 0.0, {});
}
XLA_TEST_F(ConstantsTest, ZerosS32) {
XlaBuilder builder(TestName());
Zeros(&builder, ShapeUtil::MakeShape(S32, {2, 2}));
ComputeAndCompareR2<int32_t>(&builder, {{0, 0}, {0, 0}}, {});
}
XLA_TEST_F(ConstantsTest, ZerosLikeF32) {
XlaBuilder builder(TestName());
ZerosLike(ConstantR1<float>(&builder, {1., 2., 3.}));
ComputeAndCompareR1<float>(&builder, {0., 0., 0.}, {});
}
XLA_TEST_F(ConstantsTest, OneS32) {
XlaBuilder builder(TestName());
One(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 1, {});
}
XLA_TEST_F(ConstantsTest, OneF32) {
XlaBuilder builder(TestName());
One(&builder, F32);
ComputeAndCompareR0<float>(&builder, 1., {});
}
XLA_TEST_F(ConstantsTest, EpsilonF32) {
XlaBuilder builder(TestName());
Epsilon(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::epsilon(),
{});
}
XLA_TEST_F(ConstantsTest, MinFiniteValueS32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueS32) {
XlaBuilder builder(TestName());
MaxFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
{});
}
XLA_TEST_F(ConstantsTest, MinFiniteValueF32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueF32) {
XlaBuilder builder(TestName());
MaxFiniteValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MinValueS32) {
XlaBuilder builder(TestName());
MinValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
XLA_TEST_F(ConstantsTest, MaxValueS32) {
XlaBuilder builder(TestName());
MaxValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
{});
}
XLA_TEST_F(ConstantsTest, MinValueF32) {
XlaBuilder builder(TestName());
MinValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::infinity(),
{});
}
XLA_TEST_F(ConstantsTest, MaxValueF32) {
XlaBuilder builder(TestName());
MaxValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::infinity(),
{});
}
XLA_TEST_F(ConstantsTest, NanValueF32) {
XlaBuilder builder(TestName());
NanValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::quiet_NaN(),
{});
}
}
} |
964 | cpp | tensorflow/tensorflow | verifier_internal | tensorflow/lite/core/tools/verifier_internal.cc | tensorflow/lite/core/tools/verifier_internal_test.cc | #ifndef TENSORFLOW_LITE_CORE_TOOLS_VERIFIER_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_TOOLS_VERIFIER_INTERNAL_H_
#include <stddef.h>
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace internal {
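// Verifies that `buf` of size `len` holds a structurally valid TFLite
// flatbuffer and returns the deserialized Model, or nullptr if the buffer
// fails flatbuffer verification.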
const Model* VerifyFlatBufferAndGetModel(const void* buf, size_t len);
}
}
#endif
#include "tensorflow/lite/core/tools/verifier_internal.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace internal {
const Model* VerifyFlatBufferAndGetModel(const void* buf, size_t len) {
::flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
if (VerifyModelBuffer(verifier)) {
return ::tflite::GetModel(buf);
} else {
return nullptr;
}
}
}
} | #include "tensorflow/lite/core/tools/verifier_internal.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
class TfLiteFlatbufferModelBuilder {
public:
TfLiteFlatbufferModelBuilder() {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
}
TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
const std::vector<std::string>& custom_ops) {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
for (const auto& iter : builtin_ops) {
resolver_.AddBuiltin(iter, &fake_op_);
}
for (const auto& iter : custom_ops) {
resolver_.AddCustom(iter.data(), &fake_op_);
}
}
void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
const std::vector<uint8_t>& buffer, const char* name,
const bool is_variable = false) {
int buffer_index = 0;
if (!buffer.empty()) {
buffer_index = buffers_.size();
buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
}
if (shape.empty()) {
tensors_.push_back(CreateTensorDirect(builder_, nullptr, type,
buffer_index, name,
0, is_variable));
return;
}
tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
name, 0,
is_variable));
}
void AddOperator(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
tflite::BuiltinOperator builtin_op, const char* custom_op) {
operator_codes_.push_back(
CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
operators_.push_back(CreateOperator(
builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), BuiltinOptions_NONE,
0,
0, tflite::CustomOptionsFormat_FLEXBUFFERS));
}
enum BuilderMode {
kBuilderModeEmptyVectorIsEmpty,
kBuilderModeEmptyVectorIsNull,
kBuilderModeDefault = kBuilderModeEmptyVectorIsEmpty,
};
void FinishModel(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
BuilderMode mode = kBuilderModeDefault) {
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>({CreateSubGraph(
builder_, CreateVector(tensors_, mode), CreateVector(inputs, mode),
CreateVector(outputs, mode), CreateVector(operators_, mode),
builder_.CreateString("test_subgraph"))});
auto result = CreateModel(
builder_, TFLITE_SCHEMA_VERSION, CreateVector(operator_codes_, mode),
CreateVector(subgraph, mode), builder_.CreateString("test_model"),
CreateVector(buffers_, mode));
tflite::FinishModelBuffer(builder_, result);
}
bool Verify(const void* buf, size_t length) {
    return tflite::internal::VerifyFlatBufferAndGetModel(buf, length) !=
           nullptr;
}
bool Verify() {
return Verify(builder_.GetBufferPointer(), builder_.GetSize());
}
private:
template <typename T>
flatbuffers::Offset<flatbuffers::Vector<T>> CreateVector(
const std::vector<T>& v, BuilderMode mode) {
if (mode == kBuilderModeEmptyVectorIsNull && v.empty()) {
return 0;
}
return builder_.CreateVector(v);
}
flatbuffers::FlatBufferBuilder builder_;
MutableOpResolver resolver_;
TfLiteRegistration fake_op_{};
std::vector<flatbuffers::Offset<Operator>> operators_;
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes_;
std::vector<flatbuffers::Offset<Tensor>> tensors_;
std::vector<flatbuffers::Offset<Buffer>> buffers_;
};
TEST(VerifyModel, TestEmptyModel) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
0, 0,
0, 0);
::tflite::FinishModelBuffer(builder, model);
ASSERT_TRUE(::tflite::internal::VerifyFlatBufferAndGetModel(
builder.GetBufferPointer(), builder.GetSize()));
}
TEST(VerifyModel, TestSimpleModel) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
}
TEST(VerifyModel, TestCorruptedData) {
std::string model = "123";
ASSERT_FALSE(::tflite::internal::VerifyFlatBufferAndGetModel(model.data(),
model.size()));
}
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
0,
0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
builder.GetSize());
for (size_t i = 0; i < model_content.size(); i++) {
model_content[i] = (model_content[i] + 137) % 255;
EXPECT_FALSE(tflite::internal::VerifyFlatBufferAndGetModel(
model_content.data(), model_content.size()))
<< "Fail at position: " << i;
}
}
} |
965 | cpp | tensorflow/tensorflow | verifier | tensorflow/lite/core/tools/verifier.cc | tensorflow/lite/core/tools/verifier_test.cc | #ifndef TENSORFLOW_LITE_CORE_TOOLS_VERIFIER_H_
#define TENSORFLOW_LITE_CORE_TOOLS_VERIFIER_H_
#include <stdio.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/error_reporter.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
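// An OpResolver that "finds" every builtin and custom op by handing back a
// dummy registration with null callbacks, so verification never fails on op
// resolution.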
class AlwaysTrueResolver : public OpResolver {
public:
AlwaysTrueResolver() {}
const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
int version) const override {
static TfLiteRegistration null_registration = {nullptr, nullptr, nullptr,
nullptr};
return &null_registration;
}
const TfLiteRegistration* FindOp(const char* op, int version) const override {
static TfLiteRegistration null_registration = {nullptr, nullptr, nullptr,
nullptr};
return &null_registration;
}
};
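// Verifies the model flatbuffer in `buf`: checks the flatbuffer layout,
// schema version, subgraph/tensor/buffer consistency, and that every op is
// known to `resolver`. Returns true if the model passes all checks.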
bool Verify(const void* buf, size_t len, const OpResolver& resolver,
ErrorReporter* error_reporter);
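// Same as above, but skips the op-resolution check.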
bool Verify(const void* buf, size_t len, ErrorReporter* error_reporter);
}
#endif
#include "tensorflow/lite/core/tools/verifier.h"
#include <algorithm>
#include <climits>
#include <complex>
#include <cstdint>
#include <cstring>
#include "absl/container/flat_hash_set.h"
#include "absl/types/optional.h"
#include "flatbuffers/string.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/tools/verifier_internal.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
const char* NameOrEmptyString(const flatbuffers::String* str) {
if (str == nullptr || str->c_str() == nullptr) {
return "";
}
return str->c_str();
}
bool IsNullOrEmptyString(const flatbuffers::String* str) {
return strcmp(NameOrEmptyString(str), "") == 0;
}
void ReportError(ErrorReporter* error_reporter, const char* format, ...) {
if (error_reporter) {
va_list args;
va_start(args, format);
TF_LITE_REPORT_ERROR(error_reporter, format, args);
va_end(args);
}
}
uint32_t GetIntPtr(const char* ptr) {
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
return flatbuffers::EndianScalar(*reinterpret_cast<const uint32_t*>(ptr));
#else
return *reinterpret_cast<const uint32_t*>(ptr);
#endif
}
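// Upper bound on the number of strings a string tensor may declare; chosen
// so that the offset table of (num_strings + 2) int32 entries cannot
// overflow a uint32 byte count.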
const uint32_t kMaxNumString = UINT_MAX / sizeof(int32_t) - 2;
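// Verifies that a string tensor's backing buffer follows the TFLite string
// layout: a uint32 string count followed by an offset table whose entries
// stay within the buffer, with the final offset equal to the buffer size.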
bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
ErrorReporter* error_reporter) {
uint32_t buffer_size = buffer.data()->size();
if (buffer_size < sizeof(uint32_t)) {
ReportError(error_reporter, "String tensor %s is invalid (empty)",
NameOrEmptyString(tensor.name()));
return false;
}
const char* buffer_ptr = reinterpret_cast<const char*>(buffer.data()->data());
uint32_t num_strings = GetIntPtr(buffer_ptr);
if (num_strings > kMaxNumString) {
ReportError(error_reporter,
"String tensor %s has invalid num of string set: %d",
NameOrEmptyString(tensor.name()), num_strings);
return false;
}
uint32_t header_offsets =
static_cast<uint32_t>(num_strings + 2) * sizeof(int32_t);
if (buffer_size < header_offsets) {
ReportError(error_reporter,
"String tensor %s buffer requires at least %d bytes, but is "
"allocated with %d bytes",
NameOrEmptyString(tensor.name()), header_offsets, buffer_size);
return false;
}
uint32_t prev_ptr = header_offsets;
uint32_t offset = sizeof(int32_t);
if (GetIntPtr(buffer_ptr + offset) != header_offsets) {
ReportError(error_reporter,
"String tensor %s buffer initial offset must be: %d",
NameOrEmptyString(tensor.name()), header_offsets);
return false;
}
offset += sizeof(int32_t);
for (int i = 1, end = num_strings; i <= end; i++, offset += sizeof(int32_t)) {
int string_offset = GetIntPtr(buffer_ptr + offset);
if (string_offset < static_cast<int>(prev_ptr) ||
string_offset > static_cast<int>(buffer_size)) {
ReportError(error_reporter,
"String tensor %s buffer is invalid: index %d",
NameOrEmptyString(tensor.name()), i);
return false;
}
}
if (GetIntPtr(buffer_ptr + offset - sizeof(int32_t)) != buffer_size) {
ReportError(error_reporter,
"String tensor %s buffer last offset must be %d",
NameOrEmptyString(tensor.name()), buffer_size);
return false;
}
return true;
}
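// Helpers for reading the SparseIndexVector unions stored in a sparse
// tensor's dimension metadata (array_segments / array_indices), independent
// of whether they are encoded as int32, uint16, or uint8 vectors.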
bool CheckArraySegments(const DimensionMetadata* dim_metadata) {
if (dim_metadata->array_segments() == nullptr) {
return false;
}
switch (dim_metadata->array_segments_type()) {
case SparseIndexVector_Int32Vector:
return (dim_metadata->array_segments_as_Int32Vector()->values() !=
nullptr);
case SparseIndexVector_Uint16Vector:
return (dim_metadata->array_segments_as_Uint16Vector()->values() !=
nullptr);
case SparseIndexVector_Uint8Vector:
return (dim_metadata->array_segments_as_Uint8Vector()->values() !=
nullptr);
default:
return false;
}
}
int GetSizeOfSegments(const DimensionMetadata* dim_metadata) {
switch (dim_metadata->array_segments_type()) {
case SparseIndexVector_Int32Vector:
return dim_metadata->array_segments_as_Int32Vector()->values()->size();
case SparseIndexVector_Uint16Vector:
return dim_metadata->array_segments_as_Uint16Vector()->values()->size();
case SparseIndexVector_Uint8Vector:
return dim_metadata->array_segments_as_Uint8Vector()->values()->size();
default:
return -1;
}
}
int GetValueOfSegmentsAt(const DimensionMetadata* dim_metadata, const int i) {
switch (dim_metadata->array_segments_type()) {
case SparseIndexVector_Int32Vector:
return static_cast<int>(
dim_metadata->array_segments_as_Int32Vector()->values()->Get(i));
case SparseIndexVector_Uint16Vector:
return static_cast<int>(
dim_metadata->array_segments_as_Uint16Vector()->values()->Get(i));
case SparseIndexVector_Uint8Vector:
return static_cast<int>(
dim_metadata->array_segments_as_Uint8Vector()->values()->Get(i));
default:
return -1;
}
}
bool CheckArrayIndices(const DimensionMetadata* dim_metadata) {
if (dim_metadata->array_indices() == nullptr) {
return false;
}
switch (dim_metadata->array_indices_type()) {
case SparseIndexVector_Int32Vector:
return (dim_metadata->array_indices_as_Int32Vector()->values() !=
nullptr);
case SparseIndexVector_Uint16Vector:
return (dim_metadata->array_indices_as_Uint16Vector()->values() !=
nullptr);
case SparseIndexVector_Uint8Vector:
return (dim_metadata->array_indices_as_Uint8Vector()->values() !=
nullptr);
default:
return false;
}
}
int GetSizeOfIndices(const DimensionMetadata* dim_metadata) {
switch (dim_metadata->array_indices_type()) {
case SparseIndexVector_Int32Vector:
return dim_metadata->array_indices_as_Int32Vector()->values()->size();
case SparseIndexVector_Uint16Vector:
return dim_metadata->array_indices_as_Uint16Vector()->values()->size();
case SparseIndexVector_Uint8Vector:
return dim_metadata->array_indices_as_Uint8Vector()->values()->size();
default:
return -1;
}
}
int GetValueOfIndicesAt(const DimensionMetadata* dim_metadata, const int i) {
switch (dim_metadata->array_indices_type()) {
case SparseIndexVector_Int32Vector:
return static_cast<int>(
dim_metadata->array_indices_as_Int32Vector()->values()->Get(i));
case SparseIndexVector_Uint16Vector:
return static_cast<int>(
dim_metadata->array_indices_as_Uint16Vector()->values()->Get(i));
case SparseIndexVector_Uint8Vector:
return static_cast<int>(
dim_metadata->array_indices_as_Uint8Vector()->values()->Get(i));
default:
return -1;
}
}
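// Walks the sparsity traversal order, validating each dimension's metadata
// (dense sizes match the shape, segment arrays are non-decreasing and
// consistent with the index arrays) and returns the number of stored
// elements, or nullopt if any check fails.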
absl::optional<uint64_t> VerifyAndCountElements(
const SparsityParameters& sparsity, const std::vector<int>& dim_sizes) {
const int total_level = sparsity.traversal_order()->size();
uint64_t num_elements = 1;
for (int i = 0; i < total_level; i++) {
const int original_dim = sparsity.traversal_order()->Get(i);
const auto* dim_metadata = sparsity.dim_metadata()->Get(i);
if (dim_metadata->format() == DimensionType_DENSE) {
if (dim_metadata->dense_size() != dim_sizes[original_dim]) {
return absl::nullopt;
}
num_elements *= dim_metadata->dense_size();
} else {
if (!CheckArraySegments(dim_metadata) ||
!CheckArrayIndices(dim_metadata)) {
return absl::nullopt;
}
int array_segments_size = GetSizeOfSegments(dim_metadata);
int array_indices_size = GetSizeOfIndices(dim_metadata);
for (int j = 0; j < array_segments_size - 1; j++) {
if (GetValueOfSegmentsAt(dim_metadata, j) < 0 ||
GetValueOfSegmentsAt(dim_metadata, j + 1) < 0 ||
GetValueOfSegmentsAt(dim_metadata, j) >
GetValueOfSegmentsAt(dim_metadata, j + 1)) {
return absl::nullopt;
}
}
if (static_cast<int>(num_elements) != array_segments_size - 1) {
return absl::nullopt;
}
if (array_indices_size !=
GetValueOfSegmentsAt(dim_metadata, array_segments_size - 1)) {
return absl::nullopt;
}
for (int j = 0; j < array_indices_size; j++) {
if (GetValueOfIndicesAt(dim_metadata, j) < 0 ||
GetValueOfIndicesAt(dim_metadata, j) >= dim_sizes[original_dim]) {
return absl::nullopt;
}
}
num_elements = array_indices_size;
}
}
return num_elements;
}
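// Validates a sparse tensor's SparsityParameters (traversal order covers all
// original and block dimensions, block map is consistent, block sizes are
// positive) and returns the number of values the data buffer must contain,
// or nullopt if the parameters are malformed.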
absl::optional<uint64_t> VerifyAndCountSparseElements(const Tensor& tensor) {
const auto* sparsity = tensor.sparsity();
if (sparsity->traversal_order() == nullptr ||
sparsity->dim_metadata() == nullptr) {
return absl::nullopt;
}
const int total_dims = sparsity->traversal_order()->size();
const int original_rank = tensor.shape()->size();
const int sparsity_dim_metadata_size = sparsity->dim_metadata()->size();
if (total_dims < original_rank || sparsity_dim_metadata_size != total_dims) {
return absl::nullopt;
}
const int block_rank = total_dims - original_rank;
if (block_rank > 0) {
if (sparsity->block_map() == nullptr) {
return absl::nullopt;
}
const int sparse_rank = sparsity->block_map()->size();
if (sparse_rank != block_rank) {
return absl::nullopt;
}
}
std::vector<int> traversal_order(total_dims);
for (int i = 0; i < total_dims; i++) {
traversal_order[i] = sparsity->traversal_order()->Get(i);
}
std::sort(traversal_order.begin(), traversal_order.begin() + original_rank);
for (int i = 0; i < original_rank; i++) {
if (traversal_order[i] != i) {
return absl::nullopt;
}
}
std::sort(traversal_order.begin() + original_rank, traversal_order.end());
for (int i = original_rank; i < total_dims; i++) {
if (traversal_order[i] != i) {
return absl::nullopt;
}
}
std::vector<int> expanded_dim_sizes;
expanded_dim_sizes.resize(total_dims);
for (int i = 0; i < original_rank; i++) {
expanded_dim_sizes[i] = tensor.shape()->Get(i);
}
for (int i = 0; i < block_rank; i++) {
int original_block_dim =
sparsity->traversal_order()->Get(i + original_rank);
if (original_block_dim < 0 || original_block_dim >= total_dims) {
return absl::nullopt;
}
int block_dim_size =
sparsity->dim_metadata()->Get(i + original_rank)->dense_size();
if (block_dim_size <= 0) {
return absl::nullopt;
}
expanded_dim_sizes[original_block_dim] = block_dim_size;
int mapped_block_dim = sparsity->block_map()->Get(i);
if (mapped_block_dim < 0 || mapped_block_dim >= total_dims) {
return absl::nullopt;
}
expanded_dim_sizes[mapped_block_dim] /= block_dim_size;
}
return VerifyAndCountElements(*sparsity, expanded_dim_sizes);
}
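// Checks that a non-string tensor's buffer is exactly the size implied by
// its shape (or, for sparse tensors, by its sparsity parameters) and element
// type, reporting overflow or size mismatches through `error_reporter`.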
bool VerifyNumericTensorBuffer(const Tensor& tensor, const Buffer& buffer,
ErrorReporter* error_reporter) {
uint64_t bytes_required = 1;
if (!tensor.shape()) {
return true;
}
if (tensor.sparsity() != nullptr) {
const auto num_elements = VerifyAndCountSparseElements(tensor);
if (!num_elements.has_value()) {
ReportError(error_reporter, "Tensor %s has invalid sparsity parameters",
NameOrEmptyString(tensor.name()));
return false;
}
bytes_required = num_elements.value();
if (bytes_required > UINT_MAX) {
ReportError(error_reporter, "Tensor %s dimension overflow",
NameOrEmptyString(tensor.name()));
return false;
}
} else {
for (int dim : *tensor.shape()) {
bytes_required *= dim;
if (bytes_required > UINT_MAX) {
ReportError(error_reporter, "Tensor %s dimension overflow",
NameOrEmptyString(tensor.name()));
return false;
}
}
}
switch (tensor.type()) {
case TensorType_FLOAT32:
bytes_required *= sizeof(float);
break;
case TensorType_FLOAT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_BFLOAT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_FLOAT64:
bytes_required *= sizeof(double);
break;
case TensorType_INT32:
bytes_required *= sizeof(int32_t);
break;
case TensorType_UINT32:
bytes_required *= sizeof(uint32_t);
break;
case TensorType_INT4:
bytes_required *= sizeof(int8_t);
break;
case TensorType_UINT8:
bytes_required *= sizeof(uint8_t);
break;
case TensorType_INT8:
bytes_required *= sizeof(int8_t);
break;
case TensorType_INT64:
bytes_required *= sizeof(int64_t);
break;
case TensorType_UINT64:
bytes_required *= sizeof(uint64_t);
break;
case TensorType_BOOL:
bytes_required *= sizeof(bool);
break;
case TensorType_INT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_UINT16:
bytes_required *= sizeof(uint16_t);
break;
case TensorType_COMPLEX64:
bytes_required *= sizeof(std::complex<float>);
break;
case TensorType_COMPLEX128:
bytes_required *= sizeof(std::complex<double>);
break;
default:
ReportError(error_reporter, "Tensor %s invalid type: %d",
NameOrEmptyString(tensor.name()), tensor.type());
return false;
}
if (bytes_required > UINT_MAX) {
ReportError(error_reporter, "Tensor %s dimension overflow",
NameOrEmptyString(tensor.name()));
return false;
}
if (bytes_required != buffer.data()->size()) {
ReportError(
error_reporter,
"Tensor %s requires %d bytes, but is allocated with %d bytes buffer",
NameOrEmptyString(tensor.name()), bytes_required,
buffer.data()->size());
return false;
}
return true;
}
using flatbuffers::Offset;
using flatbuffers::Vector;
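// Every operator must carry both an 'inputs' and an 'outputs' vector.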
bool VerifyOperators(const Vector<Offset<Operator>>& operators,
ErrorReporter* error_reporter) {
for (const auto* op : operators) {
if (!op->inputs()) {
ReportError(error_reporter, "Missing 'inputs' for operator.");
return false;
}
if (!op->outputs()) {
ReportError(error_reporter, "Missing 'outputs' for operator.");
return false;
}
}
return true;
}
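// A tensor is treated as constant when it references a model buffer that
// actually carries data.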
bool IsConstantTensor(const Tensor& tensor, const Model& model) {
if (!tensor.buffer() || !model.buffers()) return false;
if (tensor.buffer() > 0 && tensor.buffer() < model.buffers()->size()) {
auto* buffer = model.buffers()->Get(tensor.buffer());
if (buffer && buffer->data()) {
return true;
}
}
return false;
}
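// Checks dataflow inside a subgraph: every op input must come from a
// constant, a variable, a subgraph input, or the output of an earlier op,
// and no op may write to a constant, a variable, a subgraph input, or a
// tensor already produced by another op (which would indicate a cycle).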
bool VerifySubGraphConsistency(const Model& model, const SubGraph& subgraph,
ErrorReporter* error_reporter) {
absl::flat_hash_set<int> subgraph_input_tensors, constant_tensors,
variable_tensors, output_tensors;
if (subgraph.tensors()) {
for (int i = 0, end = subgraph.tensors()->size(); i < end; ++i) {
const auto* tensor = subgraph.tensors()->Get(i);
if (IsConstantTensor(*tensor, model)) {
constant_tensors.insert(i);
} else if (tensor->is_variable()) {
variable_tensors.insert(i);
}
}
}
if (subgraph.inputs()) {
for (const int tensor_idx : *subgraph.inputs()) {
subgraph_input_tensors.insert(tensor_idx);
}
}
if (subgraph.operators()) {
for (int op_idx = 0, end = subgraph.operators()->size(); op_idx < end;
++op_idx) {
const auto* op = subgraph.operators()->Get(op_idx);
if (!model.operator_codes() ||
(op->opcode_index() >= model.operator_codes()->size())) {
ReportError(error_reporter,
"Operator %d does not exist in model op codes",
op->opcode_index());
return false;
}
const auto& opcode = model.operator_codes()->Get(op->opcode_index());
auto builtin_code = GetBuiltinCode(opcode);
for (const int input_idx : *op->inputs()) {
if (input_idx == kTfLiteOptionalTensor) continue;
if (constant_tensors.find(input_idx) == constant_tensors.end() &&
variable_tensors.find(input_idx) == variable_tensors.end() &&
subgraph_input_tensors.find(input_idx) ==
subgraph_input_tensors.end() &&
output_tensors.find(input_idx) == output_tensors.end()) {
ReportError(error_reporter,
"Input tensor %d to op %d (%s) is not produced",
input_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
return false;
}
}
for (const int output_idx : *op->outputs()) {
if (constant_tensors.find(output_idx) != constant_tensors.end()) {
ReportError(
error_reporter, "Output tensor %d to op %d (%s) is a constant",
output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
return false;
} else if (variable_tensors.find(output_idx) !=
variable_tensors.end()) {
ReportError(
error_reporter, "Output tensor %d to op %d (%s) is a variable",
output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
return false;
} else if (subgraph_input_tensors.find(output_idx) !=
subgraph_input_tensors.end()) {
ReportError(error_reporter,
"Output tensor %d to op %d (%s) is a subgraph input",
output_idx, op_idx,
EnumNameBuiltinOperator(builtin_code));
return false;
} else if (output_tensors.find(output_idx) != output_tensors.end()) {
ReportError(error_reporter,
"Output tensor %d to op %d (%s) is an output from "
"another op. There is a cycle in the graph",
output_idx, op_idx,
EnumNameBuiltinOperator(builtin_code));
return false;
}
output_tensors.insert(output_idx);
}
}
}
return true;
}
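// Verifies that the model has a 'subgraphs' section and that each subgraph's
// operators and dataflow are well formed.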
bool VerifySubGraphs(const Model& model, ErrorReporter* error_reporter) {
if (!model.subgraphs()) {
ReportError(error_reporter, "Missing 'subgraphs' section.");
return false;
}
for (const auto* subgraph : *model.subgraphs()) {
if (!subgraph->operators()) {
ReportError(error_reporter, "Missing 'operators' section in subgraph.");
return false;
}
if (!VerifyOperators(*subgraph->operators(), error_reporter)) {
return false;
}
if (!VerifySubGraphConsistency(model, *subgraph, error_reporter)) {
return false;
}
}
return true;
}
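// Verifies that every tensor referencing a non-zero buffer index points at a
// valid buffer, and that any buffer carrying data matches the tensor's type
// and shape (string tensors get the string-layout check above).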
bool VerifyTensors(const Model& model, ErrorReporter* error_reporter) {
if (!model.subgraphs()) {
return true;
}
if (!model.buffers()) {
ReportError(error_reporter, "Missing 'buffers' section.");
return false;
}
for (const auto* subgraph : *model.subgraphs()) {
if (!subgraph->tensors()) {
continue;
}
for (const auto* tensor : *subgraph->tensors()) {
if (!tensor->buffer()) {
continue;
}
if (tensor->buffer() >= model.buffers()->size()) {
ReportError(error_reporter, "Tensor %s invalid buffer index: %d",
NameOrEmptyString(tensor->name()), tensor->buffer());
return false;
}
auto* buffer = model.buffers()->Get(tensor->buffer());
if (!buffer) {
ReportError(error_reporter, "Tensor %s buffer %d not set",
NameOrEmptyString(tensor->name()), tensor->buffer());
return false;
}
if (buffer->data()) {
if (tensor->type() == TensorType_STRING) {
if (!VerifyStringTensorBuffer(*tensor, *buffer, error_reporter)) {
return false;
}
} else {
if (!VerifyNumericTensorBuffer(*tensor, *buffer, error_reporter)) {
return false;
}
}
}
}
}
return true;
}
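// Verifies that every operator code used by the model resolves through
// `resolver`. Custom ops that appear only in validation subgraphs are
// allowed to stay unresolved.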
bool VerifyOps(const Model& model, const OpResolver& resolver,
ErrorReporter* error_reporter) {
if (!model.operator_codes()) {
return true;
}
absl::flat_hash_set<int> regular_code_indices;
absl::flat_hash_set<int> validation_code_indices;
for (const auto* subgraph : *model.subgraphs()) {
if (!subgraph->operators()) {
continue;
}
if (subgraph->name() && IsValidationSubgraph(subgraph->name()->c_str())) {
for (const auto& op : *(subgraph->operators())) {
validation_code_indices.insert(op->opcode_index());
}
} else {
for (const auto* op : *(subgraph->operators())) {
regular_code_indices.insert(op->opcode_index());
}
}
}
for (int i = 0; i < model.operator_codes()->size(); i++) {
const auto* opcode = model.operator_codes()->Get(i);
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code < BuiltinOperator_MIN ||
builtin_code > BuiltinOperator_MAX) {
ReportError(error_reporter, "Operator id '%d' is out of range.",
builtin_code);
return false;
}
if (builtin_code == BuiltinOperator_CUSTOM) {
if (IsNullOrEmptyString(opcode->custom_code())) {
ReportError(error_reporter,
"Invalid custom op name, cannot be null/empty.");
return false;
} else if (!resolver.FindOp(opcode->custom_code()->c_str(),
opcode->version())) {
if (regular_code_indices.contains(i) ||
!validation_code_indices.contains(i)) {
ReportError(error_reporter, "Unsupported custom op: %s, version: %d",
opcode->custom_code()->c_str(), opcode->version());
return false;
}
}
} else {
if (!resolver.FindOp(builtin_code, opcode->version())) {
ReportError(error_reporter, "Unsupported builtin op: %s, version: %d",
EnumNameBuiltinOperator(builtin_code), opcode->version());
return false;
}
}
}
return true;
}
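// Top-level structural check: non-null model, matching schema version, and
// consistent subgraphs and tensors.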
bool VerifyModel(const Model* model, ErrorReporter* error_reporter) {
if (model == nullptr) {
ReportError(error_reporter, "Invalid flatbuffer format");
return false;
}
if (model->version() != TFLITE_SCHEMA_VERSION) {
ReportError(error_reporter, "Invalid model version %d", model->version());
return false;
}
if (!VerifySubGraphs(*model, error_reporter)) {
return false;
}
if (!VerifyTensors(*model, error_reporter)) {
return false;
}
return true;
}
}
bool Verify(const void* buf, size_t len, ErrorReporter* error_reporter) {
const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
return VerifyModel(model, error_reporter);
}
bool Verify(const void* buf, size_t len, const OpResolver& resolver,
ErrorReporter* error_reporter) {
const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
if (!VerifyModel(model, error_reporter)) {
return false;
}
if (!VerifyOps(*model, resolver, error_reporter)) {
return false;
}
return true;
}
} | #include "tensorflow/lite/core/tools/verifier.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/error_reporter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
static const char* kSparseTensorTestModel =
"tensorflow/lite/testdata/sparse_tensor.bin";
}
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
return buffer_size_;
}
int GetBufferSize() { return buffer_size_; }
string GetAsString() const { return string(buffer_, buffer_size_); }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
class TfLiteFlatbufferModelBuilder {
public:
TfLiteFlatbufferModelBuilder() {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
}
TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
const std::vector<std::string>& custom_ops) {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
for (const auto& iter : builtin_ops) {
resolver_.AddBuiltin(iter, &fake_op_);
}
for (const auto& iter : custom_ops) {
resolver_.AddCustom(iter.data(), &fake_op_);
}
}
void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
const std::vector<uint8_t>& buffer, const char* name,
const bool is_variable = false) {
int buffer_index = 0;
if (!buffer.empty()) {
buffer_index = buffers_.size();
buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
}
if (shape.empty()) {
tensors_.push_back(CreateTensorDirect(builder_, nullptr, type,
buffer_index, name,
0, is_variable));
return;
}
tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
name, 0,
is_variable));
}
void AddOperator(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
tflite::BuiltinOperator builtin_op, const char* custom_op) {
operator_codes_.push_back(
CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
operators_.push_back(CreateOperator(
builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), BuiltinOptions_NONE,
0,
0, tflite::CustomOptionsFormat_FLEXBUFFERS));
}
enum BuilderMode {
kBuilderModeEmptyVectorIsEmpty,
kBuilderModeEmptyVectorIsNull,
kBuilderModeDefault = kBuilderModeEmptyVectorIsEmpty,
};
void FinishModel(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
BuilderMode mode = kBuilderModeDefault) {
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>({CreateSubGraph(
builder_, CreateVector(tensors_, mode), CreateVector(inputs, mode),
CreateVector(outputs, mode), CreateVector(operators_, mode),
builder_.CreateString("test_subgraph"))});
auto result = CreateModel(
builder_, TFLITE_SCHEMA_VERSION, CreateVector(operator_codes_, mode),
CreateVector(subgraph, mode), builder_.CreateString("test_model"),
CreateVector(buffers_, mode));
tflite::FinishModelBuffer(builder_, result);
}
bool Verify() {
return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
&mock_reporter_);
}
bool VerifyWithOpResolver() {
return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
resolver_, &mock_reporter_);
}
string GetErrorString() { return mock_reporter_.GetAsString(); }
private:
template <typename T>
flatbuffers::Offset<flatbuffers::Vector<T>> CreateVector(
const std::vector<T>& v, BuilderMode mode) {
if (mode == kBuilderModeEmptyVectorIsNull && v.empty()) {
return 0;
}
return builder_.CreateVector(v);
}
flatbuffers::FlatBufferBuilder builder_;
MutableOpResolver resolver_;
TfLiteRegistration fake_op_{};
MockErrorReporter mock_reporter_;
std::vector<flatbuffers::Offset<Operator>> operators_;
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes_;
std::vector<flatbuffers::Offset<Tensor>> tensors_;
std::vector<flatbuffers::Offset<Buffer>> buffers_;
};
TEST(VerifyModel, TestEmptyModel) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
0, 0,
0, 0);
::tflite::FinishModelBuffer(builder, model);
MockErrorReporter mock_reporter;
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("Missing 'subgraphs' section."));
}
TEST(VerifyModel, TestEmptyVector) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor({}, TensorType_UINT8, {}, "empty_vector");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {3});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
}
TEST(VerifyModel, TestSimpleModel) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TestNullTensors) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.FinishModel(
{}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ(builder.GetErrorString(),
"Input tensor 0 to op 0 (CUSTOM) is not produced");
}
TEST(VerifyModel, TestNullOperators) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.FinishModel(
{0, 1}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Missing 'operators' section in subgraph"));
}
TEST(VerifyModel, TestNullInputs) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel(
{}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TestCorruptedData) {
std::string model = "123";
MockErrorReporter mock_reporter;
ASSERT_FALSE(
Verify(model.data(), model.size(), MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("Invalid flatbuffer format"));
}
TEST(VerifyModel, TestUnsupportedVersion) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, 1, 0,
0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
MockErrorReporter mock_reporter;
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("Invalid model version 1"));
}
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
0,
0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
builder.GetSize());
for (size_t i = 0; i < model_content.size(); i++) {
model_content[i] = (model_content[i] + 137) % 255;
EXPECT_FALSE(Verify(model_content.data(), model_content.size(),
MutableOpResolver{}, DefaultErrorReporter()))
<< "Fail at position: " << i;
}
}
TEST(VerifyModel, TestIntTensorShapeIsGreaterThanBuffer) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires 6 bytes, but is "
"allocated with 4 bytes buffer"));
}
TEST(VerifyModel, TestIntTensorShapeIsSmallerThanBuffer) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({2, 1}, TensorType_UINT8, {1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires 2 bytes, but is "
"allocated with 4 bytes buffer"));
}
TEST(VerifyModel, TestIntTensorShapeOverflow) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({1024, 2048, 4096}, TensorType_UINT8, {1, 2, 3, 4},
"input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex("Tensor input dimension overflow"));
}
TEST(VerifyModel, TensorBufferIsNotValid) {
flatbuffers::FlatBufferBuilder builder;
std::vector<int> shape = {2, 3};
auto tensors = builder.CreateVector(std::vector<flatbuffers::Offset<Tensor>>{
CreateTensorDirect(builder, &shape, TensorType_INT32, 2,
"input", 0)});
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>(
{CreateSubGraph(builder, tensors, 0, 0,
0, builder.CreateString("Main"))});
auto buffers = builder.CreateVector(std::vector<flatbuffers::Offset<Buffer>>{
CreateBuffer(builder, builder.CreateVector(
std::vector<uint8_t>{1, 2, 3, 4, 5, 6})),
});
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, 0,
builder.CreateVector(subgraph),
builder.CreateString("SmartReply"), buffers);
::tflite::FinishModelBuffer(builder, model);
MockErrorReporter mock_reporter;
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
MutableOpResolver{}, &mock_reporter));
EXPECT_THAT(
mock_reporter.GetAsString(),
::testing::ContainsRegex("Missing 'operators' section in subgraph."));
}
TEST(VerifyModel, StringTensorIsEmpty) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor({2}, TensorType_STRING, {0x00}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ(builder.GetErrorString(), "String tensor input is invalid (empty)");
}
TEST(VerifyModel, StringTensorHasInvalidNumString) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{0x00, 0x00, 0x00, 0x20, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'},
"input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer requires at least -2147483640 bytes, "
"but is allocated with 18 bytes"));
}
TEST(VerifyModel, StringTensorOffsetTooSmall) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 12, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer initial offset must be: 16"));
}
TEST(VerifyModel, StringTensorOffsetOutOfRange) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 22, 0, 0, 0, 'A', 'B'}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer is invalid: index 2"));
}
TEST(VerifyModel, StringTensorIsLargerThanRequired) {
TfLiteFlatbufferModelBuilder builder;
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B', 'C'},
"input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"String tensor input buffer last offset must be 19"));
}
TEST(VerifyModel, AllOpsAreSupported) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output2");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "CustomOp");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, UseUnsupportedBuiltinOps) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_SUB}, {"CustomOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
EXPECT_EQ("", builder.GetErrorString());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Unsupported builtin op: ADD, version: 1"));
}
TEST(VerifyModel, UseUnsupportedCustomOps) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "Not supported");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
EXPECT_EQ("", builder.GetErrorString());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"Unsupported custom op: Not supported, version: 1"));
}
TEST(VerifyModel, UseUnnamedCustomOps) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
EXPECT_EQ("", builder.GetErrorString());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(builder.GetErrorString(),
::testing::ContainsRegex(
"Invalid custom op name, cannot be null/empty."));
}
TEST(VerifyModel, UnpopulatedInputToOp) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({1, 2}, {3}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor({2, 3}, TensorType_UINT8, {}, "invalid_input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 2}, {3});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Input tensor 1 to op 0 (CUSTOM) is not produced",
builder.GetErrorString());
}
TEST(VerifyModel, MultipleOpsOutputToSameTensor) {
TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "CustomOp");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ(
"Output tensor 2 to op 1 (CUSTOM) is an output from another op. "
"There is a cycle in the graph",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsAConstantTensor) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {1, 2, 3, 4, 5, 6}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a constant",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsSubgraphInput) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1, 2}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a subgraph input",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsAVariable) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output", true);
builder.FinishModel({0, 1}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a variable",
builder.GetErrorString());
}
TEST(VerifyModel, OpWithOptionalTensor) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({kTfLiteOptionalTensor, 0, 1}, {2},
BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TypedTensorShapeMismatchWithTensorBufferSize) {
TfLiteFlatbufferModelBuilder builder;
for (int tensor_type = TensorType_MIN; tensor_type <= TensorType_MAX;
++tensor_type) {
if (tensor_type == TensorType_STRING) continue;
builder.AddTensor({2, 3}, static_cast<TensorType>(tensor_type),
{1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires .* bytes, but is "
"allocated with 4 bytes buffer"));
}
}
TEST(VerifyModel, TypedTensorShapeMatchesTensorBufferSize) {
TfLiteFlatbufferModelBuilder builder;
for (int tensor_type = TensorType_MIN; tensor_type <= TensorType_MAX;
++tensor_type) {
if (tensor_type == TensorType_STRING ||
tensor_type == TensorType_RESOURCE || tensor_type == TensorType_VARIANT)
continue;
TfLiteType lite_type = kTfLiteNoType;
ASSERT_EQ(ConvertTensorType(static_cast<TensorType>(tensor_type),
&lite_type, nullptr),
kTfLiteOk);
size_t size_bytes = 0;
ASSERT_EQ(GetSizeOfType(nullptr, lite_type, &size_bytes),
kTfLiteOk);
std::vector<uint8_t> buffer(size_bytes);
builder.AddTensor({1}, static_cast<TensorType>(tensor_type), buffer,
"input");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
}
}
TEST(VerifyModel, SimpleValidSparseTensor) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_TRUE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
}
TEST(VerifyModel, InvalidSparseTensorMissingBlockMap) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->block_map = {};
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, InvalidSparseTensorIndexOutOfBound) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->dim_metadata[1]->array_indices.AsUint8Vector()->values[1] =
5;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, InvalidSparseTensorInvalidBuffer) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
scoped_model->buffers[1]->data = {0, 1, 2, 3, 4, 5, 6, 7};
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex(
"requires 12 bytes, but is allocated with 8 bytes buffer"));
}
TEST(VerifyModel, InvalidSparseTensorInvalidTraversalOrder) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->traversal_order[0] = 10;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, ValidSparseTensorBCSC) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->traversal_order = {1, 0, 3, 2};
tensor->sparsity->block_map = {0, 1};
tensor->sparsity->dim_metadata[0]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[0]->dense_size = 2;
tensor->sparsity->dim_metadata[1]->format = DimensionType_SPARSE_CSR;
tensor->sparsity->dim_metadata[1]->array_segments.AsUint8Vector()->values = {
0, 1, 3};
tensor->sparsity->dim_metadata[1]->array_indices.AsUint8Vector()->values = {
0, 0, 1};
tensor->sparsity->dim_metadata[2]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[2]->dense_size = 2;
tensor->sparsity->dim_metadata[3]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[3]->dense_size = 2;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_TRUE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
}
} |
966 | cpp | tensorflow/tensorflow | stable_delegate_registry | tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.cc | tensorflow/lite/core/acceleration/configuration/stable_delegate_registry_test.cc | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_STABLE_DELEGATE_REGISTRY_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_STABLE_DELEGATE_REGISTRY_H_
#include <string>
#include <unordered_map>
#include "absl/synchronization/mutex.h"
#include "tensorflow/lite/core/acceleration/configuration/c/stable_delegate.h"
namespace tflite {
namespace delegates {
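// Process-wide registry that maps stable delegate names to their
// TfLiteStableDelegate descriptors. Registration and lookup go through a
// mutex-guarded singleton, so both are safe to call from multiple threads.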
class StableDelegateRegistry {
public:
static void RegisterStableDelegate(const TfLiteStableDelegate* delegate);
static const TfLiteStableDelegate* RetrieveStableDelegate(
const std::string& name);
private:
static StableDelegateRegistry* GetSingleton();
void RegisterStableDelegateImpl(const TfLiteStableDelegate* delegate);
const TfLiteStableDelegate* RetrieveStableDelegateImpl(
const std::string& name);
absl::Mutex mutex_;
std::unordered_map<std::string, const TfLiteStableDelegate*> registry_
ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include <string>
#include "absl/synchronization/mutex.h"
namespace tflite {
namespace delegates {
void StableDelegateRegistry::RegisterStableDelegate(
const TfLiteStableDelegate* delegate) {
auto* const instance = StableDelegateRegistry::GetSingleton();
instance->RegisterStableDelegateImpl(delegate);
}
const TfLiteStableDelegate* StableDelegateRegistry::RetrieveStableDelegate(
const std::string& name) {
auto* const instance = StableDelegateRegistry::GetSingleton();
return instance->RetrieveStableDelegateImpl(name);
}
void StableDelegateRegistry::RegisterStableDelegateImpl(
const TfLiteStableDelegate* delegate) {
absl::MutexLock lock(&mutex_);
registry_[delegate->delegate_name] = delegate;
}
const TfLiteStableDelegate* StableDelegateRegistry::RetrieveStableDelegateImpl(
const std::string& name) {
absl::MutexLock lock(&mutex_);
if (registry_.find(name) == registry_.end()) {
return nullptr;
} else {
return registry_[name];
}
}
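// Returns the lazily constructed, intentionally leaked singleton instance.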
StableDelegateRegistry* StableDelegateRegistry::GetSingleton() {
static auto* instance = new StableDelegateRegistry();
return instance;
}
}
} | #include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include <gtest/gtest.h>
namespace {
using tflite::delegates::StableDelegateRegistry;
TfLiteStableDelegate CreateTestStableDelegate() {
TfLiteStableDelegate stable_delegate = {TFL_STABLE_DELEGATE_ABI_VERSION,
"test_delegate", "V1.0.0", nullptr};
return stable_delegate;
}
class StableDelegateRegistryTest : public testing::Test {
public:
void SetUp() override {
stable_delegate_ = CreateTestStableDelegate();
StableDelegateRegistry::RegisterStableDelegate(&stable_delegate_);
}
protected:
TfLiteStableDelegate stable_delegate_;
};
TEST_F(StableDelegateRegistryTest, TestRetrieval) {
EXPECT_EQ(StableDelegateRegistry::RetrieveStableDelegate("test_delegate"),
&stable_delegate_);
}
TEST_F(StableDelegateRegistryTest, NoRegistrationFound) {
EXPECT_EQ(
StableDelegateRegistry::RetrieveStableDelegate("not_valid_delegate"),
nullptr);
}
} |
967 | cpp | tensorflow/tensorflow | nnapi_plugin | tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin_test.cc | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/nnapi_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
extern "C" {
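// Builds a StatefulNnApiDelegate from the serialized TFLiteSettings; when the
// settings carry an NNAPI Support Library handle, the delegate is constructed
// against that driver instead of the platform NNAPI.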
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
tflite::delegates::NnapiPlugin nnapi_plugin(*tflite_settings);
auto support_library_handle = nnapi_plugin.GetSupportLibraryHandle();
if (support_library_handle) {
auto nnapi_support_library_driver =
reinterpret_cast<const NnApiSLDriverImplFL5*>(support_library_handle);
return new tflite::StatefulNnApiDelegate(nnapi_support_library_driver,
nnapi_plugin.Options());
}
return new tflite::StatefulNnApiDelegate(nnapi_plugin.Options());
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
}
static int DelegateErrno(TfLiteDelegate* from_delegate) {
auto nnapi_delegate =
static_cast<tflite::StatefulNnApiDelegate*>(from_delegate);
return nnapi_delegate->GetNnApiErrno();
}
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
class NnapiTest : public testing::Test {
public:
void SetUp() override {
NNAPISettingsBuilder nnapi_settings_builder(flatbuffer_builder_);
flatbuffers::Offset<NNAPISettings> nnapi_settings =
nnapi_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_nnapi_settings(nnapi_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~NnapiTest() override {}
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
TEST_F(NnapiTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate = TfLiteNnapiDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteNnapiDelegatePluginCApi()->destroy(delegate);
}
TEST_F(NnapiTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate = TfLiteNnapiDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteNnapiDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteNnapiDelegatePluginCApi()->destroy(delegate);
}
} |
968 | cpp | tensorflow/tensorflow | xnnpack_plugin | tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin_test.cc | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_XNNPACK_PLUGIN_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_XNNPACK_PLUGIN_H_
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
extern "C" {
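// Translates XNNPackSettings (thread count, flags, optional weight-cache file
// path) from the serialized TFLiteSettings into TfLiteXNNPackDelegateOptions
// and creates the delegate.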
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
auto options(TfLiteXNNPackDelegateOptionsDefault());
const auto* xnnpack_settings = tflite_settings->xnnpack_settings();
if (xnnpack_settings) {
options.num_threads = xnnpack_settings->num_threads();
if (xnnpack_settings->flags()) {
options.flags = xnnpack_settings->flags();
}
if (xnnpack_settings->experimental_weight_cache_file_path()) {
options.experimental_weight_cache_file_path =
xnnpack_settings->experimental_weight_cache_file_path()->c_str();
}
}
return TfLiteXNNPackDelegateCreate(&options);
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
TfLiteXNNPackDelegateDelete(delegate);
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
class XnnpackTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
void SetUp() override {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~XnnpackTest() override = default;
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
constexpr int XnnpackTest::kNumThreadsForTest;
TEST_F(XnnpackTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteXnnpackDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, SetsCorrectThreadCount) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
pthreadpool_t threadpool =
static_cast<pthreadpool_t>(TfLiteXNNPackDelegateGetThreadPool(delegate));
int thread_count = pthreadpool_get_threads_count(threadpool);
EXPECT_EQ(thread_count, kNumThreadsForTest);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsByDefault) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesSpecifiedFlagsWhenNonzero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsWhenZero) {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
} |
969 | cpp | tensorflow/tensorflow | register | tensorflow/lite/core/kernels/register.cc | tensorflow/lite/core/kernels/register_test.cc | #ifndef MLIR_HLO_DIALECT_MHLO_IR_REGISTER_H_
#define MLIR_HLO_DIALECT_MHLO_IR_REGISTER_H_
namespace mlir {
class DialectRegistry;
namespace mhlo {
void registerAllMhloDialects(DialectRegistry ®istry);
}
}
#endif
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tflite_with_xnnpack_optional.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_NUMERIC_VERIFY();
TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
TfLiteRegistration* Register_MFCC();
TfLiteRegistration* Register_DETECTION_POSTPROCESS();
}
namespace builtin {
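// Registers every builtin operator together with its supported version range
// and installs the default XNNPACK delegate creator.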
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_ABS, Register_ABS(), 1,
5);
AddBuiltin(BuiltinOperator_HARD_SWISH, Register_HARD_SWISH());
AddBuiltin(BuiltinOperator_RELU, Register_RELU(), 1,
3);
AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
AddBuiltin(BuiltinOperator_RELU_0_TO_1, Register_RELU_0_TO_1());
AddBuiltin(BuiltinOperator_RELU6, Register_RELU6(), 1,
3);
AddBuiltin(BuiltinOperator_TANH, Register_TANH(), 1,
3);
AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC(),
1,
3);
AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(),
1,
8);
AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(),
1,
7);
AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(),
1,
4);
AddBuiltin(BuiltinOperator_RNN, Register_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
Register_BIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
Register_UNIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
Register_EMBEDDING_LOOKUP_SPARSE());
AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
1,
12);
AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(),
1,
3);
AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(),
1,
4);
AddBuiltin(BuiltinOperator_ADD, Register_ADD(),
1,
5);
AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND(),
1,
4);
AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND(),
1,
4);
AddBuiltin(BuiltinOperator_MUL, Register_MUL(), 1,
7);
AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION(),
1,
2);
AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
Register_LOCAL_RESPONSE_NORMALIZATION());
AddBuiltin(BuiltinOperator_LSTM, Register_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
Register_BIDIRECTIONAL_SEQUENCE_LSTM(), 1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
Register_UNIDIRECTIONAL_SEQUENCE_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_PAD, Register_PAD(), 1,
4);
AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), 1,
4);
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR(),
1,
4);
AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
Register_RESIZE_NEAREST_NEIGHBOR(),
1,
4);
AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(),
1,
2);
AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, Register_DEPTH_TO_SPACE(),
1,
2);
AddBuiltin(BuiltinOperator_GATHER, Register_GATHER(),
1,
7);
AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(),
1,
6);
AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(),
1,
3);
AddBuiltin(BuiltinOperator_DIV, Register_DIV(),
1,
2);
AddBuiltin(BuiltinOperator_SUB, Register_SUB(),
1,
5);
AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(),
1,
4);
AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V(),
1,
2);
AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(),
1,
2);
AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE(),
1,
8);
AddBuiltin(BuiltinOperator_EXP, Register_EXP(),
1,
2);
AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2(),
1,
3);
AddBuiltin(BuiltinOperator_LOG, Register_LOG(),
1,
2);
AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX(),
1,
2);
AddBuiltin(BuiltinOperator_CAST, Register_CAST(),
1,
6);
AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(),
1,
6);
AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_GREATER, Register_GREATER(),
1,
2);
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_LESS, Register_LESS(),
1,
3);
AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL(),
1,
2);
AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
AddBuiltin(BuiltinOperator_CEIL, Register_CEIL());
AddBuiltin(BuiltinOperator_ROUND, Register_ROUND());
AddBuiltin(BuiltinOperator_NEG, Register_NEG());
AddBuiltin(BuiltinOperator_SELECT, Register_SELECT(),
1,
4);
AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(),
1,
2);
AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(),
1,
6);
AddBuiltin(BuiltinOperator_SIN, Register_SIN());
AddBuiltin(BuiltinOperator_COS, Register_COS());
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV(),
1,
5);
AddBuiltin(BuiltinOperator_TILE, Register_TILE(),
1,
3);
AddBuiltin(BuiltinOperator_SUM, Register_SUM(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY());
AddBuiltin(BuiltinOperator_REDUCE_ALL, Register_REDUCE_ALL());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE(),
1,
3);
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL(),
1,
4);
AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT(),
1,
3);
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
AddBuiltin(BuiltinOperator_RANK, Register_RANK());
AddBuiltin(BuiltinOperator_POW, Register_POW());
AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
AddBuiltin(BuiltinOperator_PACK, Register_PACK(),
1,
4);
AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK(),
1,
4);
AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV(),
1,
3);
AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD(),
1,
2);
AddBuiltin(BuiltinOperator_RANGE, Register_RANGE(),
1,
2);
AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU(),
1,
2);
AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE(),
1,
2);
AddBuiltin(BuiltinOperator_FILL, Register_FILL(),
1,
4);
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD(),
1,
3);
AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2(),
1,
3);
AddBuiltin(BuiltinOperator_ADD_N, Register_ADD_N());
AddBuiltin(BuiltinOperator_GATHER_ND, Register_GATHER_ND(),
1,
5);
AddBuiltin(BuiltinOperator_WHERE, Register_WHERE(),
1,
2);
AddBuiltin(BuiltinOperator_ELU, Register_ELU());
AddBuiltin(BuiltinOperator_REVERSE_SEQUENCE, Register_REVERSE_SEQUENCE());
AddBuiltin(BuiltinOperator_MATRIX_DIAG, Register_MATRIX_DIAG());
AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(),
1,
3);
AddBuiltin(BuiltinOperator_MATRIX_SET_DIAG, Register_MATRIX_SET_DIAG());
AddBuiltin(BuiltinOperator_IF, tflite::ops::builtin::Register_IF());
AddBuiltin(BuiltinOperator_WHILE, tflite::ops::builtin::Register_WHILE());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V4,
Register_NON_MAX_SUPPRESSION_V4());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V5,
Register_NON_MAX_SUPPRESSION_V5());
AddBuiltin(BuiltinOperator_SCATTER_ND, Register_SCATTER_ND());
AddBuiltin(BuiltinOperator_DENSIFY, Register_DENSIFY());
AddBuiltin(BuiltinOperator_SEGMENT_SUM, Register_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_BATCH_MATMUL, Register_BATCH_MATMUL(),
1,
4);
AddBuiltin(BuiltinOperator_CUMSUM, Register_CUMSUM());
AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(),
2,
3);
AddBuiltin(BuiltinOperator_CALL_ONCE,
tflite::ops::builtin::Register_CALL_ONCE());
AddBuiltin(BuiltinOperator_RFFT2D, Register_RFFT2D());
AddBuiltin(BuiltinOperator_CONV_3D, Register_CONV_3D());
AddBuiltin(BuiltinOperator_IMAG, Register_IMAG());
AddBuiltin(BuiltinOperator_REAL, Register_REAL());
AddBuiltin(BuiltinOperator_COMPLEX_ABS, Register_COMPLEX_ABS());
AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS());
AddBuiltin(BuiltinOperator_HASHTABLE, Register_HASHTABLE());
AddBuiltin(BuiltinOperator_HASHTABLE_FIND, Register_HASHTABLE_FIND());
AddBuiltin(BuiltinOperator_HASHTABLE_IMPORT, Register_HASHTABLE_IMPORT());
AddBuiltin(BuiltinOperator_HASHTABLE_SIZE, Register_HASHTABLE_SIZE());
AddBuiltin(BuiltinOperator_CONV_3D_TRANSPOSE, Register_CONV_3D_TRANSPOSE());
AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE());
AddBuiltin(BuiltinOperator_READ_VARIABLE, Register_READ_VARIABLE());
AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, Register_ASSIGN_VARIABLE());
AddBuiltin(BuiltinOperator_MULTINOMIAL, Register_MULTINOMIAL());
AddBuiltin(BuiltinOperator_RANDOM_STANDARD_NORMAL,
Register_RANDOM_STANDARD_NORMAL());
AddBuiltin(BuiltinOperator_BUCKETIZE, Register_BUCKETIZE());
AddBuiltin(BuiltinOperator_RANDOM_UNIFORM, Register_RANDOM_UNIFORM());
AddBuiltin(BuiltinOperator_GELU, Register_GELU(),
1,
2);
AddBuiltin(BuiltinOperator_DYNAMIC_UPDATE_SLICE,
Register_DYNAMIC_UPDATE_SLICE(),
1,
2);
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_PROD,
Register_UNSORTED_SEGMENT_PROD());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MAX,
Register_UNSORTED_SEGMENT_MAX());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MIN,
Register_UNSORTED_SEGMENT_MIN());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_SUM,
Register_UNSORTED_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_ATAN2, Register_ATAN2());
AddBuiltin(BuiltinOperator_SIGN, Register_SIGN(),
1,
2);
AddBuiltin(BuiltinOperator_BITCAST, Register_BITCAST());
AddBuiltin(BuiltinOperator_BITWISE_XOR, Register_BITWISE_XOR());
AddBuiltin(BuiltinOperator_RIGHT_SHIFT, Register_RIGHT_SHIFT());
AddBuiltin(BuiltinOperator_STABLEHLO_SCATTER, Register_STABLEHLO_SCATTER());
AddBuiltin(BuiltinOperator_DILATE, Register_DILATE());
AddBuiltin(BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR,
Register_STABLEHLO_RNG_BIT_GENERATOR());
AddBuiltin(BuiltinOperator_REDUCE_WINDOW, Register_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
Register_STABLEHLO_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_GATHER, Register_STABLEHLO_GATHER());
AddBuiltin(BuiltinOperator_STABLEHLO_ADD, Register_STABLEHLO_ADD());
AddBuiltin(BuiltinOperator_STABLEHLO_MULTIPLY, Register_STABLEHLO_MULTIPLY());
AddBuiltin(BuiltinOperator_STABLEHLO_MAXIMUM, Register_STABLEHLO_MAXIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_MINIMUM, Register_STABLEHLO_MINIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_PAD, Register_STABLEHLO_PAD());
AddBuiltin(BuiltinOperator_STABLEHLO_COMPOSITE,
Register_STABLEHLO_COMPOSITE());
AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY());
AddCustom("Mfcc", tflite::ops::custom::Register_MFCC());
AddCustom("AudioSpectrogram",
tflite::ops::custom::Register_AUDIO_SPECTROGRAM());
AddCustom("TFLite_Detection_PostProcess",
tflite::ops::custom::Register_DETECTION_POSTPROCESS());
may_directly_contain_user_defined_ops_ = false;
delegate_creators_.push_back([](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context,
XNNPackQS8Options::default_value);
});
}
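// Variant of the resolver that replaces the default delegate creator so that
// XNNPACK's unsigned-quantized (QU8) support can be explicitly enabled or
// disabled.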
BuiltinOpResolverWithXNNPACK::BuiltinOpResolverWithXNNPACK(
bool enable_xnnpack_unsigned_quantized) {
delegate_creators_.clear();
XNNPackQS8Options xnnpack_qs8_options = enable_xnnpack_unsigned_quantized
? XNNPackQS8Options::enabled
: XNNPackQS8Options::disabled;
delegate_creators_.push_back([xnnpack_qs8_options](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context, xnnpack_qs8_options);
});
}
}
}
} | #include "tensorflow/lite/core/kernels/register.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite::ops::builtin {
namespace {
TEST(BuiltinOpResolverTest, SupportsAdd) {
BuiltinOpResolver builtin_op_resolver;
const TfLiteRegistration *add =
builtin_op_resolver.FindOp(::tflite::BuiltinOperator_ADD, 1);
ASSERT_NE(add, nullptr);
ASSERT_NE(add->init, nullptr);
ASSERT_NE(add->free, nullptr);
ASSERT_NE(add->prepare, nullptr);
ASSERT_NE(add->invoke, nullptr);
}
TEST(BuiltinOpResolverTest, CopySupportsAdd) {
BuiltinOpResolver builtin_op_resolver;
MutableOpResolver copy = builtin_op_resolver;
const TfLiteRegistration *add = copy.FindOp(::tflite::BuiltinOperator_ADD, 1);
ASSERT_NE(add, nullptr);
ASSERT_NE(add->init, nullptr);
ASSERT_NE(add->free, nullptr);
ASSERT_NE(add->prepare, nullptr);
ASSERT_NE(add->invoke, nullptr);
}
#if !defined(TFLITE_WITHOUT_XNNPACK)
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8_QU8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, Disable_QU8) {
BuiltinOpResolverWithXNNPACK builtin_op_resolver(false);
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, 0);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
#endif
}
} |
970 | cpp | tensorflow/tensorflow | serialization | tensorflow/lite/delegates/gpu/gl/serialization.cc | tensorflow/lite/delegates/gpu/gl/serialization_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_SERIALIZATION_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_SERIALIZATION_H_
#include <cstdint>
#include <functional>
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiled_model_generated.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
struct CompiledModelOptions {
bool dynamic_batch = false;
};
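// Accumulates shader sources and program descriptions and serializes them into
// a single FlatBuffer-backed compiled model.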
class SerializedCompiledModelBuilder {
public:
SerializedCompiledModelBuilder() : builder_(32 * 1024) {}
void AddShader(const std::string& shader_src);
void AddProgram(const std::vector<Variable>& parameters,
const std::vector<Object>& objects,
const uint3& workgroup_size, const uint3& num_workgroups,
size_t shader_index);
absl::Span<const uint8_t> Finalize(const CompiledModelOptions& options);
private:
std::vector<flatbuffers::Offset<flatbuffers::String>> shaders_;
std::vector<flatbuffers::Offset<data::Program>> programs_;
::flatbuffers::FlatBufferBuilder builder_;
};
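// Callback interface invoked by DeserializeCompiledModel once per shader, once
// per program, and once for the global compiled-model options.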
class DeserializationHandler {
public:
virtual ~DeserializationHandler() = default;
virtual absl::Status OnShader(absl::Span<const char> shader_src) = 0;
virtual absl::Status OnProgram(const std::vector<Variable>& parameters,
const std::vector<Object>& objects,
const uint3& workgroup_size,
const uint3& num_workgroups,
size_t shader_index) = 0;
virtual void OnOptions(const CompiledModelOptions& options) = 0;
};
absl::Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
DeserializationHandler* handler);
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/serialization.h"
#include <string>
#include <utility>
#include <variant>
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
using flatbuffers::Offset;
using flatbuffers::Vector;
namespace {
struct ParameterValueGetter {
Offset<void> operator()(int32_t value) {
auto offset = builder->CreateVector(std::vector<int32_t>{value});
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const int2& value) {
auto offset = builder->CreateVector(std::vector<int32_t>{value.x, value.y});
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const int4& value) {
auto offset = builder->CreateVector(
std::vector<int32_t>{value.x, value.y, value.z, value.w});
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const std::vector<int2>& value) {
std::vector<int32_t> d(value.size() * 2);
for (size_t i = 0; i < value.size(); ++i) {
d[i * 2] = value[i].x;
d[i * 2 + 1] = value[i].y;
}
auto offset = builder->CreateVector(d);
data::DataInt32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(uint32_t value) {
auto offset = builder->CreateVector(std::vector<uint32_t>{value});
data::DataUint32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const uint4& value) {
auto offset = builder->CreateVector(
std::vector<uint32_t>{value.x, value.y, value.z, value.w});
data::DataUint32Builder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(float value) {
auto offset = builder->CreateVector(std::vector<float>{value});
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const float2& value) {
auto offset = builder->CreateVector(std::vector<float>{value.x, value.y});
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const float4& value) {
auto offset = builder->CreateVector(
std::vector<float>{value.x, value.y, value.z, value.w});
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
Offset<void> operator()(const std::vector<float4>& value) {
std::vector<float> d(value.size() * 4);
for (size_t i = 0; i < value.size(); ++i) {
d[i * 4] = value[i].x;
d[i * 4 + 1] = value[i].y;
d[i * 4 + 2] = value[i].z;
d[i * 4 + 3] = value[i].w;
}
auto offset = builder->CreateVector(d);
data::DataFloatBuilder data(*builder);
data.add_data(offset);
return data.Finish().Union();
}
::flatbuffers::FlatBufferBuilder* builder;
};
struct DataVariantTypeGetter {
data::DataVariant operator()(int32_t) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(const int2&) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(const int4&) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(const std::vector<int2>&) const {
return data::DataVariant::DataInt32;
}
data::DataVariant operator()(uint32_t) const {
return data::DataVariant::DataUint32;
}
data::DataVariant operator()(const uint4&) const {
return data::DataVariant::DataUint32;
}
data::DataVariant operator()(float) const {
return data::DataVariant::DataFloat;
}
data::DataVariant operator()(const float2&) const {
return data::DataVariant::DataFloat;
}
data::DataVariant operator()(const float4&) const {
return data::DataVariant::DataFloat;
}
data::DataVariant operator()(const std::vector<float4>&) const {
return data::DataVariant::DataFloat;
}
};
struct ParameterTypeGetter {
data::ParameterType operator()(int32_t) const {
return data::ParameterType::INT32;
}
data::ParameterType operator()(const int2&) const {
return data::ParameterType::INT32;
}
data::ParameterType operator()(const int4&) const {
return data::ParameterType::INT32;
}
data::ParameterType operator()(const std::vector<int2>&) const {
return data::ParameterType::INT32_2;
}
data::ParameterType operator()(uint32_t) const {
return data::ParameterType::UINT32;
}
data::ParameterType operator()(const uint4&) const {
return data::ParameterType::UINT32;
}
data::ParameterType operator()(float) const {
return data::ParameterType::FLOAT32;
}
data::ParameterType operator()(const float2&) const {
return data::ParameterType::FLOAT32;
}
data::ParameterType operator()(const float4&) const {
return data::ParameterType::FLOAT32;
}
data::ParameterType operator()(const std::vector<float4>&) const {
return data::ParameterType::FLOAT32;
}
};
data::DataType ToFB(DataType type) {
switch (type) {
case DataType::INT16:
return data::DataType::INT16;
case DataType::INT32:
return data::DataType::INT32;
case DataType::FLOAT16:
return data::DataType::FLOAT16;
case DataType::FLOAT32:
return data::DataType::FLOAT32;
default:
return data::DataType::UNKNOWN;
}
}
data::ObjectType ToFB(ObjectType type) {
switch (type) {
case ObjectType::TEXTURE:
return data::ObjectType::TEXTURE;
case ObjectType::BUFFER:
return data::ObjectType::BUFFER;
default:
return data::ObjectType::UNKNOWN;
}
}
struct ObjectSizeGetter {
Offset<void> operator()(const uint3& shape) {
data::Uint3Builder shape_builder(*builder);
shape_builder.add_x(shape.x);
shape_builder.add_y(shape.y);
shape_builder.add_z(shape.z);
return shape_builder.Finish().Union();
}
Offset<void> operator()(const uint2& shape) {
data::Uint2Builder shape_builder(*builder);
shape_builder.add_x(shape.x);
shape_builder.add_y(shape.y);
return shape_builder.Finish().Union();
}
Offset<void> operator()(uint32_t shape) {
data::Uint1Builder shape_builder(*builder);
shape_builder.add_x(shape);
return shape_builder.Finish().Union();
}
::flatbuffers::FlatBufferBuilder* builder;
};
struct ObjectSizeTypeGetter {
data::ObjectSize operator()(const uint3&) const {
return data::ObjectSize::Uint3;
}
data::ObjectSize operator()(const uint2&) const {
return data::ObjectSize::Uint2;
}
data::ObjectSize operator()(const uint32_t) const {
return data::ObjectSize::Uint1;
}
};
struct ObjectGetter {
Offset<void> operator()(const ObjectData& data) {
auto fb_data = builder->CreateVector(data);
data::ObjectDataBuilder data_builder(*builder);
data_builder.add_data(fb_data);
return data_builder.Finish().Union();
}
Offset<void> operator()(ObjectRef ref) {
data::ObjectRefBuilder ref_builder(*builder);
ref_builder.add_global_id(ref);
return ref_builder.Finish().Union();
}
::flatbuffers::FlatBufferBuilder* builder;
};
struct ObjectTypeGetter {
data::ObjectVariant operator()(const ObjectData&) const {
return data::ObjectVariant::ObjectData;
}
data::ObjectVariant operator()(const ObjectRef&) const {
return data::ObjectVariant::ObjectRef;
}
};
data::AccessType ToFB(AccessType type) {
switch (type) {
case AccessType::READ:
return data::AccessType::READ;
case AccessType::WRITE:
return data::AccessType::WRITE;
case AccessType::READ_WRITE:
return data::AccessType::READ_WRITE;
}
}
Offset<data::Uint3> Encode(const uint3& v,
::flatbuffers::FlatBufferBuilder* builder) {
data::Uint3Builder uint3_builder(*builder);
uint3_builder.add_x(v.x);
uint3_builder.add_y(v.y);
uint3_builder.add_z(v.z);
return uint3_builder.Finish();
}
Offset<data::Parameters> Encode(const CompiledModelOptions& options,
::flatbuffers::FlatBufferBuilder* builder) {
data::ParametersBuilder params_builder(*builder);
params_builder.add_dynamic_batch(options.dynamic_batch);
return params_builder.Finish();
}
}
void SerializedCompiledModelBuilder::AddShader(const std::string& shader_src) {
shaders_.push_back(builder_.CreateString(shader_src));
}
void SerializedCompiledModelBuilder::AddProgram(
const std::vector<Variable>& parameters, const std::vector<Object>& objects,
const uint3& workgroup_size, const uint3& num_workgroups,
size_t shader_index) {
Offset<data::Uint3> fb_workgroups = Encode(num_workgroups, &builder_);
Offset<data::Uint3> fb_workgroup_size = Encode(workgroup_size, &builder_);
Offset<Vector<Offset<data::UniformParameter>>> fb_params;
{
std::vector<Offset<data::UniformParameter>> offsets;
for (const Variable& param : parameters) {
auto name = builder_.CreateString(param.name);
auto data = std::visit(ParameterValueGetter{&builder_}, param.value);
data::UniformParameterBuilder builder(builder_);
builder.add_name(name);
builder.add_data_type(std::visit(DataVariantTypeGetter{}, param.value));
builder.add_data(data);
builder.add_type(std::visit(ParameterTypeGetter{}, param.value));
offsets.push_back(builder.Finish());
}
fb_params = builder_.CreateVector(offsets);
}
Offset<Vector<Offset<data::Object>>> fb_objects;
{
std::vector<Offset<data::Object>> offsets;
for (const Object& object : objects) {
auto object_variant = std::visit(ObjectGetter{&builder_}, object.object);
auto size = std::visit(ObjectSizeGetter{&builder_}, object.size);
data::ObjectBuilder builder(builder_);
builder.add_access(ToFB(object.access));
builder.add_binding(object.binding);
builder.add_type(ToFB(object.object_type));
builder.add_data_type(ToFB(object.data_type));
builder.add_size_type(std::visit(ObjectSizeTypeGetter{}, object.size));
builder.add_size(size);
builder.add_object_type(std::visit(ObjectTypeGetter{}, object.object));
builder.add_object(object_variant);
offsets.push_back(builder.Finish());
}
fb_objects = builder_.CreateVector(offsets);
}
data::ProgramBuilder program_builder(builder_);
program_builder.add_number_workgroups(fb_workgroups);
program_builder.add_workgroup_size(fb_workgroup_size);
program_builder.add_parameters(fb_params);
program_builder.add_objects(fb_objects);
program_builder.add_shader_index(shader_index);
programs_.push_back(program_builder.Finish());
}
absl::Span<const uint8_t> SerializedCompiledModelBuilder::Finalize(
const CompiledModelOptions& options) {
auto shaders = builder_.CreateVector(shaders_);
auto programs = builder_.CreateVector(programs_);
auto parameters = Encode(options, &builder_);
data::CompiledModelBuilder model_builder(builder_);
model_builder.add_shaders(shaders);
model_builder.add_programs(programs);
model_builder.add_parameters(parameters);
data::FinishCompiledModelBuffer(builder_, model_builder.Finish());
return absl::MakeConstSpan(builder_.GetBufferPointer(), builder_.GetSize());
}
namespace {
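// Converts a serialized UniformParameter back into a Variable, validating that
// the payload type and element count match the declared parameter type.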
absl::Status ParseParameter(const data::UniformParameter& fb_parameter,
Variable* parameter) {
parameter->name = fb_parameter.name()->str();
switch (fb_parameter.type()) {
case data::ParameterType::INT32: {
auto* ptr = fb_parameter.data_as_DataInt32();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
switch (ptr->data()->size()) {
case 1:
parameter->value = (*ptr->data())[0];
break;
case 2:
parameter->value = int2((*ptr->data())[0], (*ptr->data())[1]);
break;
case 4:
parameter->value = int4((*ptr->data())[0], (*ptr->data())[1],
(*ptr->data())[2], (*ptr->data())[3]);
break;
default:
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
break;
}
case data::ParameterType::UINT32: {
auto* ptr = fb_parameter.data_as_DataUint32();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
switch (ptr->data()->size()) {
case 1:
parameter->value = (*ptr->data())[0];
break;
case 4:
parameter->value = uint4((*ptr->data())[0], (*ptr->data())[1],
(*ptr->data())[2], (*ptr->data())[3]);
break;
default:
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
break;
}
case data::ParameterType::FLOAT32: {
auto* ptr = fb_parameter.data_as_DataFloat();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
switch (ptr->data()->size()) {
case 1:
parameter->value = (*ptr->data())[0];
break;
case 2:
parameter->value = float2((*ptr->data())[0], (*ptr->data())[1]);
break;
case 4:
parameter->value = float4((*ptr->data())[0], (*ptr->data())[1],
(*ptr->data())[2], (*ptr->data())[3]);
break;
default:
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
break;
}
case data::ParameterType::INT32_2: {
auto* ptr = fb_parameter.data_as_DataInt32();
if (ptr == nullptr) {
return absl::InvalidArgumentError("Unexpected data type '" +
parameter->name + "'");
}
if (ptr->data()->size() % 2 != 0) {
return absl::InvalidArgumentError("Unexpected size for parameter '" +
parameter->name + "'");
}
std::vector<int2> values(ptr->data()->size() / 2);
for (int i = 0; i < values.size(); ++i) {
values[i] = int2((*ptr->data())[i * 2], (*ptr->data())[i * 2 + 1]);
}
parameter->value = values;
break;
}
}
return absl::OkStatus();
}
DataType ToEnum(data::DataType type) {
switch (type) {
case data::DataType::INT16:
return DataType::INT16;
case data::DataType::INT32:
return DataType::INT32;
case data::DataType::FLOAT16:
return DataType::FLOAT16;
case data::DataType::FLOAT32:
return DataType::FLOAT32;
default:
return DataType::UNKNOWN;
}
}
ObjectType ToEnum(data::ObjectType type) {
switch (type) {
case data::ObjectType::TEXTURE:
return ObjectType::TEXTURE;
case data::ObjectType::BUFFER:
return ObjectType::BUFFER;
default:
return ObjectType::UNKNOWN;
}
}
AccessType ToEnum(data::AccessType type) {
switch (type) {
case data::AccessType::READ:
return AccessType::READ;
case data::AccessType::WRITE:
return AccessType::WRITE;
case data::AccessType::READ_WRITE:
return AccessType::READ_WRITE;
}
}
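// Reconstructs an Object from its FlatBuffer form, including the size variant
// and either inlined object data or a global object reference.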
absl::Status ParseObject(const data::Object& fb_object, Object* object) {
object->access = ToEnum(fb_object.access());
object->binding = fb_object.binding();
object->object_type = ToEnum(fb_object.type());
object->data_type = ToEnum(fb_object.data_type());
switch (fb_object.size_type()) {
case data::ObjectSize::Uint3: {
auto* size = fb_object.size_as_Uint3();
object->size = uint3(size->x(), size->y(), size->z());
break;
}
case data::ObjectSize::Uint2: {
auto* size = fb_object.size_as_Uint2();
object->size = uint2(size->x(), size->y());
break;
}
case data::ObjectSize::Uint1: {
auto* size = fb_object.size_as_Uint1();
object->size = size->x();
break;
}
case data::ObjectSize::NONE:
return absl::InvalidArgumentError("Texture size is not set");
}
switch (fb_object.object_type()) {
case data::ObjectVariant::ObjectData: {
auto* fb_data = fb_object.object_as_ObjectData();
object->object = std::vector<uint8_t>(
fb_data->data()->data(),
fb_data->data()->data() + fb_data->data()->size());
break;
}
case data::ObjectVariant::ObjectRef: {
auto* fb_ref = fb_object.object_as_ObjectRef();
object->object = fb_ref->global_id();
break;
}
case data::ObjectVariant::NONE: {
return absl::InvalidArgumentError("Object is not set");
}
}
return absl::OkStatus();
}
CompiledModelOptions ParseParameters(const data::Parameters& fb_parameters) {
CompiledModelOptions options;
options.dynamic_batch = fb_parameters.dynamic_batch();
return options;
}
}
absl::Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
DeserializationHandler* handler) {
flatbuffers::Verifier verifier(serialized.data(), serialized.size());
if (!data::VerifyCompiledModelBuffer(verifier)) {
return absl::InvalidArgumentError("Serialized model is corrupted.");
}
auto model = data::GetCompiledModel(serialized.data());
for (auto shader : *model->shaders()) {
RETURN_IF_ERROR(
handler->OnShader(absl::MakeSpan(shader->c_str(), shader->size())));
}
std::vector<Variable> parameters;
std::vector<Object> objects;
for (auto program : *model->programs()) {
parameters.clear();
objects.clear();
for (auto fb_parameter : *program->parameters()) {
Variable parameter;
RETURN_IF_ERROR(ParseParameter(*fb_parameter, ¶meter));
parameters.push_back(std::move(parameter));
}
for (auto fb_object : *program->objects()) {
Object object;
RETURN_IF_ERROR(ParseObject(*fb_object, &object));
objects.push_back(std::move(object));
}
uint3 workgroup_size(program->workgroup_size()->x(),
program->workgroup_size()->y(),
program->workgroup_size()->z());
uint3 num_workgroups(program->number_workgroups()->x(),
program->number_workgroups()->y(),
program->number_workgroups()->z());
RETURN_IF_ERROR(handler->OnProgram(parameters, objects, workgroup_size,
num_workgroups,
program->shader_index()));
}
handler->OnOptions(ParseParameters(*model->parameters()));
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/serialization.h"
#include <stddef.h>
#include <sys/types.h>
#include <cstdint>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
struct ProgramDesc {
std::vector<Variable> parameters;
std::vector<Object> objects;
uint3 workgroup_size;
uint3 num_workgroups;
size_t shader_index;
};
struct Handler : public DeserializationHandler {
absl::Status OnShader(absl::Span<const char> shader_src) final {
shaders.push_back(std::string(shader_src.data(), shader_src.size()));
return absl::OkStatus();
}
absl::Status OnProgram(const std::vector<Variable>& parameters,
const std::vector<Object>& objects,
const uint3& workgroup_size,
const uint3& num_workgroups,
size_t shader_index) final {
programs.push_back(
{parameters, objects, workgroup_size, num_workgroups, shader_index});
return absl::OkStatus();
}
void OnOptions(const CompiledModelOptions& o) final { options = o; }
std::vector<std::string> shaders;
std::vector<ProgramDesc> programs;
CompiledModelOptions options;
};
struct ParameterComparator {
bool operator()(int32_t value) const {
return value == std::get<int32_t>(a.value);
}
bool operator()(const int2& value) const {
auto v = std::get<int2>(a.value);
return value.x == v.x && value.y == v.y;
}
bool operator()(const int4& value) const {
auto v = std::get<int4>(a.value);
return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w;
}
bool operator()(const std::vector<int2>& value) const {
auto v = std::get<std::vector<int2>>(a.value);
if (v.size() != value.size()) {
return false;
}
for (int i = 0; i < v.size(); ++i) {
if (v[i].x != value[i].x || v[i].y != value[i].y) {
return false;
}
}
return true;
}
bool operator()(uint32_t value) const {
return value == std::get<uint32_t>(a.value);
}
bool operator()(const uint4& value) const {
auto v = std::get<uint4>(a.value);
return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w;
}
bool operator()(float value) const {
return value == std::get<float>(a.value);
}
bool operator()(float2 value) const {
auto v = std::get<float2>(a.value);
return value.x == v.x && value.y == v.y;
}
bool operator()(const float4& value) const {
auto v = std::get<float4>(a.value);
return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w;
}
bool operator()(const std::vector<float4>& value) const {
auto v = std::get<std::vector<float4>>(a.value);
if (v.size() != value.size()) {
return false;
}
for (int i = 0; i < v.size(); ++i) {
if (v[i].x != value[i].x || v[i].y != value[i].y) {
return false;
}
}
return true;
}
Variable a;
};
bool Eq(const Variable& a, const Variable& b) {
return a.name == b.name && std::visit(ParameterComparator{a}, b.value);
}
struct ObjectComparator {
bool operator()(const ObjectData& data) const {
return std::get<ObjectData>(a.object) == data;
}
bool operator()(const ObjectRef& ref) const {
return std::get<ObjectRef>(a.object) == ref;
}
Object a;
};
bool Eq(const Object& a, const Object& b) {
return a.access == b.access && a.binding == b.binding &&
std::visit(ObjectComparator{a}, b.object);
}
TEST(Smoke, Read) {
std::string shader1 = "A";
std::string shader2 = "B";
SerializedCompiledModelBuilder builder;
builder.AddShader(shader1);
builder.AddShader(shader2);
std::vector<Variable> parameters;
parameters.push_back({"1", int32_t(1)});
parameters.push_back({"2", int2(1, 2)});
parameters.push_back({"3", int4(1, 2, 3, 4)});
parameters.push_back({"4", uint32_t(10)});
parameters.push_back({"5", uint4(10, 20, 30, 40)});
parameters.push_back({"6", -2.0f});
parameters.push_back({"7", float2(1, -1)});
parameters.push_back({"8", float4(1, -1, 2, -2)});
parameters.push_back(
{"9", std::vector<int2>{int2(1, 2), int2(3, 4), int2(5, 6)}});
std::vector<Object> objects;
objects.push_back(MakeReadonlyBuffer(std::vector<float>{1, 2, 3, 4}));
objects.push_back(Object{AccessType::WRITE, DataType::FLOAT32,
ObjectType::TEXTURE, 5, uint3(1, 2, 3), 100u});
objects.push_back(Object{AccessType::READ_WRITE, DataType::INT8,
ObjectType::BUFFER, 6, uint2(2, 1),
std::vector<uint8_t>{7, 9}});
uint3 num_workgroups(10, 20, 30);
uint3 workgroup_size(1, 2, 3);
builder.AddProgram(parameters, objects, workgroup_size, num_workgroups, 1);
Handler handler;
CompiledModelOptions options;
options.dynamic_batch = true;
ASSERT_TRUE(
DeserializeCompiledModel(builder.Finalize(options), &handler).ok());
EXPECT_EQ(num_workgroups.data_, handler.programs[0].num_workgroups.data_);
EXPECT_EQ(workgroup_size.data_, handler.programs[0].workgroup_size.data_);
EXPECT_THAT(handler.shaders, ::testing::ElementsAre(shader1, shader2));
EXPECT_EQ(handler.programs[0].parameters.size(), parameters.size());
for (int i = 0; i < parameters.size(); ++i) {
EXPECT_TRUE(Eq(parameters[i], handler.programs[0].parameters[i])) << i;
}
EXPECT_EQ(handler.programs[0].objects.size(), objects.size());
for (int i = 0; i < objects.size(); ++i) {
EXPECT_TRUE(Eq(objects[i], handler.programs[0].objects[i])) << i;
}
EXPECT_TRUE(handler.options.dynamic_batch);
}
}
}
}
} |
971 | cpp | tensorflow/tensorflow | interpreter_utils | tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc | tensorflow/lite/delegates/interpreter_utils_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TESTING_INTERPRETER_UTILS_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TESTING_INTERPRETER_UTILS_H_
#include <vector>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace gpu {
namespace testing {
absl::Status InterpreterInvokeWithOpResolver(
const ::tflite::Model* model, TfLiteDelegate* delegate,
const OpResolver& op_resolver, const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs);
absl::Status InterpreterInvoke(const ::tflite::Model* model,
TfLiteDelegate* delegate,
const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs);
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h"
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace gpu {
namespace testing {
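// Builds an interpreter for the model with the given op resolver, optionally
// applies the delegate, feeds the float32 inputs, invokes once, and converts
// every output tensor into a BHWC TensorFloat32.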
absl::Status InterpreterInvokeWithOpResolver(
const ::tflite::Model* model, TfLiteDelegate* delegate,
const OpResolver& op_resolver, const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs) {
auto interpreter = std::make_unique<Interpreter>();
if (InterpreterBuilder(model, op_resolver)(&interpreter) != kTfLiteOk) {
return absl::InternalError("Unable to create TfLite InterpreterBuilder");
}
if (delegate && interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
return absl::InternalError(
"Unable to modify TfLite graph with the delegate");
}
interpreter->SetNumThreads(1);
if (interpreter->AllocateTensors() != kTfLiteOk) {
return absl::InternalError("Unable to allocate TfLite tensors");
}
for (int i = 0; i < inputs.size(); ++i) {
if (interpreter->tensor(interpreter->inputs()[i])->type != kTfLiteFloat32) {
return absl::InternalError("input data_type is not float32");
}
float* tflite_data =
interpreter->typed_tensor<float>(interpreter->inputs()[i]);
if (inputs[i].data.size() * sizeof(float) >
interpreter->tensor(interpreter->inputs()[i])->bytes) {
return absl::InternalError("too big input data");
}
std::memcpy(tflite_data, inputs[i].data.data(),
inputs[i].data.size() * sizeof(float));
}
if (interpreter->Invoke() != kTfLiteOk) {
return absl::InternalError("Unable to invoke TfLite interpreter");
}
if (!outputs || !outputs->empty()) {
return absl::InternalError("Invalid outputs pointer");
}
outputs->reserve(interpreter->outputs().size());
for (auto t : interpreter->outputs()) {
const TfLiteTensor* out_tensor = interpreter->tensor(t);
TensorFloat32 bhwc;
bhwc.id = t;
if (out_tensor->dims->data[0] != 1) {
return absl::InternalError("Batch dimension is expected to be 1");
}
bhwc.shape.b = out_tensor->dims->data[0];
switch (out_tensor->dims->size) {
case 2:
bhwc.shape.h = 1;
bhwc.shape.w = 1;
bhwc.shape.c = out_tensor->dims->data[1];
break;
case 3:
bhwc.shape.h = 1;
bhwc.shape.w = out_tensor->dims->data[1];
bhwc.shape.c = out_tensor->dims->data[2];
break;
case 4:
bhwc.shape.h = out_tensor->dims->data[1];
bhwc.shape.w = out_tensor->dims->data[2];
bhwc.shape.c = out_tensor->dims->data[3];
break;
default:
return absl::InternalError("Unsupported dimensions size " +
std::to_string(out_tensor->dims->size));
}
bhwc.data = std::vector<float>(
out_tensor->data.f,
out_tensor->data.f + out_tensor->bytes / sizeof(float));
outputs->push_back(bhwc);
}
return absl::OkStatus();
}
absl::Status InterpreterInvoke(const ::tflite::Model* model,
TfLiteDelegate* delegate,
const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs) {
ops::builtin::BuiltinOpResolver builtin_op_resolver;
return InterpreterInvokeWithOpResolver(model, delegate, builtin_op_resolver,
inputs, outputs);
}
}
}
} | #include "tensorflow/lite/delegates/interpreter_utils.h"
#include <string.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/delegate_test_util.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace delegates {
using test_utils::SimpleDelegate;
using test_utils::TestDelegate;
using test_utils::TestFP16Delegation;
namespace {
TEST_F(TestDelegate, DelegateNodeInvokeFailureFallback) {
delegate_ = std::unique_ptr<SimpleDelegate>(new SimpleDelegate(
{0, 1, 2}, kTfLiteDelegateFlagsNone, false ,
0 , true ));
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 1);
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
constexpr int kOutputTensorIndex = 3;
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
TfLiteTensor* tensor = interpreter_->tensor(kOutputTensorIndex);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_F(TestDelegate, TestFallbackWithMultipleDelegates) {
delegate_ = std::unique_ptr<SimpleDelegate>(
new SimpleDelegate({0}, kTfLiteDelegateFlagsAllowDynamicTensors));
delegate2_ = std::unique_ptr<SimpleDelegate>(new SimpleDelegate(
      {1, 2}, kTfLiteDelegateFlagsNone, /*fail_node_prepare=*/false,
      /*min_ops_per_subset=*/0, /*fail_node_invoke=*/true));
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate2_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 2);
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
constexpr int kOutputTensorIndex = 2;
TfLiteTensor* tensor = interpreter_->tensor(kOutputTensorIndex);
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
EXPECT_EQ(interpreter_->execution_plan().size(), 3);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_P(TestFP16Delegation, DelegateInvokeWithCPUFallback) {
delegate_ = std::make_unique<FP16Delegate>(
      /*num_delegated_subsets=*/GetParam(), /*fail_node_prepare=*/false,
      /*fail_node_invoke=*/true);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
std::vector<float> input = {3.0f};
std::vector<float> expected_output = {16.0f};
const int input_tensor_idx = interpreter_->inputs()[0];
const int output_tensor_idx = interpreter_->outputs()[0];
memcpy(interpreter_->typed_tensor<float>(input_tensor_idx), input.data(),
sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
TfLiteTensor* output_tensor = interpreter_->tensor(output_tensor_idx);
for (int i = 0; i < 1; ++i) {
EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
}
ASSERT_EQ(interpreter_->execution_plan().size(), 8);
VerifyInvoke();
}
INSTANTIATE_TEST_SUITE_P(TestFP16Delegation, TestFP16Delegation,
::testing::Values(1, 2));
}
}
} |
972 | cpp | tensorflow/tensorflow | simple_delegate | tensorflow/lite/delegates/utils/simple_delegate.cc | tensorflow/lite/delegates/utils/simple_delegate_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_UTILS_SIMPLE_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_UTILS_SIMPLE_DELEGATE_H_
#include <stdint.h>
#include <memory>
#include <utility>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
using TfLiteDelegateUniquePtr =
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>;
class SimpleDelegateKernelInterface {
public:
virtual ~SimpleDelegateKernelInterface() = default;
virtual TfLiteStatus Init(TfLiteContext* context,
const TfLiteDelegateParams* params) = 0;
virtual TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) = 0;
virtual TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) = 0;
};
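// Added note: a kernel implementing the interface above is created for every
// delegated partition. The runtime calls Init() once when the partition is
// handed to the delegate, Prepare() whenever tensors are (re)allocated, and
// Eval() on each Interpreter::Invoke() that reaches the partition.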
class SimpleDelegateInterface {
public:
struct Options {
int max_delegated_partitions = 0;
int min_nodes_per_partition = 0;
};
virtual ~SimpleDelegateInterface() = default;
virtual bool IsNodeSupportedByDelegate(const TfLiteRegistration* registration,
const TfLiteNode* node,
TfLiteContext* context) const = 0;
virtual TfLiteStatus Initialize(TfLiteContext* context) = 0;
virtual const char* Name() const = 0;
virtual std::unique_ptr<SimpleDelegateKernelInterface>
CreateDelegateKernelInterface() = 0;
virtual SimpleDelegateInterface::Options DelegateOptions() const = 0;
virtual TfLiteStatus CopyFromBufferHandle(TfLiteContext* context,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) {
return kTfLiteError;
}
virtual TfLiteStatus CopyToBufferHandle(TfLiteContext* context,
TfLiteBufferHandle buffer_handle,
const TfLiteTensor* tensor) {
return kTfLiteError;
}
virtual void FreeBufferHandle(TfLiteContext* context,
TfLiteBufferHandle* handle) {}
};
class TfLiteDelegateFactory {
public:
static TfLiteDelegate* CreateSimpleDelegate(
std::unique_ptr<SimpleDelegateInterface> simple_delegate,
int64_t flags = kTfLiteDelegateFlagsNone);
static void DeleteSimpleDelegate(TfLiteDelegate* delegate);
inline static TfLiteDelegateUniquePtr Create(
std::unique_ptr<SimpleDelegateInterface> simple_delegate) {
return TfLiteDelegateUniquePtr(
CreateSimpleDelegate(std::move(simple_delegate)), DeleteSimpleDelegate);
}
};
}
#endif
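// Illustrative usage sketch (added for clarity, not part of the original
// header): a delegate author subclasses both interfaces above and wraps the
// result with TfLiteDelegateFactory::Create(). The names MyDelegate,
// MyDelegateKernel, CreateMyDelegate and the example_sketch namespace are
// hypothetical; only the SimpleDelegate* interfaces and the factory come from
// this header.
#include <memory>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_delegate.h"
namespace example_sketch {
class MyDelegateKernel : public tflite::SimpleDelegateKernelInterface {
 public:
  TfLiteStatus Init(TfLiteContext* context,
                    const TfLiteDelegateParams* params) override {
    // Record the nodes and tensors this partition will handle.
    return kTfLiteOk;
  }
  TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) override {
    // Allocate scratch buffers once tensor shapes are known.
    return kTfLiteOk;
  }
  TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) override {
    // Run the delegated computation for this partition.
    return kTfLiteOk;
  }
};
class MyDelegate : public tflite::SimpleDelegateInterface {
 public:
  bool IsNodeSupportedByDelegate(const TfLiteRegistration* registration,
                                 const TfLiteNode* node,
                                 TfLiteContext* context) const override {
    // Claim only ADD nodes in this sketch.
    return registration->builtin_code == kTfLiteBuiltinAdd;
  }
  TfLiteStatus Initialize(TfLiteContext* context) override { return kTfLiteOk; }
  const char* Name() const override { return "MyDelegate"; }
  std::unique_ptr<tflite::SimpleDelegateKernelInterface>
  CreateDelegateKernelInterface() override {
    return std::make_unique<MyDelegateKernel>();
  }
  tflite::SimpleDelegateInterface::Options DelegateOptions() const override {
    // Default options: a non-positive max_delegated_partitions is treated as
    // "unlimited" by DelegatePrepare in the implementation below.
    return tflite::SimpleDelegateInterface::Options();
  }
};
// The returned smart pointer can be passed to
// Interpreter::ModifyGraphWithDelegate(delegate.get()).
inline tflite::TfLiteDelegateUniquePtr CreateMyDelegate() {
  return tflite::TfLiteDelegateFactory::Create(std::make_unique<MyDelegate>());
}
}  // namespace example_sketch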
#include "tensorflow/lite/delegates/utils/simple_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
TfLiteRegistration GetDelegateKernelRegistration(
SimpleDelegateInterface* delegate) {
TfLiteRegistration kernel_registration{};
kernel_registration.profiling_string = nullptr;
kernel_registration.builtin_code = kTfLiteBuiltinDelegate;
kernel_registration.custom_name = delegate->Name();
kernel_registration.version = 1;
kernel_registration.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleDelegateKernelInterface*>(buffer);
};
kernel_registration.init = [](TfLiteContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteDelegateParams* params =
reinterpret_cast<const TfLiteDelegateParams*>(buffer);
if (params == nullptr) {
TF_LITE_KERNEL_LOG(context, "NULL TfLiteDelegateParams passed.");
return nullptr;
}
auto* delegate =
reinterpret_cast<SimpleDelegateInterface*>(params->delegate->data_);
std::unique_ptr<SimpleDelegateKernelInterface> delegate_kernel(
delegate->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
};
kernel_registration.prepare = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
if (node->user_data == nullptr) {
TF_LITE_KERNEL_LOG(context, "Delegate kernel was not initialized");
return kTfLiteError;
}
SimpleDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleDelegateKernelInterface*>(node->user_data);
return delegate_kernel->Prepare(context, node);
};
kernel_registration.invoke = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
SimpleDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleDelegateKernelInterface*>(node->user_data);
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, node);
};
return kernel_registration;
}
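// Added note: DelegatePrepare is installed as TfLiteDelegate::Prepare by
// CreateSimpleDelegate below. It asks the wrapped SimpleDelegateInterface
// which nodes it supports, partitions the graph with GraphPartitionHelper,
// keeps the N largest partitions allowed by the delegate's Options, and
// replaces those node subsets with the kernel registration built above.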
TfLiteStatus DelegatePrepare(TfLiteContext* context,
TfLiteDelegate* base_delegate) {
auto* delegate =
reinterpret_cast<SimpleDelegateInterface*>(base_delegate->data_);
auto delegate_options = delegate->DelegateOptions();
if (delegate_options.max_delegated_partitions <= 0)
delegate_options.max_delegated_partitions = std::numeric_limits<int>::max();
TF_LITE_ENSURE_STATUS(delegate->Initialize(context));
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
return delegate->IsNodeSupportedByDelegate(registration, node, context);
};
delegates::GraphPartitionHelper helper(context, node_supported_fn);
TF_LITE_ENSURE_STATUS(helper.Partition(nullptr));
std::vector<int> supported_nodes = helper.GetNodesOfFirstNLargestPartitions(
delegate_options.max_delegated_partitions,
delegate_options.min_nodes_per_partition);
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"%s delegate: %d nodes delegated out of %d nodes with "
"%d partitions.\n",
delegate->Name(), supported_nodes.size(),
helper.num_total_nodes(), helper.num_partitions());
TfLiteRegistration delegate_kernel_registration =
GetDelegateKernelRegistration(delegate);
return context->ReplaceNodeSubsetsWithDelegateKernels(
context, delegate_kernel_registration,
BuildTfLiteArray(supported_nodes).get(), base_delegate);
}
}
TfLiteDelegate* TfLiteDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleDelegateInterface> simple_delegate, int64_t flag) {
if (simple_delegate == nullptr) {
return nullptr;
}
auto delegate = new TfLiteDelegate{};
delegate->Prepare = &DelegatePrepare;
delegate->flags = flag;
delegate->data_ = simple_delegate.release();
delegate->CopyFromBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) -> TfLiteStatus {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
return simple_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
};
delegate->CopyToBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) -> TfLiteStatus {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
return simple_delegate->CopyToBufferHandle(context, buffer_handle, tensor);
};
delegate->FreeBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* buffer_handle) {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
simple_delegate->FreeBufferHandle(context, buffer_handle);
};
return delegate;
}
void TfLiteDelegateFactory::DeleteSimpleDelegate(TfLiteDelegate* delegate) {
if (!delegate) return;
SimpleDelegateInterface* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
delete simple_delegate;
delete delegate;
}
} | #include <stdlib.h>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/delegates/utils/dummy_delegate/dummy_delegate.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace {
class TestDelegate : public ::testing::Test {
protected:
void SetUp() override {
interpreter_ = std::make_unique<Interpreter>();
interpreter_->AddTensors(5);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({3, 4});
TfLiteQuantizationParams quant;
interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
void* builtin_data_2 = malloc(sizeof(int));
void* builtin_data_3 = malloc(sizeof(int));
interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data_1,
reg);
interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data_2,
reg);
interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data_3,
reg);
}
void TearDown() override { interpreter_.reset(); }
protected:
std::unique_ptr<Interpreter> interpreter_;
};
TEST_F(TestDelegate, BasicDelegate) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
interpreter_->ModifyGraphWithDelegate(std::move(delegate));
ASSERT_EQ(interpreter_->execution_plan().size(), 1);
int node = interpreter_->execution_plan()[0];
const auto* node_and_reg = interpreter_->node_and_registration(node);
EXPECT_STREQ("DummyDelegate", node_and_reg->second.custom_name);
EXPECT_EQ(1, node_and_reg->second.version);
const TfLiteDelegateParams* params = static_cast<const TfLiteDelegateParams*>(
node_and_reg->first.builtin_data);
ASSERT_EQ(params->nodes_to_replace->size, 3);
EXPECT_EQ(params->nodes_to_replace->data[0], 0);
EXPECT_EQ(params->nodes_to_replace->data[1], 1);
EXPECT_EQ(params->nodes_to_replace->data[2], 2);
ASSERT_EQ(params->input_tensors->size, 2);
EXPECT_EQ(params->input_tensors->data[0], 0);
EXPECT_EQ(params->input_tensors->data[1], 1);
ASSERT_EQ(params->output_tensors->size, 2);
EXPECT_EQ(params->output_tensors->data[0], 3);
EXPECT_EQ(params->output_tensors->data[1], 4);
}
TEST_F(TestDelegate, NoNodesToDelegate) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinSub;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
interpreter_->ModifyGraphWithDelegate(std::move(delegate));
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
}
TEST_F(TestDelegate, DelegateFailedPrepare) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_prepare = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteDelegateError,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
}
TEST_F(TestDelegate, DelegateFailedInvoke) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_invoke = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteOk,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
ASSERT_EQ(kTfLiteError, interpreter_->Invoke());
}
TEST_F(TestDelegate, DelegateFailedInit) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_init = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteDelegateError,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
}
}
} |
973 | cpp | tensorflow/tensorflow | simple_opaque_delegate | tensorflow/lite/delegates/utils/simple_opaque_delegate.cc | tensorflow/lite/delegates/utils/simple_opaque_delegate_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_UTILS_SIMPLE_OPAQUE_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_UTILS_SIMPLE_OPAQUE_DELEGATE_H_
#include <stdint.h>
#include <memory>
#include <utility>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
namespace tflite {
using TfLiteOpaqueDelegateUniquePtr =
std::unique_ptr<TfLiteOpaqueDelegate, void (*)(TfLiteOpaqueDelegate*)>;
class SimpleOpaqueDelegateKernelInterface {
public:
virtual ~SimpleOpaqueDelegateKernelInterface() = default;
virtual TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) = 0;
virtual TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node) = 0;
virtual TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node) = 0;
};
class SimpleOpaqueDelegateInterface {
public:
virtual ~SimpleOpaqueDelegateInterface() = default;
virtual bool IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const = 0;
virtual TfLiteStatus Initialize(TfLiteOpaqueContext* context) = 0;
virtual const char* Name() const = 0;
virtual std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
CreateDelegateKernelInterface() = 0;
virtual TfLiteStatus CopyFromBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
return kTfLiteError;
}
virtual TfLiteStatus CopyToBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer_handle,
const TfLiteOpaqueTensor* tensor) {
return kTfLiteError;
}
virtual void FreeBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle* handle) {}
};
class TfLiteOpaqueDelegateFactory {
public:
static TfLiteOpaqueDelegate* CreateSimpleDelegate(
std::unique_ptr<SimpleOpaqueDelegateInterface> simple_delegate,
int64_t flags = kTfLiteDelegateFlagsNone);
static void DeleteSimpleDelegate(TfLiteOpaqueDelegate* opaque_delegate);
inline static TfLiteOpaqueDelegateUniquePtr Create(
std::unique_ptr<SimpleOpaqueDelegateInterface> simple_delegate) {
return TfLiteOpaqueDelegateUniquePtr(
CreateSimpleDelegate(std::move(simple_delegate)), DeleteSimpleDelegate);
}
};
}
#endif
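// Illustrative usage sketch (added for clarity, not part of the original
// header): the opaque factory is typically paired with the TFLite C API, as
// the tests later in this file set do. RunModelWithOpaqueDelegateSketch is a
// hypothetical helper and model_path is a placeholder; SampleStableDelegate
// comes from the sample stable delegate sources below.
#include <memory>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
inline void RunModelWithOpaqueDelegateSketch(const char* model_path) {
  // Wrap a SimpleOpaqueDelegateInterface implementation in an opaque delegate.
  tflite::TfLiteOpaqueDelegateUniquePtr delegate =
      tflite::TfLiteOpaqueDelegateFactory::Create(
          std::make_unique<tflite::example::SampleStableDelegate>());
  TfLiteModel* model = TfLiteModelCreateFromFile(model_path);
  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
  // The interpreter applies the delegate while the graph is being built.
  TfLiteInterpreterOptionsAddDelegate(options, delegate.get());
  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
  TfLiteInterpreterOptionsDelete(options);
  TfLiteInterpreterAllocateTensors(interpreter);
  // ... copy inputs, TfLiteInterpreterInvoke(interpreter), read outputs ...
  TfLiteInterpreterDelete(interpreter);
  TfLiteModelDelete(model);
}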
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace {
TfLiteOperator* CreateDelegateKernelRegistration(
SimpleOpaqueDelegateInterface* delegate) {
TfLiteOperator* kernel_registration =
      TfLiteOperatorCreateWithData(kTfLiteBuiltinDelegate, delegate->Name(),
                                   /*version=*/1, /*user_data=*/nullptr);
TfLiteOperatorSetFreeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, void* buffer) -> void {
        // The buffer holds the kernel created by the init callback below.
        delete reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(buffer);
});
TfLiteOperatorSetInitWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteOpaqueDelegateParams* params =
reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
if (params == nullptr) {
return nullptr;
}
auto* delegate_data = reinterpret_cast<SimpleOpaqueDelegateInterface*>(
params->delegate_data);
std::unique_ptr<SimpleOpaqueDelegateKernelInterface> delegate_kernel(
delegate_data->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
});
TfLiteOperatorSetPrepareWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
return delegate_kernel->Prepare(context, opaque_node);
});
TfLiteOperatorSetInvokeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, opaque_node);
});
return kernel_registration;
}
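// Added note: unlike the non-opaque SimpleDelegate version, this
// DelegatePrepare walks the execution plan returned by
// TfLiteOpaqueContextGetExecutionPlan, collects every node the
// SimpleOpaqueDelegateInterface reports as supported, and hands the whole list
// to TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels without applying
// partition limits.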
TfLiteStatus DelegatePrepare(TfLiteOpaqueContext* opaque_context,
TfLiteOpaqueDelegate* opaque_delegate,
void* data) {
auto* simple_opaque_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
TF_LITE_ENSURE_STATUS(simple_opaque_delegate->Initialize(opaque_context));
std::vector<int> supported_nodes;
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(
TfLiteOpaqueContextGetExecutionPlan(opaque_context, &execution_plan));
IntArrayUniquePtr plan(TfLiteIntArrayCopy(execution_plan));
for (int i = 0; i < plan->size; ++i) {
const int node_id = plan->data[i];
TfLiteOpaqueNode* opaque_node;
TfLiteOperator* registration_external;
TfLiteOpaqueContextGetNodeAndRegistration(
opaque_context, node_id, &opaque_node, ®istration_external);
if (simple_opaque_delegate->IsNodeSupportedByDelegate(
registration_external, opaque_node, opaque_context)) {
supported_nodes.push_back(node_id);
}
}
TfLiteOperator* delegate_kernel_registration =
CreateDelegateKernelRegistration(simple_opaque_delegate);
return TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
opaque_context, delegate_kernel_registration,
BuildTfLiteArray(supported_nodes).get(), opaque_delegate);
}
}
TfLiteOpaqueDelegate* TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleOpaqueDelegateInterface> simple_delegate,
int64_t flags) {
if (simple_delegate == nullptr) {
return {};
}
TfLiteOpaqueDelegateBuilder opaque_delegate_builder{};
opaque_delegate_builder.Prepare = &DelegatePrepare;
opaque_delegate_builder.flags = flags;
opaque_delegate_builder.data = simple_delegate.release();
opaque_delegate_builder.CopyFromBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
return simple_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
};
opaque_delegate_builder.CopyToBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
return simple_delegate->CopyToBufferHandle(context, buffer_handle,
tensor);
};
opaque_delegate_builder.FreeBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle* buffer_handle) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
simple_delegate->FreeBufferHandle(context, buffer_handle);
};
return TfLiteOpaqueDelegateCreate(&opaque_delegate_builder);
}
void TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(
TfLiteOpaqueDelegate* opaque_delegate) {
if (!opaque_delegate) return;
auto* simple_delegate = reinterpret_cast<SimpleOpaqueDelegateInterface*>(
TfLiteOpaqueDelegateGetData(opaque_delegate));
delete simple_delegate;
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
} | #include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <array>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/delegate_test_util.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
namespace tflite {
class TestDelegate : public ::testing::Test {};
TEST_F(TestDelegate, TestDataAddBin_SingleInputSingleOutput_FullyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");
TfLiteQuantizationParams input_params =
TfLiteTensorQuantizationParams(input_tensor);
EXPECT_EQ(input_params.scale, 0.f);
EXPECT_EQ(input_params.zero_point, 0);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(DelegateTest,
TestDataAddBin_SingleInputSingleOutput_FullyDelegated_ResizeInputTensors) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
std::array<int, 1> input_dims = {2};
ASSERT_EQ(TfLiteInterpreterResizeInputTensor(
interpreter, 0, input_dims.data(), input_dims.size()),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
EXPECT_EQ(TfLiteTensorNumDims(input_tensor), 1);
EXPECT_EQ(TfLiteTensorDim(input_tensor, 0), 2);
EXPECT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2);
EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");
TfLiteQuantizationParams input_params =
TfLiteTensorQuantizationParams(input_tensor);
EXPECT_EQ(input_params.scale, 0.f);
EXPECT_EQ(input_params.zero_point, 0);
std::array<float, 2> input = {1.f, 3.f};
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_EQ(TfLiteTensorNumDims(output_tensor), 1);
EXPECT_EQ(TfLiteTensorDim(output_tensor, 0), 2);
EXPECT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
std::array<float, 2> output;
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
EXPECT_EQ(output[0], 3.f);
EXPECT_EQ(output[1], 9.f);
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(DelegateTest, TestDataMultiAddBin_MultiInputMultiOutput_FullyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/multi_add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 4);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 2);
TfLiteTensor* input_tensor0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensor* input_tensor1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
TfLiteTensor* input_tensor2 =
TfLiteInterpreterGetInputTensor(interpreter, 2);
TfLiteTensor* input_tensor3 =
TfLiteInterpreterGetInputTensor(interpreter, 3);
std::vector<TfLiteTensor*> input_tensors{input_tensor0, input_tensor1,
input_tensor2, input_tensor3};
for (TfLiteTensor* input_tensor : input_tensors) {
const float kTensorCellValue = 1.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
}
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor0 =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
const TfLiteTensor* output_tensor1 =
TfLiteInterpreterGetOutputTensor(interpreter, 1);
std::vector<const TfLiteTensor*> output_tensors{output_tensor0,
output_tensor1};
for (const TfLiteTensor* output_tensor : output_tensors) {
int64_t n = tflite::NumElements(output_tensor);
std::vector<float> output_tensor_values(n, 0);
ASSERT_EQ(
TfLiteTensorCopyToBuffer(output_tensor, output_tensor_values.data(),
output_tensor_values.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < n; ++i) {
EXPECT_EQ(output_tensor_values[i], 3.f);
}
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TfLiteOperator* CreateDelegateKernelRegistrationImpl(
SimpleOpaqueDelegateInterface* delegate) {
TfLiteOperator* kernel_registration = TfLiteOperatorCreateWithData(
kTfLiteBuiltinDelegate, delegate->Name(), 1, nullptr);
TfLiteOperatorSetFreeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, void* buffer) -> void {
        // The buffer holds the kernel created by the init callback below.
        delete reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(buffer);
});
TfLiteOperatorSetInitWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, const char* buffer,
size_t length) -> void* {
auto* params =
reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
if (params == nullptr) {
return nullptr;
}
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(
params->delegate_data);
std::unique_ptr<SimpleOpaqueDelegateKernelInterface> delegate_kernel(
simple_delegate->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
});
TfLiteOperatorSetPrepareWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
return delegate_kernel->Prepare(context, opaque_node);
});
TfLiteOperatorSetInvokeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, opaque_node);
});
return kernel_registration;
}
using ::tflite::delegates::test_utils::TestFP16Delegation;
TEST_F(TestFP16Delegation, MultipleDelegateKernels) {
auto my_simple_delegate = std::make_unique<example::SampleStableDelegate>();
TfLiteOpaqueDelegate* opaque_delegate =
TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::move(my_simple_delegate));
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(
reinterpret_cast<TfLiteDelegate*>(opaque_delegate)),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 7);
VerifyInvoke();
TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(opaque_delegate);
}
class MySimpleOpaqueDelegateWithBufferHandleSupport
: public example::SampleStableDelegate {
public:
static constexpr int kDelegateOutputValue = 42;
TfLiteStatus CopyFromBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) override {
auto* output = reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
std::vector<float> test_output(
example::helpers::CalculateNumElements(tensor), kDelegateOutputValue);
memcpy(output, test_output.data(), test_output.size() * sizeof(float));
return kTfLiteOk;
}
void FreeBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle* handle) override {
recorded_buffer_handle_ = *handle;
free_buffer_handle_called_ = true;
}
int recorded_buffer_handle_ = -1;
bool free_buffer_handle_called_ = false;
};
TEST_F(TestDelegate, SetBufferHandle) {
MySimpleOpaqueDelegateWithBufferHandleSupport my_simple_delegate;
TfLiteOpaqueDelegateBuilder opaque_delegate_builder{};
opaque_delegate_builder.Prepare = [](TfLiteOpaqueContext* opaque_context,
TfLiteOpaqueDelegate* opaque_delegate,
void* data) {
auto* simple_opaque_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
TF_LITE_ENSURE_STATUS(simple_opaque_delegate->Initialize(opaque_context));
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(
TfLiteOpaqueContextGetExecutionPlan(opaque_context, &execution_plan));
TfLiteOperator* delegate_kernel_registration =
CreateDelegateKernelRegistrationImpl(simple_opaque_delegate);
return TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
opaque_context, delegate_kernel_registration, execution_plan,
opaque_delegate);
};
opaque_delegate_builder.flags = kTfLiteDelegateFlagsNone;
opaque_delegate_builder.data = &my_simple_delegate;
opaque_delegate_builder.CopyFromBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) -> TfLiteStatus {
auto* simple_opaque_delegate =
reinterpret_cast<MySimpleOpaqueDelegateWithBufferHandleSupport*>(data);
simple_opaque_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
return kTfLiteOk;
};
opaque_delegate_builder.FreeBufferHandle = [](TfLiteOpaqueContext* context,
TfLiteOpaqueDelegate* delegate,
void* data,
TfLiteBufferHandle* handle) {
auto* simple_opaque_delegate =
reinterpret_cast<MySimpleOpaqueDelegateWithBufferHandleSupport*>(data);
simple_opaque_delegate->FreeBufferHandle(context, handle);
};
TfLiteDelegate tflite_delegate{};
tflite_delegate.opaque_delegate_builder = &opaque_delegate_builder;
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder builder(*model, resolver);
builder.AddDelegate(&tflite_delegate);
std::unique_ptr<tflite::Interpreter> interpreter;
builder(&interpreter);
ASSERT_NE(interpreter, nullptr);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
constexpr int kTensorDimensions = 1 * 8 * 8 * 3;
std::vector<float> floats(kTensorDimensions, 1);
memcpy(interpreter->typed_input_tensor<float>(0), floats.data(),
floats.size() * sizeof(float));
EXPECT_FALSE(my_simple_delegate.free_buffer_handle_called_);
int first_buffer_handle = 1;
const int kOutputTensorIndex = 2;
interpreter->SetBufferHandle(
kOutputTensorIndex, first_buffer_handle,
reinterpret_cast<TfLiteDelegate*>(&tflite_delegate));
TfLiteTensor* output_t = interpreter->output_tensor(0);
output_t->data_is_stale = true;
EXPECT_FALSE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_NE(my_simple_delegate.recorded_buffer_handle_, first_buffer_handle);
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
std::vector<float> outputs(kTensorDimensions, 0);
memcpy(outputs.data(), interpreter->typed_output_tensor<float>(0),
outputs.size() * sizeof(float));
for (int i = 0; i < outputs.size(); ++i) {
EXPECT_EQ(
outputs[i],
MySimpleOpaqueDelegateWithBufferHandleSupport::kDelegateOutputValue);
}
int next_buffer_handle = first_buffer_handle + 1;
interpreter->SetBufferHandle(kOutputTensorIndex, next_buffer_handle,
&tflite_delegate);
EXPECT_TRUE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_EQ(my_simple_delegate.recorded_buffer_handle_, first_buffer_handle);
my_simple_delegate.free_buffer_handle_called_ = false;
my_simple_delegate.recorded_buffer_handle_ = first_buffer_handle = -1;
interpreter.reset();
EXPECT_TRUE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_EQ(my_simple_delegate.recorded_buffer_handle_, next_buffer_handle);
}
TEST(DelegateTest,
TestDataConvHugeIm2ColBin_MultiInputSingleOutput_PartiallyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/conv_huge_im2col.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 4);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
TfLiteTensor* input_tensor0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensor* input_tensor1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
TfLiteTensor* input_tensor2 =
TfLiteInterpreterGetInputTensor(interpreter, 2);
TfLiteTensor* input_tensor3 =
TfLiteInterpreterGetInputTensor(interpreter, 3);
std::vector<TfLiteTensor*> input_tensors{input_tensor0, input_tensor1,
input_tensor2, input_tensor3};
for (TfLiteTensor* input_tensor : input_tensors) {
const float kTensorCellValue = 4.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
}
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
int64_t n = tflite::NumElements(output_tensor);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < n; ++i) {
EXPECT_EQ(output[i], 4);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} |
974 | cpp | tensorflow/tensorflow | sample_stable_delegate | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_SAMPLE_STABLE_DELEGATE_SAMPLE_STABLE_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_SAMPLE_STABLE_DELEGATE_SAMPLE_STABLE_DELEGATE_H_
#include <memory>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
namespace helpers {
int CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor);
}
static const char kSampleStableDelegateName[] = "google_sample_delegate";
static const char kSampleStableDelegateVersion[] = "1.0.0";
class SampleStableDelegate : public SimpleOpaqueDelegateInterface {
public:
bool IsNodeSupportedByDelegate(const TfLiteOperator* registration_external,
const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const override;
TfLiteStatus Initialize(TfLiteOpaqueContext* context) override;
const char* Name() const override;
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
CreateDelegateKernelInterface() override;
};
}
}
#endif
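// Added note: this sample delegate claims only two-input float32 ADD and SUB
// nodes with matching shapes and no fused activation (see
// IsNodeSupportedByDelegate in the implementation below). As the accompanying
// tests show, it is typically instantiated as:
//   auto delegate = tflite::TfLiteOpaqueDelegateFactory::Create(
//       std::make_unique<tflite::example::SampleStableDelegate>());
//   TfLiteInterpreterOptionsAddDelegate(options, delegate.get());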
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
namespace {
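// Added note: the kernel below tracks two tensor categories. Tensors that are
// both produced and consumed inside the delegated partition are "internal" and
// get scratch buffers in internal_tensors_memory_ during Prepare(); tensors
// crossing the partition boundary are "external" and are read and written
// through TfLiteOpaqueTensorData() directly. Eval() then runs every recorded
// node as an element-wise add or subtract over those buffers.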
class SampleStableDelegateKernel : public SimpleOpaqueDelegateKernelInterface {
bool IsExternalTensor(const TfLiteOpaqueTensor* opaque_tensor) const {
return external_tensors_.count(opaque_tensor) != 0;
}
void DeriveExternalTensors() {
for (const TfLiteOpaqueTensor* tensor : node_input_tensors_set_) {
if (node_output_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_set_) {
if (node_input_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
}
public:
TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) override {
if (params->delegate == nullptr) return kTfLiteDelegateError;
context_ = context;
builtin_code_.resize(params->nodes_to_replace->size);
node_input_tensors_.resize(params->nodes_to_replace->size);
node_output_tensors_.resize(params->nodes_to_replace->size);
for (int i = 0; i < params->nodes_to_replace->size; ++i) {
const int node_index = params->nodes_to_replace->data[i];
TfLiteOpaqueNode* delegated_node = nullptr;
TfLiteOperator* delegated_node_registration = nullptr;
TfLiteOpaqueContextGetNodeAndRegistration(
context, node_index, &delegated_node, &delegated_node_registration);
auto input_tensor1 = TfLiteOpaqueNodeGetInput(context, delegated_node, 0);
node_input_tensors_[i].push_back(input_tensor1);
node_input_tensors_set_.insert(input_tensor1);
auto input_tensor2 = TfLiteOpaqueNodeGetInput(context, delegated_node, 1);
node_input_tensors_[i].push_back(input_tensor2);
node_input_tensors_set_.insert(input_tensor2);
auto output_tensor =
TfLiteOpaqueNodeGetOutput(context, delegated_node, 0);
node_output_tensors_[i] = output_tensor;
node_output_tensors_set_.insert(output_tensor);
builtin_code_[i] =
TfLiteOperatorGetBuiltInCode(delegated_node_registration);
}
DeriveExternalTensors();
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
if (external_tensors_.empty()) return kTfLiteOk;
const int kTheInputTensorSize =
helpers::CalculateNumElements((*external_tensors_.begin()));
for (std::vector<const TfLiteOpaqueTensor*>& vecs : node_input_tensors_) {
for (const TfLiteOpaqueTensor* tensor : vecs) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory = internal_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory = internal_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
return kTfLiteOk;
}
void ComputeImpl(float* input_1, float* input_2, float* output,
int builtin_code, int number_of_elements) {
for (int i = 0; i < number_of_elements; ++i) {
if (builtin_code == kTfLiteBuiltinAdd) {
output[i] = input_1[i] + input_2[i];
} else {
output[i] = input_1[i] - input_2[i];
}
}
}
float* GetRawDataSource(TfLiteOpaqueContext* context,
const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_tensors_memory_[tensor].data();
}
}
TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
for (int i = 0; i < node_input_tensors_.size(); ++i) {
float* input1 = GetRawDataSource(context, node_input_tensors_[i][0]);
float* input2 = GetRawDataSource(context, node_input_tensors_[i][1]);
float* output = GetRawDataSource(context, node_output_tensors_[i]);
ComputeImpl(input1, input2, output, builtin_code_[i],
helpers::CalculateNumElements(node_output_tensors_[i]));
}
return kTfLiteOk;
}
private:
std::vector<std::vector<const TfLiteOpaqueTensor*>> node_input_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_input_tensors_set_;
std::vector<const TfLiteOpaqueTensor*> node_output_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_output_tensors_set_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> external_tensors_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<float>>
internal_tensors_memory_;
TfLiteOpaqueContext* context_;
std::vector<int> builtin_code_;
};
}
int helpers::CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor) {
int total_num_elements = 1;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(opaque_tensor); ++i) {
total_num_elements *= TfLiteOpaqueTensorDim(opaque_tensor, i);
}
return total_num_elements;
}
bool SampleStableDelegate::IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const {
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration_external);
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
if (builtin_operator == kTfLiteBuiltinAdd) {
TfLiteAddParams* params = reinterpret_cast<TfLiteAddParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
} else if (builtin_operator == kTfLiteBuiltinSub) {
TfLiteSubParams* params = reinterpret_cast<TfLiteSubParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
} else {
return false;
}
if (TfLiteOpaqueNodeNumberOfInputs(node) != 2) return false;
const TfLiteOpaqueTensor* tensor_1 =
TfLiteOpaqueNodeGetInput(context, node, 0);
const TfLiteOpaqueTensor* tensor_2 =
TfLiteOpaqueNodeGetInput(context, node, 1);
if (!tensor_1 || TfLiteOpaqueTensorType(tensor_1) != kTfLiteFloat32)
return false;
if (!tensor_2 || TfLiteOpaqueTensorType(tensor_2) != kTfLiteFloat32)
return false;
if (TfLiteOpaqueTensorNumDims(tensor_1) !=
TfLiteOpaqueTensorNumDims(tensor_2))
return false;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(tensor_1); ++i) {
if (TfLiteOpaqueTensorDim(tensor_1, i) !=
TfLiteOpaqueTensorDim(tensor_2, i)) {
return false;
}
}
return true;
}
TfLiteStatus SampleStableDelegate::Initialize(TfLiteOpaqueContext* context) {
return kTfLiteOk;
}
const char* SampleStableDelegate::Name() const {
return kSampleStableDelegateName;
}
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
SampleStableDelegate::CreateDelegateKernelInterface() {
return std::make_unique<SampleStableDelegateKernel>();
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithAdd) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithSub) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/sub.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor_0, nullptr);
const float kTensor0CellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor_0);
std::vector<float> input_0(n, kTensor0CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_0, input_0.data(),
input_0.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
ASSERT_NE(input_tensor_1, nullptr);
n = tflite::NumElements(input_tensor_1);
const float kTensor1CellValue = 2.f;
std::vector<float> input_1(n, kTensor1CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_1, input_1.data(),
input_1.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensor0CellValue - kTensor1CellValue);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} |
975 | cpp | tensorflow/tensorflow | sample_stable_delegate_with_control_flow | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_SAMPLE_STABLE_DELEGATE_SAMPLE_STABLE_DELEGATE_WITH_CONTROL_FLOW_H_
#define TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_SAMPLE_STABLE_DELEGATE_SAMPLE_STABLE_DELEGATE_WITH_CONTROL_FLOW_H_
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
namespace helpers {
int CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor);
}
static const char kSampleStableDelegateName[] = "google_sample_delegate";
static const char kSampleStableDelegateVersion[] = "1.0.0";
class SampleStableDelegate : public SimpleOpaqueDelegateInterface {
public:
bool IsNodeSupportedByDelegate(const TfLiteOperator* registration_external,
const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const override;
TfLiteStatus Initialize(TfLiteOpaqueContext* context) override;
const char* Name() const override;
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
CreateDelegateKernelInterface() override;
private:
TfLiteStatus ComputeCompatibleCalleeSubgraphs(
TfLiteOpaqueContext* opaque_context, int subgraph_index);
TfLiteStatus PrepareControlFlow(TfLiteOpaqueContext* opaque_context);
void AddCalleeSubgraphToCallerSubgraph(int callee_subgraph_index,
int caller_subgraph_index) {
control_flow_subgraph_tree_[caller_subgraph_index].insert(
callee_subgraph_index);
}
void AddCompatibleCalleeSubgraph(int subgraph_index) {
compatible_callee_subgraph_indices_.insert(subgraph_index);
}
bool IsCompatibleCalleeSubgraph(int subgraph_index) const {
return compatible_callee_subgraph_indices_.contains(subgraph_index);
}
absl::flat_hash_map<int, absl::flat_hash_set<int>>
control_flow_subgraph_tree_;
absl::flat_hash_set<int> compatible_callee_subgraph_indices_;
bool has_been_initialized_ = false;
};
}
}
#endif
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
static const int kTopLevelSubgraphIndex = -1;
namespace {
class SampleStableDelegateKernel : public SimpleOpaqueDelegateKernelInterface {
bool IsExternalTensor(const TfLiteOpaqueTensor* opaque_tensor) const {
return external_tensors_.count(opaque_tensor) != 0;
}
void DeriveExternalTensors() {
for (const TfLiteOpaqueTensor* tensor : node_input_tensors_set_) {
if (node_output_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_set_) {
if (node_input_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
}
public:
TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) override {
if (params->delegate == nullptr) return kTfLiteDelegateError;
context_ = context;
std::vector<int> callee_subgraph_indices;
TfLiteStatus status =
InitSubgraphNodes(context, kTopLevelSubgraphIndex,
params->nodes_to_replace, callee_subgraph_indices);
if (status != kTfLiteOk) return status;
DeriveExternalTensors();
return kTfLiteOk;
}
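  // Added note: InitSubgraphNodes records, per subgraph, each delegated node's
  // builtin code and its input/output tensors. When it encounters a WHILE node
  // it reads the cond/body subgraph indices from TfLiteWhileParams, acquires
  // each branch subgraph's context, and recurses over that branch's execution
  // plan so nested nodes are captured too; only tensors of the top-level
  // subgraph are added to the external-tensor candidate sets.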
TfLiteStatus InitSubgraphNodes(TfLiteOpaqueContext* context,
int subgraph_index,
const TfLiteIntArray* nodes_to_execute,
std::vector<int>& callee_subgraph_indices) {
node_input_tensors_[subgraph_index].resize(nodes_to_execute->size);
node_output_tensors_[subgraph_index].resize(nodes_to_execute->size);
builtin_codes_[subgraph_index].resize(nodes_to_execute->size);
for (int i = 0; i < nodes_to_execute->size; ++i) {
const int node_index = nodes_to_execute->data[i];
TfLiteOpaqueNode* delegated_node = nullptr;
TfLiteOperator* delegated_node_registration = nullptr;
TfLiteOpaqueContextGetNodeAndRegistration(
context, node_index, &delegated_node, &delegated_node_registration);
builtin_codes_[subgraph_index][i] =
TfLiteOperatorGetBuiltInCode(delegated_node_registration);
for (int n = 0; n < TfLiteOpaqueNodeNumberOfInputs(delegated_node); ++n) {
auto input_tensor =
TfLiteOpaqueNodeGetInput(context, delegated_node, n);
node_input_tensors_[subgraph_index][i].push_back(input_tensor);
if (subgraph_index == kTopLevelSubgraphIndex) {
node_input_tensors_set_.insert(input_tensor);
}
}
for (int n = 0; n < TfLiteOpaqueNodeNumberOfOutputs(delegated_node);
++n) {
auto output_tensor =
TfLiteOpaqueNodeGetOutput(context, delegated_node, n);
node_output_tensors_[subgraph_index][i].push_back(output_tensor);
if (subgraph_index == kTopLevelSubgraphIndex) {
node_output_tensors_set_.insert(output_tensor);
}
}
if (builtin_codes_[subgraph_index][i] == kTfLiteBuiltinWhile) {
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(delegated_node);
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(builtin_data);
control_flow_branch_indices_[subgraph_index][i] = {
params->cond_subgraph_index, params->body_subgraph_index};
for (int branch_index :
control_flow_branch_indices_[subgraph_index][i]) {
callee_subgraph_indices.push_back(branch_index);
TfLiteStatus status;
TfLiteIntArray* execution_plan;
TfLiteOpaqueContext* branch_context;
status = TfLiteOpaqueContextAcquireSubgraphContext(
context, branch_index, &branch_context);
if (status != kTfLiteOk) return status;
status = TfLiteOpaqueContextGetExecutionPlan(branch_context,
&execution_plan);
if (status != kTfLiteOk) return status;
status = InitSubgraphNodes(branch_context, branch_index,
execution_plan, callee_subgraph_indices);
if (status != kTfLiteOk) return status;
status =
TfLiteOpaqueContextReleaseSubgraphContext(context, branch_index);
if (status != kTfLiteOk) return status;
}
}
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
if (external_tensors_.empty()) return kTfLiteOk;
const int kTheInputTensorSize =
helpers::CalculateNumElements((*external_tensors_.begin()));
for (auto [_, node_input_tensors] : node_input_tensors_) {
for (std::vector<const TfLiteOpaqueTensor*>& vecs : node_input_tensors) {
for (const TfLiteOpaqueTensor* tensor : vecs) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory =
internal_float_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
}
for (auto [subgraph_index, node_output_tensors] : node_output_tensors_) {
for (int i = 0; i < node_output_tensors.size(); ++i) {
std::vector<const TfLiteOpaqueTensor*>& vecs = node_output_tensors[i];
for (int j = 0; j < vecs.size(); ++j) {
const TfLiteOpaqueTensor* tensor = vecs[j];
if (IsExternalTensor(tensor)) break;
if (builtin_codes_[subgraph_index][i] == kTfLiteBuiltinEqual) {
std::vector<int>& vec_memory = internal_int_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
} else {
std::vector<float>& vec_memory =
internal_float_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
}
}
return kTfLiteOk;
}
int* GetIntRawDataSource(const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<int*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_int_tensors_memory_[tensor].data();
}
}
float* GetFloatRawDataSource(const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_float_tensors_memory_[tensor].data();
}
}
void CopyRawDataSource(const TfLiteOpaqueTensor* from_tensor,
const TfLiteOpaqueTensor* to_tensor) {
float* from_data = GetFloatRawDataSource(from_tensor);
float* to_data = GetFloatRawDataSource(to_tensor);
int number_of_elements = helpers::CalculateNumElements(to_tensor);
memcpy(to_data, from_data, number_of_elements * sizeof(float));
}
TfLiteStatus EvalArithmeticOp(int subgraph_index, int node_index) {
auto node_input_tensors = node_input_tensors_[subgraph_index];
auto node_output_tensors = node_output_tensors_[subgraph_index];
auto builtin_codes = builtin_codes_[subgraph_index];
float* input1 = GetFloatRawDataSource(node_input_tensors[node_index][0]);
float* input2 = GetFloatRawDataSource(node_input_tensors[node_index][1]);
float* output = GetFloatRawDataSource(node_output_tensors[node_index][0]);
int number_of_elements =
helpers::CalculateNumElements(node_output_tensors[node_index][0]);
for (int i = 0; i < number_of_elements; ++i) {
switch (builtin_codes[node_index]) {
case kTfLiteBuiltinAdd:
output[i] = input1[i] + input2[i];
break;
case kTfLiteBuiltinSub:
output[i] = input1[i] - input2[i];
break;
case kTfLiteBuiltinMul:
output[i] = input1[i] * input2[i];
break;
default:
return kTfLiteDelegateError;
}
}
return kTfLiteOk;
}
TfLiteStatus EvalComparisonOp(int subgraph_index, int node_index) {
auto node_input_tensors = node_input_tensors_[subgraph_index];
auto node_output_tensors = node_output_tensors_[subgraph_index];
auto builtin_codes = builtin_codes_[subgraph_index];
float* input1 = GetFloatRawDataSource(node_input_tensors[node_index][0]);
float* input2 = GetFloatRawDataSource(node_input_tensors[node_index][1]);
int* output = GetIntRawDataSource(node_output_tensors[node_index][0]);
int number_of_elements =
helpers::CalculateNumElements(node_output_tensors[node_index][0]);
for (int i = 0; i < number_of_elements; ++i) {
switch (builtin_codes[node_index]) {
case kTfLiteBuiltinEqual:
output[i] = input1[i] == input2[i];
break;
default:
return kTfLiteDelegateError;
}
}
return kTfLiteOk;
}
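  // Evaluates a WHILE node: the WHILE input is copied into the condition
  // subgraph, then the condition and body subgraphs are run alternately, with
  // the body output fed back into the condition input, until the condition
  // output contains a zero element; the last body output is then copied to the
  // WHILE output.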
TfLiteStatus EvalWhileOp(int while_subgraph_index, int while_node_index) {
auto branch_indices =
control_flow_branch_indices_[while_subgraph_index][while_node_index];
int cond_subgraph_index = branch_indices[0];
int body_subgraph_index = branch_indices[1];
int last_cond_node_index =
node_output_tensors_[cond_subgraph_index].size() - 1;
int last_body_node_index =
node_output_tensors_[body_subgraph_index].size() - 1;
CopyRawDataSource(
node_input_tensors_[while_subgraph_index][while_node_index][0],
node_input_tensors_[cond_subgraph_index][0][0]);
TfLiteStatus status;
while (true) {
status = EvalSubgraph(cond_subgraph_index);
if (status != kTfLiteOk) return status;
int* cond_output = GetIntRawDataSource(
node_output_tensors_[cond_subgraph_index][last_cond_node_index][0]);
int number_of_elements = helpers::CalculateNumElements(
node_output_tensors_[cond_subgraph_index][last_cond_node_index][0]);
bool condition = true;
for (int i = 0; i < number_of_elements; ++i) {
if (cond_output[i] == 0) {
condition = false;
break;
}
}
if (!condition) {
CopyRawDataSource(
node_output_tensors_[body_subgraph_index][last_body_node_index][0],
node_output_tensors_[while_subgraph_index][while_node_index][0]);
break;
}
CopyRawDataSource(node_input_tensors_[cond_subgraph_index][0][0],
node_input_tensors_[body_subgraph_index][0][0]);
status = EvalSubgraph(body_subgraph_index);
if (status != kTfLiteOk) return status;
CopyRawDataSource(
node_output_tensors_[body_subgraph_index][last_body_node_index][0],
node_input_tensors_[cond_subgraph_index][0][0]);
}
return kTfLiteOk;
}
TfLiteStatus EvalSubgraph(int subgraph_index) {
TfLiteStatus status;
for (int i = 0; i < node_input_tensors_[subgraph_index].size(); ++i) {
status = EvalNode(subgraph_index, i);
if (status != kTfLiteOk) return status;
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
return EvalSubgraph(kTopLevelSubgraphIndex);
}
TfLiteStatus EvalNode(int subgraph_index, int node_index) {
TfLiteStatus status;
switch (builtin_codes_[subgraph_index][node_index]) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinMul:
status = EvalArithmeticOp(subgraph_index, node_index);
break;
case kTfLiteBuiltinEqual:
status = EvalComparisonOp(subgraph_index, node_index);
break;
case kTfLiteBuiltinWhile:
status = EvalWhileOp(subgraph_index, node_index);
break;
default:
return kTfLiteDelegateError;
}
if (status != kTfLiteOk) {
return status;
}
return kTfLiteOk;
}
private:
absl::flat_hash_map<int, absl::flat_hash_map<int, std::vector<int>>>
control_flow_branch_indices_;
absl::flat_hash_map<int, std::vector<std::vector<const TfLiteOpaqueTensor*>>>
node_input_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_input_tensors_set_;
absl::flat_hash_map<int, std::vector<std::vector<const TfLiteOpaqueTensor*>>>
node_output_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_output_tensors_set_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> external_tensors_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<float>>
internal_float_tensors_memory_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<int>>
internal_int_tensors_memory_;
TfLiteOpaqueContext* context_;
absl::flat_hash_map<int, std::vector<int>> builtin_codes_;
};
}
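// Walks the given subgraph and, for every WHILE node found, records its
// condition and body subgraphs in the control-flow subgraph tree and visits
// them recursively. A subgraph whose nodes are all supported by the delegate
// is recorded as a compatible callee subgraph.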
TfLiteStatus SampleStableDelegate::ComputeCompatibleCalleeSubgraphs(
TfLiteOpaqueContext* opaque_context, int subgraph_index) {
TfLiteStatus status;
TfLiteOpaqueContext* current_context;
status = TfLiteOpaqueContextAcquireSubgraphContext(
      opaque_context, subgraph_index, &current_context);
if (status != kTfLiteOk) {
return status;
}
TfLiteIntArray* execution_plan;
status =
TfLiteOpaqueContextGetExecutionPlan(current_context, &execution_plan);
if (status != kTfLiteOk) {
return status;
}
bool is_compatible_subgraph = true;
for (int i = 0; i < execution_plan->size; ++i) {
int node_index = execution_plan->data[i];
TfLiteOpaqueNode* node = nullptr;
TfLiteOperator* registration = nullptr;
status = TfLiteOpaqueContextGetNodeAndRegistration(
        current_context, node_index, &node, &registration);
if (status != kTfLiteOk) {
return status;
}
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration);
if (builtin_operator == kTfLiteBuiltinWhile) {
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
const auto* op_data =
reinterpret_cast<const TfLiteWhileParams*>(builtin_data);
AddCalleeSubgraphToCallerSubgraph(op_data->cond_subgraph_index,
subgraph_index);
ComputeCompatibleCalleeSubgraphs(opaque_context,
op_data->cond_subgraph_index);
AddCalleeSubgraphToCallerSubgraph(op_data->body_subgraph_index,
subgraph_index);
ComputeCompatibleCalleeSubgraphs(opaque_context,
op_data->body_subgraph_index);
}
if (!IsNodeSupportedByDelegate(registration, node, current_context)) {
is_compatible_subgraph = false;
}
}
if (is_compatible_subgraph) {
AddCompatibleCalleeSubgraph(subgraph_index);
}
status =
TfLiteOpaqueContextReleaseSubgraphContext(opaque_context, subgraph_index);
if (status != kTfLiteOk) {
return status;
}
return kTfLiteOk;
}
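// Builds the control-flow subgraph tree starting from the primary subgraph.
// When every callee of a caller subgraph is compatible, each of those callees
// is marked as delegation-skippable so the runtime does not partition them
// separately and the delegate can run them inside its own WHILE kernel.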
TfLiteStatus SampleStableDelegate::PrepareControlFlow(
TfLiteOpaqueContext* opaque_context) {
constexpr int kPrimarySubgraphIndex = 0;
ComputeCompatibleCalleeSubgraphs(opaque_context, kPrimarySubgraphIndex);
for (const auto& [caller_subgraph_index, callee_subgraph_indices] :
control_flow_subgraph_tree_) {
if (callee_subgraph_indices.empty()) {
continue;
}
bool callee_subgraphs_all_delegatable = true;
for (int callee_subgraph_index : callee_subgraph_indices) {
if (!IsCompatibleCalleeSubgraph(callee_subgraph_index)) {
callee_subgraphs_all_delegatable = false;
}
}
if (!callee_subgraphs_all_delegatable) {
continue;
}
for (int callee_subgraph_index : callee_subgraph_indices) {
TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
opaque_context, callee_subgraph_index);
}
}
return kTfLiteOk;
}
int helpers::CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor) {
int total_num_elements = 1;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(opaque_tensor); ++i) {
total_num_elements *= TfLiteOpaqueTensorDim(opaque_tensor, i);
}
return total_num_elements;
}
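// A node is supported if it is an ADD, SUB or MUL without a fused activation,
// an EQUAL, or a WHILE whose condition and body subgraphs are both compatible.
// Non-WHILE nodes must take exactly two float32 inputs with identical shapes;
// WHILE nodes must take a single float32 input.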
bool SampleStableDelegate::IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const {
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration_external);
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
switch (builtin_operator) {
case kTfLiteBuiltinAdd: {
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinSub: {
TfLiteSubParams* params =
reinterpret_cast<TfLiteSubParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinMul: {
TfLiteMulParams* params =
reinterpret_cast<TfLiteMulParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinEqual:
break;
case kTfLiteBuiltinWhile: {
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(builtin_data);
if (!params || !IsCompatibleCalleeSubgraph(params->cond_subgraph_index) ||
!IsCompatibleCalleeSubgraph(params->body_subgraph_index)) {
return false;
}
break;
}
default:
return false;
}
if (builtin_operator == kTfLiteBuiltinWhile) {
if (TfLiteOpaqueNodeNumberOfInputs(node) != 1) return false;
const TfLiteOpaqueTensor* tensor =
TfLiteOpaqueNodeGetInput(context, node, 0);
if (!tensor || TfLiteOpaqueTensorType(tensor) != kTfLiteFloat32)
return false;
} else {
if (TfLiteOpaqueNodeNumberOfInputs(node) != 2) return false;
const TfLiteOpaqueTensor* tensor_1 =
TfLiteOpaqueNodeGetInput(context, node, 0);
const TfLiteOpaqueTensor* tensor_2 =
TfLiteOpaqueNodeGetInput(context, node, 1);
if (!tensor_1 || TfLiteOpaqueTensorType(tensor_1) != kTfLiteFloat32)
return false;
if (!tensor_2 || TfLiteOpaqueTensorType(tensor_2) != kTfLiteFloat32)
return false;
if (TfLiteOpaqueTensorNumDims(tensor_1) !=
TfLiteOpaqueTensorNumDims(tensor_2))
return false;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(tensor_1); ++i) {
if (TfLiteOpaqueTensorDim(tensor_1, i) !=
TfLiteOpaqueTensorDim(tensor_2, i)) {
return false;
}
}
}
return true;
}
TfLiteStatus SampleStableDelegate::Initialize(TfLiteOpaqueContext* context) {
if (!has_been_initialized_) {
PrepareControlFlow(context);
has_been_initialized_ = true;
}
return kTfLiteOk;
}
const char* SampleStableDelegate::Name() const {
return kSampleStableDelegateName;
}
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
SampleStableDelegate::CreateDelegateKernelInterface() {
return std::make_unique<SampleStableDelegateKernel>();
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithAdd) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithSub) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/sub.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor_0, nullptr);
const float kTensor0CellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor_0);
std::vector<float> input_0(n, kTensor0CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_0, input_0.data(),
input_0.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
ASSERT_NE(input_tensor_1, nullptr);
n = tflite::NumElements(input_tensor_1);
const float kTensor1CellValue = 2.f;
std::vector<float> input_1(n, kTensor1CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_1, input_1.data(),
input_1.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensor0CellValue - kTensor1CellValue);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithNestedWhile) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/nested_while.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 1.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 2);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} |
976 | cpp | tensorflow/tensorflow | tflite_settings_json_parser | tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.cc | tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_STABLE_DELEGATE_TFLITE_SETTINGS_JSON_PARSER_H_
#define TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_STABLE_DELEGATE_TFLITE_SETTINGS_JSON_PARSER_H_
#include <string>
#include "flatbuffers/idl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace tflite {
namespace delegates {
namespace utils {
class TfLiteSettingsJsonParser {
public:
TfLiteSettingsJsonParser();
const TFLiteSettings* Parse(const std::string& json_file_path);
const uint8_t* GetBufferPointer();
flatbuffers::uoffset_t GetBufferSize();
private:
bool LoadFromJsonFile(const std::string& json_file_path);
flatbuffers::Parser parser_;
uint8_t* buffer_pointer_;
flatbuffers::uoffset_t buffer_size_;
};
}
}
}
#endif
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include <string>
#include "flatbuffers/idl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_fbs_contents-inl.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
namespace utils {
TfLiteSettingsJsonParser::TfLiteSettingsJsonParser() {
TFLITE_DCHECK(parser_.Parse(configuration_fbs_contents) &&
parser_.SetRootType("TFLiteSettings"));
}
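// Parses the JSON file into a serialized TFLiteSettings flatbuffer and returns
// the root table, or nullptr on failure. The returned pointer and the buffer
// exposed through GetBufferPointer()/GetBufferSize() are owned by the parser
// and remain valid only until the next call to Parse().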
const TFLiteSettings* TfLiteSettingsJsonParser::Parse(
const std::string& json_file_path) {
if (!LoadFromJsonFile(json_file_path) || buffer_pointer_ == nullptr) {
return nullptr;
}
return flatbuffers::GetRoot<TFLiteSettings>(buffer_pointer_);
}
const uint8_t* TfLiteSettingsJsonParser::GetBufferPointer() {
return buffer_pointer_;
}
flatbuffers::uoffset_t TfLiteSettingsJsonParser::GetBufferSize() {
return buffer_size_;
}
bool TfLiteSettingsJsonParser::LoadFromJsonFile(
const std::string& json_file_path) {
buffer_size_ = 0;
buffer_pointer_ = nullptr;
if (json_file_path.empty()) {
TFLITE_LOG(ERROR) << "Invalid JSON file path.";
return false;
}
std::string json_file;
if (!flatbuffers::LoadFile(json_file_path.c_str(), false, &json_file)) {
TFLITE_LOG(ERROR) << "Failed to load the delegate settings file ("
<< json_file_path << ").";
return false;
}
if (!parser_.Parse(json_file.c_str())) {
TFLITE_LOG(ERROR) << "Failed to parse the delegate settings file ("
<< json_file_path << ").";
return false;
}
buffer_size_ = parser_.builder_.GetSize();
buffer_pointer_ = parser_.builder_.GetBufferPointer();
return true;
}
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
namespace {
using tflite::TFLiteSettings;
using tflite::delegates::utils::TfLiteSettingsJsonParser;
TEST(TfLiteSettingsJsonParserTest, SuccessWithValidXNNPackDelegateSettings) {
TfLiteSettingsJsonParser parser;
const TFLiteSettings* tflite_settings = parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
EXPECT_NE(parser.GetBufferPointer(), nullptr);
EXPECT_NE(parser.GetBufferSize(), 0);
ASSERT_NE(tflite_settings, nullptr);
EXPECT_EQ(tflite_settings->delegate(), tflite::Delegate_XNNPACK);
ASSERT_NE(tflite_settings->xnnpack_settings(), nullptr);
EXPECT_EQ(tflite_settings->xnnpack_settings()->num_threads(), 5);
}
TEST(TfLiteSettingsJsonParserTest, GetBufferPointerReturnsValidBufferPointers) {
TfLiteSettingsJsonParser parser;
parser.Parse(
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/test_xnnpack_settings.json");
const uint8_t* buffer_pointer = parser.GetBufferPointer();
ASSERT_NE(buffer_pointer, nullptr);
ASSERT_NE(parser.GetBufferSize(), 0);
const TFLiteSettings* tflite_settings =
flatbuffers::GetRoot<TFLiteSettings>(buffer_pointer);
ASSERT_NE(tflite_settings, nullptr);
EXPECT_EQ(tflite_settings->delegate(), tflite::Delegate_XNNPACK);
ASSERT_NE(tflite_settings->xnnpack_settings(), nullptr);
EXPECT_EQ(tflite_settings->xnnpack_settings()->num_threads(), 5);
}
TEST(TfLiteSettingsJsonParserTest, FailedToParseInvalidSettings) {
TfLiteSettingsJsonParser parser;
EXPECT_EQ(
parser.Parse("tensorflow/lite/tools/delegates/experimental/"
"stable_delegate/test_invalid_settings.json"),
nullptr);
EXPECT_EQ(parser.GetBufferPointer(), nullptr);
EXPECT_EQ(parser.GetBufferSize(), 0);
}
} |
977 | cpp | tensorflow/tensorflow | delegate_loader | tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.cc | tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_STABLE_DELEGATE_DELEGATE_LOADER_H_
#define TENSORFLOW_LITE_DELEGATES_UTILS_EXPERIMENTAL_STABLE_DELEGATE_DELEGATE_LOADER_H_
#include <string>
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
namespace tflite {
namespace delegates {
namespace utils {
constexpr char kTfLiteStableDelegateSymbol[] = "TFL_TheStableDelegate";
constexpr char kTfLiteLibraryPathEnvironmentVariable[] =
"TFLITE_STABLE_DELEGATE_LIBRARY_PATH";
const TfLiteStableDelegate* LoadDelegateFromSharedLibrary(
const std::string& delegate_path);
void* LoadSymbolFromSharedLibrary(const std::string& delegate_path,
const std::string& delegate_symbol);
}
}
}
#endif
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <cerrno>
#include <string>
#include "absl/strings/numbers.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace delegates {
namespace utils {
namespace {
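// Stores the directory portion of `delegate_path` in the
// TFLITE_STABLE_DELEGATE_LIBRARY_PATH environment variable; logs a warning if
// setenv fails.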
void setLibraryPathEnvironmentVariable(const std::string& delegate_path) {
std::string directory_path = "";
size_t last_slash_index = delegate_path.rfind('/');
if (last_slash_index != std::string::npos) {
directory_path = delegate_path.substr(0, last_slash_index);
}
if (setenv(kTfLiteLibraryPathEnvironmentVariable, directory_path.c_str(),
1) != 0) {
TFLITE_LOG(WARN) << "Error setting environment variable "
<< kTfLiteLibraryPathEnvironmentVariable
<< " with error: " << strerror(errno);
}
}
}
using ::tflite::acceleration::AndroidInfo;
using ::tflite::acceleration::RequestAndroidInfo;
const TfLiteStableDelegate* LoadDelegateFromSharedLibrary(
const std::string& delegate_path) {
void* symbol_pointer =
LoadSymbolFromSharedLibrary(delegate_path, kTfLiteStableDelegateSymbol);
if (!symbol_pointer) {
return nullptr;
}
return reinterpret_cast<const TfLiteStableDelegate*>(symbol_pointer);
}
void* LoadSymbolFromSharedLibrary(const std::string& delegate_path,
const std::string& delegate_symbol) {
void* delegate_lib_handle = nullptr;
int dlopen_flags = RTLD_NOW | RTLD_LOCAL;
int sdk_version;
AndroidInfo android_info;
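  // On Android SDK level 23 and above, open the library with RTLD_NODELETE so
  // that it is not unloaded when its handle is closed and symbols resolved
  // from it stay usable.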
if (RequestAndroidInfo(&android_info).ok() &&
absl::SimpleAtoi(android_info.android_sdk_version, &sdk_version) &&
sdk_version >= 23) {
dlopen_flags |= RTLD_NODELETE;
TFLITE_LOG(INFO) << "Android SDK level is " << sdk_version
<< ", using dlopen with RTLD_NODELETE.";
}
setLibraryPathEnvironmentVariable(delegate_path);
delegate_lib_handle = dlopen(delegate_path.c_str(), dlopen_flags);
if (!delegate_lib_handle) {
TFLITE_LOG(ERROR) << "Failed to open library " << delegate_path << ": "
<< dlerror();
return nullptr;
}
void* symbol_pointer = dlsym(delegate_lib_handle, delegate_symbol.c_str());
if (!symbol_pointer) {
TFLITE_LOG(ERROR) << "Failed to find " << delegate_symbol
<< " symbol: " << dlerror();
dlclose(delegate_lib_handle);
return nullptr;
}
return symbol_pointer;
}
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include <cstdlib>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
namespace {
using tflite::TFLiteSettings;
using tflite::TFLiteSettingsBuilder;
using tflite::delegates::utils::LoadDelegateFromSharedLibrary;
using tflite::delegates::utils::LoadSymbolFromSharedLibrary;
TEST(TfLiteDelegateLoaderUtilsTest, Simple) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/"
"libtensorflowlite_sample_stable_delegate.so"
);
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
EXPECT_NE(stable_delegate_handle->delegate_plugin, nullptr);
EXPECT_STREQ(
getenv(tflite::delegates::utils::kTfLiteLibraryPathEnvironmentVariable),
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate");
flatbuffers::FlatBufferBuilder flatbuffer_builder;
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder.Finish(tflite_settings);
const TFLiteSettings* settings = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder.GetBufferPointer());
auto delegate = stable_delegate_handle->delegate_plugin->create(settings);
ASSERT_NE(delegate, nullptr);
EXPECT_EQ(
stable_delegate_handle->delegate_plugin->get_delegate_errno(delegate), 0);
stable_delegate_handle->delegate_plugin->destroy(delegate);
}
TEST(TfLiteDelegateLoaderUtilsTest, WrongSymbolReturnsNullptr) {
void* symbol_pointer = LoadSymbolFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/libtensorflowlite_sample_stable_delegate.so",
"NOT_REAL_SYMBOL");
EXPECT_EQ(symbol_pointer, nullptr);
}
TEST(TfLiteDelegateLoaderUtilsTest, MissingLibReturnsNullptr) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary("not_real_delegate.so");
EXPECT_EQ(stable_delegate_handle, nullptr);
}
} |
978 | cpp | tensorflow/tensorflow | delegate | tensorflow/lite/delegates/gpu/delegate.cc | tensorflow/lite/delegates/flex/delegate_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_DELEGATE_H_
#include <stdint.h>
#include <cstddef>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/delegate_options.h"
#ifdef __cplusplus
extern "C" {
#endif
TFL_CAPI_EXPORT TfLiteDelegate* TfLiteGpuDelegateV2Create(
const TfLiteGpuDelegateOptionsV2* options);
#if defined(__ANDROID__)
TFL_CAPI_EXPORT TfLiteDelegate* TfLiteGpuDelegateV2CreateAsync(
const TfLiteGpuDelegateOptionsV2* options);
#endif
TFL_CAPI_EXPORT void TfLiteGpuDelegateV2Delete(TfLiteDelegate* delegate);
TFL_CAPI_EXPORT TfLiteDelegate* tflite_plugin_create_delegate(
const char* const* options_keys, const char* const* options_values,
size_t num_options, void (*report_error)(const char*));
TFL_CAPI_EXPORT void tflite_plugin_destroy_delegate(TfLiteDelegate* delegate);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/logger.h"
#if defined(__ANDROID__)
#include <android/hardware_buffer.h>
#endif
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/numbers.h"
#include "absl/types/span.h"
#include "tensorflow/lite/builtin_ops.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include "tensorflow/lite/core/async/interop/c/constants.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#endif
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/api.h"
#include "tensorflow/lite/delegates/gpu/cl/api.h"
#include "tensorflow/lite/delegates/gpu/cl/util.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder.h"
#include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include "tensorflow/lite/delegates/gpu/common/quantization_util.h"
#include "tensorflow/lite/delegates/gpu/delegate_options.h"
#include "tensorflow/lite/delegates/gpu/tflite_profile.h"
#include "tensorflow/lite/delegates/serialization.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include "tensorflow/lite/delegates/gpu/gl/android_sync.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/utils/async_type_helpers.h"
#include "tensorflow/lite/delegates/utils/ret_macros.h"
#include "tensorflow/lite/delegates/utils/sync_fence.h"
#include "tensorflow/lite/delegates/utils/utils.h"
#endif
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/telemetry/c/telemetry_setting_internal.h"
#include "tensorflow/lite/profiling/telemetry/telemetry.h"
#include "tensorflow/lite/profiling/telemetry/telemetry_status.h"
#ifndef CL_DELEGATE_NO_GL
#include "tensorflow/lite/delegates/gpu/gl/api2.h"
#endif
#if defined(__ANDROID__)
using tflite::delegates::utils::BufferAttributes;
using tflite::delegates::utils::BufferType;
using tflite::delegates::utils::ConvertToTfLiteStatus;
using tflite::delegates::utils::IsPowerOfTwo;
using tflite::delegates::utils::ReadBufferAttrs;
using tflite::delegates::utils::ReadSyncAttrs;
using tflite::delegates::utils::SyncAttributes;
using tflite::delegates::utils::SyncType;
using tflite::delegates::utils::WaitForAllFds;
using tflite::delegates::utils::WriteBufferAttrs;
using tflite::delegates::utils::WriteSyncAttrs;
#endif
#define TFLITE_RETURN_IF_ABSL_ERROR(expr) \
do { \
if (const absl::Status val = (expr); !val.ok()) { \
return ConvertToTfLiteStatus(val); \
} \
} while (false)
#define TFLITE_RETURN_IF_ERROR(expr) \
do { \
if (const TfLiteStatus val = (expr); val != kTfLiteOk) { \
return val; \
} \
} while (false)
#define TFLITE_AHWB_AVAILABLE() \
::tflite::gpu::OptionalAndroidHardwareBuffer::Instance().Supported()
namespace tflite {
namespace gpu {
namespace {
using delegates::Serialization;
using delegates::SerializationParams;
using tflite::TFLITE_LOG_WARNING;
constexpr char kSerializedDataPrefix[] = "gpuv2_data_";
#if defined(__ANDROID__)
constexpr size_t kRequiredByteAlignment = 1;
constexpr size_t kRequiredBytePadding = 1;
#endif
InferencePriority ToPriority(int32_t priority) {
switch (priority) {
case TFLITE_GPU_INFERENCE_PRIORITY_AUTO:
return InferencePriority::AUTO;
case TFLITE_GPU_INFERENCE_PRIORITY_MAX_PRECISION:
return InferencePriority::MAX_PRECISION;
case TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY:
return InferencePriority::MIN_LATENCY;
case TFLITE_GPU_INFERENCE_PRIORITY_MIN_MEMORY_USAGE:
return InferencePriority::MIN_MEMORY_USAGE;
}
return InferencePriority::UNKNOWN;
}
InferenceUsage ToUsage(int32_t usage) {
switch (usage) {
case TFLITE_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER:
return InferenceUsage::FAST_SINGLE_ANSWER;
case TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED:
return InferenceUsage::SUSTAINED_SPEED;
case TFLITE_GPU_INFERENCE_PREFERENCE_BALANCED:
return InferenceUsage::BALANCED;
}
return InferenceUsage::UNKNOWN;
}
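// Parses key/value option strings (as passed to tflite_plugin_create_delegate)
// into a TfLiteGpuDelegateOptionsV2 struct. Returns false and logs a warning
// on an unknown key or a malformed numeric value.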
bool ParseOptions(const char* const* options_keys,
const char* const* options_values, size_t num_options,
TfLiteGpuDelegateOptionsV2* options) {
for (size_t i = 0; i < num_options; ++i) {
    if (strcmp(options_keys[i], "is_precision_loss_allowed") == 0) {
if (!absl::SimpleAtoi(options_values[i],
&options->is_precision_loss_allowed)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "inference_preference") == 0) {
if (!absl::SimpleAtoi(options_values[i],
&options->inference_preference)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "inference_priority1") == 0) {
if (!absl::SimpleAtoi(options_values[i], &options->inference_priority1)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "inference_priority2") == 0) {
if (!absl::SimpleAtoi(options_values[i], &options->inference_priority2)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "inference_priority3") == 0) {
if (!absl::SimpleAtoi(options_values[i], &options->inference_priority3)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "experimental_flags") == 0) {
if (!absl::SimpleAtoi(options_values[i], &options->experimental_flags)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "max_delegated_partitions") == 0) {
if (!absl::SimpleAtoi(options_values[i],
&options->max_delegated_partitions)) {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: malformed option %s.",
options_keys[i]);
return false;
}
    } else if (strcmp(options_keys[i], "serialization_dir") == 0) {
options->serialization_dir = options_values[i];
    } else if (strcmp(options_keys[i], "model_token") == 0) {
options->model_token = options_values[i];
} else {
TFLITE_LOG(TFLITE_LOG_WARNING, "ParseOptions: unknown option %s.",
options_keys[i]);
return false;
}
}
return true;
}
TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate);
#if defined(__ANDROID__)
class DelegateAsyncKernel;
#endif
class Delegate {
public:
explicit Delegate(const TfLiteGpuDelegateOptionsV2* options, bool async)
: async_(async) {
telemetry_settings_ =
std::make_unique<TfLiteTelemetryGpuDelegateSettings>();
delegate_.data_ = reinterpret_cast<void*>(this);
delegate_.Prepare = DelegatePrepare;
delegate_.CopyFromBufferHandle = nullptr;
delegate_.CopyToBufferHandle = nullptr;
delegate_.FreeBufferHandle = nullptr;
delegate_.flags = kTfLiteDelegateFlagsPerOperatorProfiling;
options_ = options ? *options : TfLiteGpuDelegateOptionsV2Default();
if (options_.max_delegated_partitions <= 0) {
options_.max_delegated_partitions = 1;
}
if (options_.experimental_flags &
TFLITE_GPU_EXPERIMENTAL_FLAGS_ENABLE_SERIALIZATION &&
options_.model_token && options_.serialization_dir) {
SerializationParams params;
params.model_token = options_.model_token;
params.cache_dir = options_.serialization_dir;
serialization_ = std::make_unique<Serialization>(params);
telemetry_settings_ =
std::make_unique<TfLiteTelemetryGpuDelegateSettings>();
}
}
TfLiteDelegate* tflite_delegate() { return &delegate_; }
Serialization* serialization() { return serialization_.get(); }
const TfLiteGpuDelegateOptionsV2& options() const { return options_; }
bool async() const { return async_; }
bool IsQuantOpsAllowed() const {
return options_.experimental_flags &
TFLITE_GPU_EXPERIMENTAL_FLAGS_ENABLE_QUANT;
}
int MaxDelegatedPartitions() const {
return options_.max_delegated_partitions;
}
int num_delegate_kernels() const { return num_delegate_kernels_; }
TfLiteTelemetryGpuDelegateSettings* telemetry_settings() {
return telemetry_settings_.get();
}
private:
TfLiteDelegate delegate_;
TfLiteGpuDelegateOptionsV2 options_;
std::atomic<int> num_delegate_kernels_ = 0;
std::unique_ptr<Serialization> serialization_;
std::unique_ptr<TfLiteTelemetryGpuDelegateSettings> telemetry_settings_;
bool async_;
friend class DelegateKernelCore;
#if defined(__ANDROID__)
friend TfLiteRegistration CreateAsyncRegistration();
#endif
};
class DelegateKernelCore {
public:
explicit DelegateKernelCore(Delegate* delegate) : delegate_(delegate) {
++delegate_->num_delegate_kernels_;
telemetry_settings_ =
std::make_unique<TfLiteTelemetryGpuDelegateSettings>();
}
~DelegateKernelCore() { --delegate_->num_delegate_kernels_; }
bool enforce_same_thread() const { return enforce_same_thread_; }
const std::vector<int64_t>& input_indices() const { return input_indices_; }
const std::vector<int64_t>& output_indices() const { return output_indices_; }
const absl::flat_hash_map<int, int>& quant_conversion_map() const {
return quant_conversion_map_;
}
const std::unique_ptr<InferenceRunner>& runner() const { return runner_; }
absl::Status Setup(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params);
private:
ObjectDef GetObjectDef(int index,
DataType data_type = DataType::FLOAT32) const;
absl::Status InitializeGraph(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph,
std::vector<uint32_t>* input_refs,
std::vector<uint32_t>* output_refs);
absl::Status InitializeOpenClApi(GraphFloat32* graph,
std::unique_ptr<InferenceBuilder>* builder,
bool* graph_is_destroyed,
TfLiteContext* context,
const TfLiteDelegateParams* delegate_params,
Serialization* serialization);
absl::Status InitializeOpenGlApi(GraphFloat32* graph,
std::unique_ptr<InferenceBuilder>* builder);
absl::Status MaybeInitializeSerializedOpenCL(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
std::unique_ptr<InferenceBuilder>* builder, cl::InferenceOptions* options,
cl::InferenceEnvironmentOptions* env_options,
cl::InferenceEnvironmentProperties* properties,
Serialization* serialization);
absl::Status SaveSerializedOpenCL(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
cl::InferenceOptions* options, Serialization* serialization,
const std::vector<uint8_t>& serialized_model);
Delegate* const delegate_;
std::unique_ptr<cl::InferenceEnvironment> cl_environment_;
#ifndef CL_DELEGATE_NO_GL
std::unique_ptr<gl::InferenceEnvironment> gl_environment_;
#endif
std::unique_ptr<InferenceRunner> runner_;
std::vector<int64_t> input_indices_;
std::vector<int64_t> output_indices_;
absl::flat_hash_map<int, int> quant_conversion_map_;
bool enforce_same_thread_ = false;
std::unique_ptr<TfLiteTelemetryGpuDelegateSettings> telemetry_settings_;
};
ObjectDef DelegateKernelCore::GetObjectDef(int index,
DataType data_type) const {
ObjectDef default_object_def;
default_object_def.data_type = data_type;
default_object_def.data_layout = DataLayout::BHWC;
default_object_def.object_type =
delegate_->async() ? ObjectType::OPENGL_SSBO : ObjectType::CPU_MEMORY;
default_object_def.user_provided = true;
return default_object_def;
}
absl::Status DelegateKernelCore::InitializeGraph(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
GraphFloat32* graph, std::vector<uint32_t>* input_refs,
std::vector<uint32_t>* output_refs) {
quant_conversion_map_.clear();
if (delegate_->IsQuantOpsAllowed()) {
RETURN_IF_ERROR(BuildFinalModel(context, delegate_params, graph,
&quant_conversion_map_));
} else {
RETURN_IF_ERROR(BuildFinalModel(context, delegate_params, graph));
}
const std::vector<Value*> inputs = graph->inputs();
input_refs->clear();
input_refs->reserve(delegate_params->input_tensors->size);
for (int i = 0, j = 0; i < delegate_params->input_tensors->size; ++i) {
const TfLiteTensor* tensor =
context->tensors + delegate_params->input_tensors->data[i];
if (tflite::IsConstantTensor(tensor)) continue;
input_refs->push_back(inputs[j]->tensor.ref);
++j;
}
const std::vector<Value*> outputs = graph->outputs();
output_refs->clear();
const int output_size = std::min(static_cast<int>(graph->outputs().size()),
delegate_params->output_tensors->size);
output_refs->reserve(output_size);
for (int i = 0; i < output_size; ++i) {
output_refs->push_back(outputs[i]->tensor.ref);
}
return absl::OkStatus();
}
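// Builds the GPU inference runner for the delegated partition: converts the
// TFLite nodes to a GraphFloat32, initializes the OpenCL backend (falling back
// to OpenGL unless a single backend is forced via experimental flags), and
// binds the partition's input/output tensors to inference objects.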
absl::Status DelegateKernelCore::Setup(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params) {
GraphFloat32 graph;
std::vector<uint32_t> input_refs;
std::vector<uint32_t> output_refs;
RETURN_IF_ERROR(InitializeGraph(context, delegate_params, &graph, &input_refs,
&output_refs));
std::unique_ptr<InferenceBuilder> builder;
bool graph_is_destroyed;
bool backend_opencl = false;
const int experimental_flags = delegate_->options().experimental_flags;
if (experimental_flags & TFLITE_GPU_EXPERIMENTAL_FLAGS_CL_ONLY) {
RETURN_IF_ERROR(InitializeOpenClApi(&graph, &builder, &graph_is_destroyed,
context, delegate_params,
delegate_->serialization()));
backend_opencl = true;
} else if (experimental_flags & TFLITE_GPU_EXPERIMENTAL_FLAGS_GL_ONLY) {
RETURN_IF_ERROR(InitializeOpenGlApi(&graph, &builder));
} else {
absl::Status status =
InitializeOpenClApi(&graph, &builder, &graph_is_destroyed, context,
delegate_params, delegate_->serialization());
if (!status.ok()) {
TF_LITE_KERNEL_LOG(context, std::string(status.message()).c_str());
TF_LITE_KERNEL_LOG(context, "Falling back to OpenGL");
GraphFloat32 graph2;
if (graph_is_destroyed) {
RETURN_IF_ERROR(InitializeGraph(context, delegate_params, &graph2,
&input_refs, &output_refs));
}
RETURN_IF_ERROR(
InitializeOpenGlApi(graph_is_destroyed ? &graph2 : &graph, &builder));
} else {
backend_opencl = true;
}
}
telemetry_settings_->backend =
backend_opencl ? TfLiteTelemetryGpuDelegateSettings::OPENCL
: TfLiteTelemetryGpuDelegateSettings::OPENGL;
telemetry::TelemetryReportDelegateSettings(
context, "GpuDelegateKernel::Prepare",
telemetry::TelemetrySource::TFLITE_GPU, telemetry_settings_.get());
input_indices_.reserve(input_refs.size());
for (uint32_t tensor_index : input_refs) {
const int64_t object_index = input_indices_.size();
input_indices_.push_back(tensor_index);
const TfLiteTensor& tflite_tensor = context->tensors[tensor_index];
const DataType data_type = ToDataType(tflite_tensor.type);
RETURN_IF_ERROR(builder->SetInputObjectDef(
object_index, GetObjectDef(tensor_index, data_type)));
}
output_indices_.reserve(output_refs.size());
for (uint32_t tensor_index : output_refs) {
const int64_t object_index = output_indices_.size();
output_indices_.push_back(tensor_index);
const TfLiteTensor& tflite_tensor = context->tensors[tensor_index];
const DataType data_type = ToDataType(tflite_tensor.type);
RETURN_IF_ERROR(builder->SetOutputObjectDef(
object_index, GetObjectDef(tensor_index, data_type)));
}
return builder->Build(&runner_);
}
absl::Status DelegateKernelCore::InitializeOpenClApi(
GraphFloat32* graph, std::unique_ptr<InferenceBuilder>* builder,
bool* graph_is_destroyed, TfLiteContext* context,
const TfLiteDelegateParams* delegate_params,
Serialization* serialization = nullptr) {
*graph_is_destroyed = false;
cl::InferenceEnvironmentOptions env_options;
cl::InferenceEnvironmentProperties properties;
auto delegate_options = delegate_->options();
cl::InferenceOptions options;
if (delegate_options.is_precision_loss_allowed == -1) {
options.priority1 = ToPriority(delegate_options.inference_priority1);
options.priority2 = ToPriority(delegate_options.inference_priority2);
options.priority3 = ToPriority(delegate_options.inference_priority3);
} else {
if (delegate_options.is_precision_loss_allowed == 0) {
options.priority1 = InferencePriority::MAX_PRECISION;
} else {
options.priority1 = InferencePriority::MIN_LATENCY;
}
}
options.usage = ToUsage(delegate_options.inference_preference);
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
options.gpu_invoke_loop_times = delegate_options.gpu_invoke_loop_times;
#endif
if (!serialization) {
RETURN_IF_ERROR(cl::NewInferenceEnvironment(env_options, &cl_environment_,
&properties));
*graph_is_destroyed = true;
RETURN_IF_ERROR(cl_environment_->NewInferenceBuilder(
options, std::move(*graph), builder));
} else {
if (MaybeInitializeSerializedOpenCL(context, delegate_params, builder,
&options, &env_options, &properties,
serialization)
.ok()) {
return absl::OkStatus();
}
RETURN_IF_ERROR(cl::NewInferenceEnvironment(env_options, &cl_environment_,
&properties));
*graph_is_destroyed = true;
std::vector<uint8_t> serialized_model;
RETURN_IF_ERROR(cl_environment_->BuildSerializedModel(
options, std::move(*graph), &serialized_model));
RETURN_IF_ERROR(
cl_environment_->NewInferenceBuilder(serialized_model, builder));
RETURN_IF_ERROR(SaveSerializedOpenCL(context, delegate_params, &options,
serialization, serialized_model));
}
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Initialized OpenCL-based API.");
return absl::OkStatus();
}
absl::Status DelegateKernelCore::InitializeOpenGlApi(
GraphFloat32* graph, std::unique_ptr<InferenceBuilder>* builder) {
#ifndef CL_DELEGATE_NO_GL
gl::InferenceEnvironmentOptions env_options;
gl::InferenceEnvironmentProperties properties;
RETURN_IF_ERROR(
NewInferenceEnvironment(env_options, &gl_environment_, &properties));
auto delegate_options = delegate_->options();
gl::InferenceOptions options;
options.usage = ToUsage(delegate_options.inference_preference);
if (delegate_options.is_precision_loss_allowed == -1) {
options.priority1 = ToPriority(delegate_options.inference_priority1);
options.priority2 = ToPriority(delegate_options.inference_priority2);
options.priority3 = ToPriority(delegate_options.inference_priority3);
} else {
if (delegate_options.is_precision_loss_allowed == 0) {
options.priority1 = InferencePriority::MAX_PRECISION;
} else {
options.priority1 = InferencePriority::MIN_LATENCY;
}
}
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
options.gpu_invoke_loop_times = delegate_options.gpu_invoke_loop_times;
#endif
RETURN_IF_ERROR(gl_environment_->NewInferenceBuilder(std::move(*graph),
options, builder));
enforce_same_thread_ = true;
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Initialized OpenGL-based API.");
return absl::OkStatus();
#else
return absl::UnavailableError("OpenGL-based API disabled");
#endif
}
absl::Status DelegateKernelCore::MaybeInitializeSerializedOpenCL(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
std::unique_ptr<InferenceBuilder>* builder, cl::InferenceOptions* options,
cl::InferenceEnvironmentOptions* env_options,
cl::InferenceEnvironmentProperties* properties,
Serialization* serialization) {
if (!serialization) return absl::InvalidArgumentError("No serialization");
std::string options_fingerprint =
delegates::StrFingerprint(options, sizeof(cl::InferenceOptions));
auto data_key = serialization->GetEntryForKernel(
std::string(kSerializedDataPrefix) + options_fingerprint, context,
delegate_params);
std::string model_data;
auto model_data_status = data_key.GetData(context, &model_data);
if (model_data_status == kTfLiteOk) {
absl::Span<const uint8_t> model_span = absl::Span<const uint8_t>{
reinterpret_cast<const uint8_t*>(model_data.data()), model_data.size()};
RETURN_IF_ERROR(cl::NewInferenceEnvironment(*env_options, &cl_environment_,
properties));
RETURN_IF_ERROR(cl_environment_->NewInferenceBuilder(model_span, builder));
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Initialized OpenCL-based API from serialized data.");
return absl::OkStatus();
}
return absl::NotFoundError("Serialization data not found");
}
absl::Status DelegateKernelCore::SaveSerializedOpenCL(
TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
cl::InferenceOptions* options, Serialization* serialization,
const std::vector<uint8_t>& serialized_model) {
if (!serialization) return absl::InvalidArgumentError("No serialization");
std::string options_fingerprint =
delegates::StrFingerprint(options, sizeof(cl::InferenceOptions));
auto data_key = serialization->GetEntryForKernel(
std::string(kSerializedDataPrefix) + options_fingerprint, context,
delegate_params);
auto save_status = data_key.SetData(
context, reinterpret_cast<const char*>(serialized_model.data()),
serialized_model.size());
if (save_status != kTfLiteOk) {
return absl::InvalidArgumentError("Failed to save serialized data");
}
return absl::OkStatus();
}
class DelegateKernel {
public:
explicit DelegateKernel(Delegate* delegate) : core_(delegate) {}
~DelegateKernel() = default;
absl::Status Prepare(TfLiteContext* context,
const TfLiteDelegateParams* delegate_params) {
thread_id_prepare_ = std::this_thread::get_id();
return core_.Setup(context, delegate_params);
}
absl::Status GetRequiredTemporaries(TfLiteContext* context, TfLiteNode* node,
TfLiteIntArray** temporaries_array_ptr) {
if (core_.quant_conversion_map().empty()) return absl::OkStatus();
std::vector<int> temporary_tensors;
for (auto index : core_.input_indices()) {
if (core_.quant_conversion_map().find(index) !=
core_.quant_conversion_map().end()) {
temporary_tensors.push_back(index);
}
}
for (auto index : core_.output_indices()) {
if (core_.quant_conversion_map().find(index) !=
core_.quant_conversion_map().end()) {
temporary_tensors.push_back(index);
}
}
*temporaries_array_ptr = TfLiteIntArrayCreate(temporary_tensors.size());
for (int i = 0; i < temporary_tensors.size(); ++i) {
(*temporaries_array_ptr)->data[i] = temporary_tensors[i];
}
return absl::OkStatus();
}
absl::Status Invoke(TfLiteContext* context) {
if (thread_id_prepare_ != std::this_thread::get_id()) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"GpuDelegate invoke thread != prepare thread");
if (core_.enforce_same_thread()) {
return absl::FailedPreconditionError(
"GpuDelegate must run on the same thread where it was "
"initialized.");
}
}
const bool is_dequant_required = !core_.qu | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Delegate, CreateWithoutParams) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
}
TEST(Delegate, CreateWithDefaultParams) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
}
TEST(Delegate, CreateWithNumThreadsParam) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
}
TEST(Delegate, GetThreadPool) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
pthreadpool_t threadpool = static_cast<pthreadpool_t>(
TfLiteXNNPackDelegateGetThreadPool(xnnpack_delegate.get()));
ASSERT_TRUE(threadpool);
ASSERT_EQ(2, pthreadpool_get_threads_count(threadpool));
}
}
} |
979 | cpp | tensorflow/tensorflow | allowlisted_flex_ops | tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.cc | tensorflow/lite/delegates/flex/allowlisted_flex_ops_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_ALLOWLISTED_FLEX_OPS_H_
#define TENSORFLOW_LITE_DELEGATES_FLEX_ALLOWLISTED_FLEX_OPS_H_
#include <set>
#include <string>
namespace tflite {
namespace flex {
bool IsAllowlistedFlexOp(const std::string& tensorflow_op_name);
const std::set<std::string>& GetFlexAllowlist();
const std::set<std::string>& GetTFTextFlexAllowlist();
const std::set<std::string>& GetSentencePieceFlexAllowlist();
}
}
#endif
#include "tensorflow/lite/delegates/flex/allowlisted_flex_ops.h"
#include <set>
#include <string>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/lite/delegates/flex/allowlisted_flex_ops_internal.h"
namespace tflite {
namespace flex {
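// Returns the set of TensorFlow op names that are allowed to run through the
// Flex (select TF ops) delegate.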
const std::set<std::string>& GetFlexAllowlist() {
static const std::set<std::string>* allowlisted_flex_ops =
new std::set<std::string>({
"Abort",
"Abs",
"Add",
"AddN",
"AddV2",
"AdjustContrast",
"AdjustContrastv2",
"AdjustHue",
"AdjustSaturation",
"All",
"Angle",
"Any",
"ApplyAdaMax",
"ApplyAdadelta",
"ApplyAdagrad",
"ApplyAdagradDA",
"ApplyAdagradV2",
"ApplyAdam",
"ApplyAddSign",
"ApplyCenteredRMSProp",
"ApplyFtrl",
"ApplyFtrlV2",
"ApplyGradientDescent",
"ApplyMomentum",
"ApplyPowerSign",
"ApplyProximalAdagrad",
"ApplyProximalGradientDescent",
"ApplyRMSProp",
"ApproximateEqual",
"ArgMax",
"ArgMin",
"AsString",
"Assert",
"Assign",
"AssignAdd",
"AssignAddVariableOp",
"AssignSub",
"AssignSubVariableOp",
"AssignVariableOp",
"Atan",
"Atan2",
"AudioSpectrogram",
"AvgPool",
"AvgPool3D",
"AvgPool3DGrad",
"AvgPoolGrad",
"BatchCholesky",
"BatchDatasetV2",
"BatchMatMul",
"BatchMatMulV2",
"BatchMatrixBandPart",
"BatchMatrixDeterminant",
"BatchMatrixDiag",
"BatchMatrixDiagPart",
"BatchMatrixInverse",
"BatchMatrixSetDiag",
"BatchMatrixTriangularSolve",
"BatchNormWithGlobalNormalization",
"BatchNormWithGlobalNormalizationGrad",
"BatchToSpace",
"BatchToSpaceND",
"BiasAdd",
"BiasAddGrad",
"BiasAddV1",
"Bincount",
"Bitcast",
"BitwiseAnd",
"BitwiseOr",
"BitwiseXor",
"BroadcastArgs",
"BroadcastGradientArgs",
"BroadcastTo",
"Bucketize",
"CTCBeamSearchDecoder",
"CTCGreedyDecoder",
"Case",
"Cast",
"Ceil",
"CheckNumerics",
"CheckNumericsV2",
"Cholesky",
"ClipByValue",
"CombinedNonMaxSuppression",
"Complex",
"ComplexAbs",
"Concat",
"ConcatOffset",
"ConcatV2",
"Conj",
"ConjugateTranspose",
"Const",
"ControlTrigger",
"Conv2D",
"Conv2DBackpropFilter",
"Conv2DBackpropInput",
"Conv3D",
"Conv3DBackpropFilter",
"Conv3DBackpropFilterV2",
"Conv3DBackpropInput",
"Conv3DBackpropInputV2",
"Cos",
"Cosh",
"CropAndResize",
"CropAndResizeGradBoxes",
"CropAndResizeGradImage",
"Cumprod",
"Cumsum",
"CumulativeLogsumexp",
"DataFormatDimMap",
"DataFormatVecPermute",
"DebugGradientIdentity",
"DebugGradientRefIdentity",
"DecodeAndCropJpeg",
"DecodeBase64",
"DecodeBmp",
"DecodeGif",
"DecodeImage",
"DecodeJpeg",
"DecodePaddedRaw",
"DecodePng",
"DecodeRaw",
"DecodeWav",
"DeepCopy",
"DeleteSessionTensor",
"DenseBincount",
"DenseToDenseSetOperation",
"DenseToSparseSetOperation",
"DepthToSpace",
"DepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropFilter",
"DepthwiseConv2dNativeBackpropInput",
"Dequantize",
"DestroyResourceOp",
"DestroyTemporaryVariable",
"Diag",
"DiagPart",
"Dilation2D",
"Dilation2DBackpropFilter",
"Dilation2DBackpropInput",
"Div",
"DivNoNan",
"DynamicPartition",
"DynamicStitch",
"Einsum",
"Elu",
"EluGrad",
"Empty",
"EmptyTensorList",
"EmptyTensorMap",
"EncodeBase64",
"EncodeJpeg",
"EncodeJpegVariableQuality",
"EncodePng",
"EncodeWav",
"EnsureShape",
"Enter",
"Equal",
"Erf",
"Exit",
"Exp",
"ExpandDims",
"ExtractImagePatches",
"FFT",
"FFT2D",
"FFT3D",
"FIFOQueue",
"FIFOQueueV2",
"FakeQuantWithMinMaxArgs",
"FakeQuantWithMinMaxArgsGradient",
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxVarsGradient",
"FakeQuantWithMinMaxVarsPerChannel",
"FakeQuantWithMinMaxVarsPerChannelGradient",
"FakeQueue",
"Fill",
"FilterDataset",
"FinalizeDataset",
"Fingerprint",
"FlatMapDataset",
"Floor",
"FloorDiv",
"FloorMod",
"FusedBatchNorm",
"FusedBatchNormGrad",
"FusedBatchNormGradV2",
"FusedBatchNormGradV3",
"FusedBatchNormV2",
"FusedBatchNormV3",
"FusedPadConv2D",
"FusedResizeAndPadConv2D",
"Gather",
"GatherNd",
"GatherV2",
"GetSessionHandle",
"GetSessionHandleV2",
"GetSessionTensor",
"Greater",
"GreaterEqual",
"HSVToRGB",
"HashTable",
"HashTableV2",
"HistogramSummary",
"IFFT",
"IFFT2D",
"IFFT3D",
"IRFFT",
"IRFFT2D",
"IRFFT3D",
"Identity",
"IdentityN",
"Imag",
"ImageProjectiveTransformV2",
"ImageProjectiveTransformV3",
"ImmutableConst",
"InTopK",
"InTopKV2",
"InitializeTable",
"InitializeTableFromDataset",
"InitializeTableFromTextFile",
"InitializeTableFromTextFileV2",
"InitializeTableV2",
"InplaceAdd",
"InplaceSub",
"InplaceUpdate",
"Inv",
"InvGrad",
"Invert",
"InvertPermutation",
"IsFinite",
"IsNan",
"IsVariableInitialized",
"LRN",
"LeakyRelu",
"LeakyReluGrad",
"LeftShift",
"Less",
"LessEqual",
"LinSpace",
"ListDiff",
"Log",
"LogMatrixDeterminant",
"LogSoftmax",
"LogicalAnd",
"LogicalNot",
"LogicalOr",
"LookupTableExport",
"LookupTableExportV2",
"LookupTableFind",
"LookupTableFindV2",
"LookupTableImport",
"LookupTableImportV2",
"LookupTableInsert",
"LookupTableInsertV2",
"LookupTableRemoveV2",
"LookupTableSize",
"LookupTableSizeV2",
"LoopCond",
"MapDataset",
"MatMul",
"MatrixBandPart",
"MatrixDeterminant",
"MatrixDiag",
"MatrixDiagPart",
"MatrixDiagPartV2",
"MatrixDiagPartV3",
"MatrixDiagV2",
"MatrixDiagV3",
"MatrixInverse",
"MatrixSetDiag",
"MatrixSetDiagV2",
"MatrixSetDiagV3",
"MatrixTriangularSolve",
"Max",
"MaxPool",
"MaxPool3D",
"MaxPool3DGrad",
"MaxPool3DGradGrad",
"MaxPoolGrad",
"MaxPoolGradGrad",
"MaxPoolGradGradV2",
"MaxPoolGradV2",
"MaxPoolGradWithArgmax",
"MaxPoolV2",
"MaxPoolWithArgmax",
"Maximum",
"Mean",
"Merge",
"MergeSummary",
"MergeV2Checkpoints",
"Mfcc",
"Min",
"Minimum",
"MirrorPad",
"MirrorPadGrad",
"ModelDataset",
"Mul",
"MulNoNan",
"Multinomial",
"MutableDenseHashTable",
"MutableDenseHashTableV2",
"MutableHashTable",
"MutableHashTableOfTensors",
"MutableHashTableOfTensorsV2",
"MutableHashTableV2",
"Neg",
"NextIteration",
"NoOp",
"NonMaxSuppression",
"NonMaxSuppressionV2",
"NonMaxSuppressionV3",
"NonMaxSuppressionV4",
"NonMaxSuppressionV5",
"NonMaxSuppressionWithOverlaps",
"NotEqual",
"OneHot",
"OnesLike",
"OptimizeDatasetV2",
"OptionalFromValue",
"OptionalGetValue",
"OptionalHasValue",
"OptionalNone",
"Pack",
"Pad",
"PadV2",
"PaddingFIFOQueue",
"PaddingFIFOQueueV2",
"ParallelConcat",
"ParallelDynamicStitch",
"ParseExample",
"ParseExampleV2",
"ParseSequenceExample",
"ParseSequenceExampleV2",
"ParseSingleExample",
"ParseSingleSequenceExample",
"Placeholder",
"PlaceholderV2",
"PlaceholderWithDefault",
"PopulationCount",
"Pow",
"PreventGradient",
"Print",
"PrintV2",
"Prod",
"Qr",
"QuantizeDownAndShrinkRange",
"QuantizeV2",
"QuantizedAdd",
"QuantizedAvgPool",
"QuantizedBatchNormWithGlobalNormalization",
"QuantizedBiasAdd",
"QuantizedConcat",
"QuantizedConv2D",
"QuantizedInstanceNorm",
"QuantizedMatMul",
"QuantizedMaxPool",
"QuantizedMul",
"QuantizedRelu",
"QuantizedRelu6",
"QuantizedReshape",
"QuantizedResizeBilinear",
"QueueClose",
"QueueCloseV2",
"QueueDequeue",
"QueueDequeueMany",
"QueueDequeueManyV2",
"QueueDequeueUpTo",
"QueueDequeueUpToV2",
"QueueDequeueV2",
"QueueEnqueue",
"QueueEnqueueMany",
"QueueEnqueueManyV2",
"QueueEnqueueV2",
"QueueIsClosed",
"QueueIsClosedV2",
"QueueSize",
"QueueSizeV2",
"RFFT",
"RFFT2D",
"RFFT3D",
"RGBToHSV",
"RaggedBincount",
"RaggedGather",
"RaggedRange",
"RaggedTensorFromVariant",
"RaggedTensorToSparse",
"RaggedTensorToTensor",
"RaggedTensorToVariant",
"RaggedTensorToVariantGradient",
"RandomGamma",
"RandomPoisson",
"RandomPoissonV2",
"RandomShuffle",
"RandomStandardNormal",
"RandomUniform",
"RandomUniformInt",
"Range",
"Rank",
"ReadFile",
"ReadVariableOp",
"Real",
"RealDiv",
"Reciprocal",
"ReciprocalGrad",
"Recv",
"ReduceDataset",
"ReduceJoin",
"RefEnter",
"RefExit",
"RefIdentity",
"RefMerge",
"RefNextIteration",
"RefSelect",
"RefSwitch",
"RegexFullMatch",
"RegexReplace",
"Relu",
"Relu6",
"Relu6Grad",
"ReluGrad",
"RemoteCall",
"RepeatDataset",
"RequantizationRange",
"Requantize",
"Reshape",
"ResizeBicubic",
"ResizeBicubicGrad",
"ResizeBilinear",
"ResizeBilinearGrad",
"ResizeNearestNeighbor",
"ResizeNearestNeighborGrad",
"ResourceApplyAdaMax",
"ResourceApplyAdadelta",
"ResourceApplyAdagrad",
"ResourceApplyAdagradDA",
"ResourceApplyAdagradV2",
"ResourceApplyAdam",
"ResourceApplyAdamWithAmsgrad",
"ResourceApplyAddSign",
"ResourceApplyCenteredRMSProp",
"ResourceApplyFtrl",
"ResourceApplyFtrlV2",
"ResourceApplyGradientDescent",
"ResourceApplyKerasMomentum",
"ResourceApplyMomentum",
"ResourceApplyPowerSign",
"ResourceApplyProximalAdagrad",
"ResourceApplyProximalGradientDescent",
"ResourceApplyRMSProp",
"ResourceGather",
"ResourceGatherNd",
"ResourceScatterAdd",
"ResourceScatterDiv",
"ResourceScatterMax",
"ResourceScatterMin",
"ResourceScatterMul",
"ResourceScatterNdAdd",
"ResourceScatterNdMax",
"ResourceScatterNdMin",
"ResourceScatterNdSub",
"ResourceScatterNdUpdate",
"ResourceScatterSub",
"ResourceScatterUpdate",
"ResourceSparseApplyAdadelta",
"ResourceSparseApplyAdagrad",
"ResourceSparseApplyAdagradDA",
"ResourceSparseApplyAdagradV2",
"ResourceSparseApplyCenteredRMSProp",
"ResourceSparseApplyFtrl",
"ResourceSparseApplyFtrlV2",
"ResourceSparseApplyKerasMomentum",
"ResourceSparseApplyMomentum",
"ResourceSparseApplyProximalAdagrad",
"ResourceSparseApplyProximalGradientDescent",
"ResourceSparseApplyRMSProp",
"ResourceStridedSliceAssign",
"Restore",
"RestoreSlice",
"RestoreV2",
"Reverse",
"ReverseSequence",
"ReverseV2",
"RightShift",
"Roll",
"Round",
"Rsqrt",
"RsqrtGrad",
"SampleDistortedBoundingBox",
"SampleDistortedBoundingBoxV2",
"Save",
"SaveSlices",
"SaveV2",
"ScalarSummary",
"ScatterNd",
"ScatterNdAdd",
"ScatterNdMax",
"ScatterNdMin",
"ScatterNdNonAliasingAdd",
"ScatterNdSub",
"ScatterNdUpdate",
"SegmentMax",
"SegmentMean",
"SegmentMin",
"SegmentProd",
"SegmentSum",
"Select",
"SelectV2",
"Selu",
"SeluGrad",
"Send",
"SerializeTensor",
"Shape",
"ShapeN",
"ShardedFilename",
"ShardedFilespec",
"Sigmoid",
"SigmoidGrad",
"Sign",
"Sin",
"Sinh",
"Size",
"Slice",
"Softmax",
"SoftmaxCrossEntropyWithLogits",
"Softplus",
"SoftplusGrad",
"Softsign",
"SoftsignGrad",
"SpaceToBatch",
"SpaceToBatchND",
"SpaceToDepth",
"SparseAdd",
"SparseApplyAdadelta",
"SparseApplyAdagrad",
"SparseApplyAdagradDA",
"SparseApplyAdagradV2",
"SparseApplyCenteredRMSProp",
"SparseApplyFtrl",
"SparseApplyFtrlV2",
"SparseApplyMomentum",
"SparseApplyProximalAdagrad",
"SparseApplyProximalGradientDescent",
"SparseApplyRMSProp",
"SparseBincount",
"SparseCross",
"SparseCrossHashed",
"SparseCrossV2",
"SparseFillEmptyRows",
"SparseFillEmptyRowsGrad",
"SparseReduceSum",
"SparseReorder",
"SparseReshape",
"SparseSegmentMean",
"SparseSegmentMeanGrad",
"SparseSegmentMeanWithNumSegments",
"SparseSegmentSqrtN",
"SparseSegmentSqrtNGrad",
"SparseSegmentSqrtNWithNumSegments",
"SparseSegmentSum",
"SparseSegmentSumGrad",
"SparseSegmentSumWithNumSegments",
"SparseSlice",
"SparseSoftmaxCrossEntropyWithLogits",
"SparseTensorDenseMatMul",
"SparseToDense",
"SparseToSparseSetOperation",
"Split",
"SplitV",
"Sqrt",
"SqrtGrad",
"Square",
"SquaredDifference",
"Squeeze",
"Stack",
"StackClose",
"StackCloseV2",
"StackPop",
"StackPopV2",
"StackPush",
"StackPushV2",
"StackV2",
"StatelessMultinomial",
"StatelessRandomGammaV2",
"StatelessRandomGammaV3",
"StatelessRandomGetAlg",
"StatelessRandomGetKeyCounter",
"StatelessRandomGetKeyCounterAlg",
"StatelessRandomNormal",
"StatelessRandomNormalV2",
"StatelessRandomPoisson",
"StatelessRandomUniform",
"StatelessRandomUniformFullInt",
"StatelessRandomUniformFullIntV2",
"StatelessRandomUniformInt",
"StatelessRandomUniformIntV2",
"StatelessRandomUniformV2",
"StatelessSampleDistortedBoundingBox",
"StatelessTruncatedNormal",
"StatelessTruncatedNormalV2",
"StaticRegexFullMatch",
"StaticRegexReplace",
"StopGradient",
"StridedSlice",
"StridedSliceAssign",
"StridedSliceGrad",
"StringFormat",
"StringJoin",
"StringLength",
"StringLower",
"StringSplit",
"StringSplitV2",
"StringStrip",
"StringToHashBucket",
"StringToHashBucketFast",
"StringToHashBucketStrong",
"StringToNumber",
"Sub",
"Substr",
"Sum",
"Switch",
"SymbolicGradient",
"TakeDataset",
"TakeWhileDataset",
"Tan",
"Tanh",
"TanhGrad",
"TemporaryVariable",
"TensorArray",
"TensorArrayClose",
"TensorArrayCloseV2",
"TensorArrayCloseV3",
"TensorArrayConcat",
"TensorArrayConcatV2",
"TensorArrayConcatV3",
"TensorArrayGather",
"TensorArrayGatherV2",
"TensorArrayGatherV3",
"TensorArrayGrad",
"TensorArrayGradV2",
"TensorArrayGradV3",
"TensorArrayGradWithShape",
"TensorArrayPack",
"TensorArrayRead",
"TensorArrayReadV2",
"TensorArrayReadV3",
"TensorArrayScatter",
"TensorArrayScatterV2",
"TensorArrayScatterV3",
"TensorArraySize",
"TensorArraySizeV2",
"TensorArraySizeV3",
"TensorArraySplit",
"TensorArraySplitV2",
"TensorArraySplitV3",
"TensorArrayUnpack",
"TensorArrayV2",
"TensorArrayV3",
"TensorArrayWrite",
"TensorArrayWriteV2",
"TensorArrayWriteV3",
"TensorListConcat",
"TensorListConcatLists",
"TensorListConcatV2",
"TensorListElementShape",
"TensorListFromTensor",
"TensorListGather",
"TensorListGetItem",
"TensorListLength",
"TensorListPopBack",
"TensorListPushBack",
"TensorListPushBackBatch",
"TensorListReserve",
"TensorListResize",
"TensorListScatter",
"TensorListScatterIntoExistingList",
"TensorListScatterV2",
"TensorListSetItem",
"TensorListSplit",
"TensorListStack",
"TensorMapErase",
"TensorMapHasKey",
"TensorMapInsert",
"TensorMapLookup",
"TensorMapSize",
"TensorMapStackKeys",
"TensorScatterAdd",
"TensorScatterMax",
"TensorScatterMin",
"TensorScatterSub",
"TensorScatterUpdate",
"TensorSliceDataset",
"TensorStridedSliceUpdate",
"Tile",
"TileGrad",
"Timestamp",
"TopK",
"TopKV2",
"Transpose",
"TruncateDiv",
"TruncatedNormal",
"UnicodeDecode",
"UnicodeDecodeWithOffsets",
"UnicodeEncode",
"UnicodeTranscode",
"Unique",
"UniqueV2",
"UniqueWithCounts",
"UniqueWithCountsV2",
"Unpack",
"UnsortedSegmentJoin",
"UnsortedSegmentMax",
"UnsortedSegmentMin",
"UnsortedSegmentProd",
"UnsortedSegmentSum",
"UnwrapDatasetVariant",
"UpperBound",
"VarHandleOp",
"VarIsInitializedOp",
"Variable",
"VariableShape",
"VariableV2",
"Where",
"WrapDatasetVariant",
"WriteFile",
"Xdivy",
"Xlog1py",
"Xlogy",
"ZerosLike",
"_Arg",
"_ArrayToList",
"_DeviceArg",
"_DeviceRetval",
"_FusedConv2D",
"_HostCast",
"_HostRecv",
"_HostSend",
"_ListToArray",
"_ParallelConcatStart",
"_ParallelConcatUpdate",
"_ReadVariablesOp",
"_Recv",
"_Retval",
"_Send",
"_SwitchN",
"_VarHandlesOp",
});
return *allowlisted_flex_ops;
}
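// TF Text ops that may be run through the Flex delegate, subject to the op
// actually being registered in this binary (see IsAllowedTFTextOpForFlex).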
const std::set<std::string>& GetTFTextFlexAllowlist() {
static const std::set<std::string>* tftext_flex_ops =
new std::set<std::string>({
"CaseFoldUTF8",
"ConstrainedSequence",
"MaxSpanningTree",
"NormalizeUTF8",
"NormalizeUTF8WithOffsetsMap",
"RegexSplitWithOffsets",
"RougeL",
"SentenceFragments",
"SentencepieceOp",
"SentencepieceTokenizeOp",
"SentencepieceTokenizeWithOffsetsOp",
"SentencepieceDetokenizeOp",
"SentencepieceVocabSizeOp",
"SplitMergeTokenizeWithOffsets",
"TFText>NgramsStringJoin",
"TFText>WhitespaceTokenizeWithOffsetsV2",
"TokenizerFromLogits",
"UnicodeScriptTokenizeWithOffsets",
"WhitespaceTokenizeWithOffsets",
"WordpieceTokenizeWithOffsets",
});
return *tftext_flex_ops;
}
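// An allowlisted TF Text op is only usable if it is also registered in the
// global TensorFlow op registry.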
bool IsAllowedTFTextOpForFlex(const std::string& op_name) {
if (GetTFTextFlexAllowlist().count(op_name) == 0) return false;
return tensorflow::OpRegistry::Global()->LookUp(op_name) != nullptr;
}
const std::set<std::string>& GetSentencePieceFlexAllowlist() {
static const std::set<std::string>* sentencepiece_flex_ops =
new std::set<std::string>({
"SentencepieceGetPieceSize",
"SentencepiecePieceToId",
"SentencepieceIdToPiece",
"SentencepieceEncodeDense",
"SentencepieceEncodeSparse",
"SentencepieceDecode",
});
return *sentencepiece_flex_ops;
}
bool IsAllowedSentencePieceOpForFlex(const std::string& op_name) {
if (GetSentencePieceFlexAllowlist().count(op_name) == 0) return false;
return tensorflow::OpRegistry::Global()->LookUp(op_name) != nullptr;
}
bool IsAllowlistedFlexOp(const std::string& tensorflow_op_name) {
if (GetFlexAllowlist().count(tensorflow_op_name) != 0) return true;
return IsAllowedTFTextOpForFlex(tensorflow_op_name) ||
IsAllowedSentencePieceOpForFlex(tensorflow_op_name);
}
}
} | #include "tensorflow/lite/delegates/flex/allowlisted_flex_ops.h"
#include <set>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/lite/delegates/flex/allowlisted_flex_ops_internal.h"
namespace tflite {
namespace flex {
std::set<std::string> GetAllCpuKernels() {
auto is_cpu_kernel = [](const tensorflow::KernelDef& def) {
return (def.device_type() == "CPU" || def.device_type() == "DEFAULT");
};
tensorflow::KernelList kernel_list =
tensorflow::GetFilteredRegisteredKernels(is_cpu_kernel);
std::set<std::string> result;
for (int i = 0; i < kernel_list.kernel_size(); ++i) {
tensorflow::KernelDef kernel_def = kernel_list.kernel(i);
result.insert(kernel_def.op());
}
return result;
}
TEST(AllowlistedFlexOpsTest, EveryOpHasKernel) {
const std::set<std::string>& allowlist = GetFlexAllowlist();
std::set<std::string> all_kernels = GetAllCpuKernels();
for (const std::string& op_name : allowlist) {
EXPECT_EQ(all_kernels.count(op_name), 1)
<< op_name << " op is added to flex allowlist "
<< "but its kernel is not found.";
}
}
TEST(TfTextUtilsTest, TestFlexOpAllowed) {
EXPECT_FALSE(IsAllowedTFTextOpForFlex("ConstrainedSequence"));
}
TEST(TfTextUtilsTest, TestFlexOpNotAllowed) {
EXPECT_FALSE(IsAllowedTFTextOpForFlex("ngrams"));
}
}
} |
980 | cpp | tensorflow/tensorflow | delegate_data | tensorflow/lite/delegates/flex/delegate_data.cc | tensorflow/lite/delegates/flex/delegate_data_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_DATA_H_
#define TENSORFLOW_LITE_DELEGATES_FLEX_DELEGATE_DATA_H_
#include <functional>
#include <string>
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/flex/buffer_map.h"
#include "tensorflow/lite/delegates/flex/subgraph_resource.h"
namespace tflite {
namespace flex {
class DelegateData {
public:
DelegateData();
~DelegateData();
tensorflow::Status Prepare(const tensorflow::SessionOptions& session_options,
Subgraph* main_subgraph = nullptr,
TfLiteDelegate* flex_delegate = nullptr);
tensorflow::EagerContext* GetEagerContext() { return eager_context_; }
tensorflow::CancellationManager* GetCancellationManager() {
return cancellation_manager_;
}
void SetCancellationManager(
tensorflow::CancellationManager* cancellation_manager) {
cancellation_manager_ = cancellation_manager;
}
BufferMap* GetBufferMap(const TfLiteContext* context) {
return &buffer_map_[context];
}
std::map<int, int>* GetTensorReleaseMap(const TfLiteContext* context) {
return &tensor_release_map_[context];
}
private:
tensorflow::EagerContext* eager_context_ = nullptr;
tensorflow::CancellationManager* cancellation_manager_ = nullptr;
std::unordered_map<const TfLiteContext*, BufferMap> buffer_map_;
std::unordered_map<const TfLiteContext*, std::map<int, int>>
tensor_release_map_;
};
tensorflow::Status RegisterFunctionDefForSubgraphs(
Subgraph& main_subgraph,
const std::function<tensorflow::Status(
const std::vector<std::unique_ptr<Subgraph>>&,
std::set<std::string>* result)>& select_subgraphs_to_register,
tensorflow::ResourceMgr* resource_mgr,
tensorflow::EagerContext* eager_context, TfLiteDelegate* flex_delegate);
}
}
#endif
#include "tensorflow/lite/delegates/flex/delegate_data.h"
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
namespace {
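// Builds a FunctionDef whose body is a single TfLiteSubgraphExecute node keyed
// by `function_name`; the subgraph's inputs and outputs become the function's
// arguments and results, with dtypes taken from the TFLite tensors.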
void BuildFunctionDefProto(const std::string& function_name,
const Subgraph& subgraph,
tensorflow::FunctionDef& fdef) {
std::vector<std::string> inputs, outputs;
inputs.reserve(subgraph.inputs().size());
outputs.reserve(subgraph.outputs().size());
for (int i = 0; i < subgraph.inputs().size(); ++i) {
inputs.push_back(absl::StrCat(
"args_", i, ": ",
TfLiteTypeToTfTypeName(subgraph.tensor(subgraph.inputs()[i])->type)));
}
for (int i = 0; i < subgraph.outputs().size(); ++i) {
outputs.push_back(absl::StrCat(
"res_", i, ": ",
TfLiteTypeToTfTypeName(subgraph.tensor(subgraph.outputs()[i])->type)));
}
std::vector<tensorflow::FunctionDefHelper::Node> nodes;
nodes.push_back(tensorflow::FunctionDefHelper::Const<tensorflow::tstring>(
"SubgraphResourceKey", function_name));
tensorflow::FunctionDefHelper::Node execute_node;
execute_node.ret.push_back("InvokeTfLite");
execute_node.op = "TfLiteSubgraphExecute";
execute_node.arg.push_back("SubgraphResourceKey:output:0");
for (int i = 0; i < subgraph.inputs().size(); ++i) {
execute_node.arg.push_back(absl::StrCat("args_", i));
}
nodes.push_back(execute_node);
std::vector<std::pair<std::string, std::string>> ret_def;
ret_def.reserve(subgraph.outputs().size());
for (int i = 0; i < subgraph.outputs().size(); ++i) {
ret_def.emplace_back(absl::StrCat("res_", i),
absl::StrCat("InvokeTfLite:output:", i));
}
fdef = tensorflow::FunctionDefHelper::Create(function_name, inputs, outputs,
{}, nodes, ret_def);
tensorflow::AttrValue tin_attrs, tout_attrs;
for (int i = 0; i < subgraph.inputs().size(); ++i) {
TF_DataType dtype = tflite::flex::GetTensorFlowDataType(
subgraph.tensor(subgraph.inputs()[i])->type);
tin_attrs.mutable_list()->add_type(tensorflow::DataType(dtype));
}
for (int i = 0; i < subgraph.outputs().size(); ++i) {
TF_DataType dtype = tflite::flex::GetTensorFlowDataType(
subgraph.tensor(subgraph.outputs()[i])->type);
tout_attrs.mutable_list()->add_type(tensorflow::DataType(dtype));
}
fdef.mutable_node_def(1)->mutable_attr()->insert({"Tin", tin_attrs});
fdef.mutable_node_def(1)->mutable_attr()->insert({"Tout", tout_attrs});
}
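// Parses the NodeDef embedded in each Flex custom op across all subgraphs and
// collects the names of any functions it references, so that only subgraphs
// actually invoked as functions get registered.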
tensorflow::Status GetSubgraphNamesForFunctionExecution(
const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
std::set<std::string>* result) {
tensorflow::NodeDef node_def;
for (const auto& subgraph : subgraphs) {
for (const auto& node_and_reg : subgraph->nodes_and_registration()) {
if (node_and_reg.second.builtin_code != tflite::BuiltinOperator_CUSTOM) {
continue;
}
const std::string custom_name = node_and_reg.second.custom_name;
if (custom_name.substr(0, strlen(tflite::kFlexCustomCodePrefix)) !=
tflite::kFlexCustomCodePrefix) {
continue;
}
const flexbuffers::Vector& v =
flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(
node_and_reg.first.custom_initial_data),
node_and_reg.first.custom_initial_data_size)
.AsVector();
if (!node_def.ParseFromString(v[1].AsString().str())) {
return tensorflow::Status(absl::StatusCode::kInternal,
"could not parse NodeDef");
}
for (const auto& attr : node_def.attr()) {
if (attr.second.has_func()) {
result->insert(attr.second.func().name());
}
}
}
}
return absl::OkStatus();
}
}
tensorflow::Status RegisterFunctionDefForSubgraphs(
Subgraph& main_subgraph,
const std::function<tensorflow::Status(
const std::vector<std::unique_ptr<Subgraph>>&, std::set<std::string>*)>&
select_subgraphs_to_register,
tensorflow::ResourceMgr* resource_mgr,
tensorflow::EagerContext* eager_context, TfLiteDelegate* flex_delegate) {
std::vector<std::unique_ptr<Subgraph>>* subgraphs =
main_subgraph.GetSubgraphs();
if (!subgraphs) {
return absl::OkStatus();
}
std::set<std::string> function_subgraphs;
TF_RETURN_IF_ERROR(
select_subgraphs_to_register(*subgraphs, &function_subgraphs));
for (int i = 0; i < subgraphs->size(); ++i) {
if (subgraphs->at(i)->GetName() == "main") {
continue;
}
const std::string subgraph_name = subgraphs->at(i)->GetName();
if (!function_subgraphs.count(subgraph_name)) {
continue;
}
auto* subgraph_resource =
new TFLiteSubgraphResource(*(subgraphs->at(i)), flex_delegate);
TF_RETURN_IF_ERROR(resource_mgr->Create<TFLiteSubgraphResource>(
"flex", subgraph_name, subgraph_resource));
tensorflow::FunctionDef fdef;
BuildFunctionDefProto(subgraph_name, *(subgraphs->at(i)), fdef);
TF_RETURN_IF_ERROR(eager_context->AddFunctionDef(fdef));
}
return absl::OkStatus();
}
DelegateData::DelegateData() {}
DelegateData::~DelegateData() {
if (eager_context_) {
eager_context_->HostCPU()->ClearResourceMgr();
eager_context_->Unref();
}
}
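// Initializes the EagerContext on first call; subsequent calls return
// immediately. When a main subgraph is provided, its eligible subgraphs are
// registered as TF functions, which requires a non-null flex_delegate.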
tensorflow::Status DelegateData::Prepare(
const tensorflow::SessionOptions& session_options, Subgraph* main_subgraph,
TfLiteDelegate* flex_delegate) {
if (eager_context_) {
return tensorflow::Status();
}
if (flex_delegate == nullptr && main_subgraph != nullptr) {
return tensorflow::Status(
absl::StatusCode::kFailedPrecondition,
"flex_delegate must be non-null when main_subgraph is provided.");
}
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto device_mgr =
std::make_unique<tensorflow::StaticDeviceMgr>(std::move(devices));
auto rendezvous = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
eager_context_ = new tensorflow::EagerContext(
session_options,
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr.release(), true,
std::move(rendezvous), nullptr);
if (main_subgraph) {
TF_RETURN_IF_ERROR(RegisterFunctionDefForSubgraphs(
*main_subgraph, GetSubgraphNamesForFunctionExecution,
eager_context_->HostCPU()->resource_manager(), eager_context_,
flex_delegate));
}
return tensorflow::Status();
}
}
} | #include "tensorflow/lite/delegates/flex/delegate_data.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace flex {
namespace {
using ::tensorflow::protobuf::TextFormat;
using ::tensorflow::protobuf::util::MessageDifferencer;
TEST(DelegateDataTest, Basic) {
DelegateData data;
tensorflow::SessionOptions session_options;
session_options.config.set_intra_op_parallelism_threads(2);
EXPECT_TRUE(data.Prepare(session_options).ok());
TfLiteContext dummy_context1 = {};
TfLiteContext dummy_context2 = {};
ASSERT_NE(data.GetEagerContext(), nullptr);
EXPECT_NE(data.GetBufferMap(&dummy_context1), nullptr);
EXPECT_NE(data.GetBufferMap(&dummy_context1),
data.GetBufferMap(&dummy_context2));
}
TEST(DelegateDataTest, CheckFunctionDef) {
tensorflow::StaticDeviceMgr device_mgr(tensorflow::DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
tensorflow::SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr, false, nullptr,
nullptr);
auto select_subgraphs_to_register =
[](const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
std::set<std::string>* result) {
result->insert("add_subgraph");
result->insert("mul_subgraph");
return absl::OkStatus();
};
subgraph_test_util::SubgraphBuilder builder;
std::unique_ptr<ErrorReporter> error_reporter =
std::make_unique<TestErrorReporter>();
auto add_subgraph = std::make_unique<Subgraph>(
error_reporter.get(), nullptr,
nullptr, nullptr, nullptr,
nullptr);
add_subgraph->SetName("add_subgraph");
auto mul_subgraph = std::make_unique<Subgraph>(
error_reporter.get(), nullptr,
nullptr, nullptr, nullptr,
nullptr);
mul_subgraph->SetName("mul_subgraph");
builder.BuildAddSubgraph(add_subgraph.get());
builder.BuildMulSubgraph(mul_subgraph.get());
std::vector<std::unique_ptr<Subgraph>> subgraphs;
subgraphs.push_back(std::move(add_subgraph));
subgraphs.push_back(std::move(mul_subgraph));
Subgraph main_subgraph(error_reporter.get(), nullptr, &subgraphs,
nullptr, nullptr,
nullptr);
main_subgraph.SetName("main");
TF_ASSERT_OK(RegisterFunctionDefForSubgraphs(
main_subgraph, select_subgraphs_to_register,
eager_context->HostCPU()->resource_manager(), eager_context,
nullptr));
const string add_fdef_txt = R"pb(
signature {
name: "add_subgraph"
input_arg { name: "args_0" type: DT_INT32 }
input_arg { name: "args_1" type: DT_INT32 }
output_arg { name: "res_0" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "SubgraphResourceKey"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "add_subgraph"
}
}
}
}
node_def {
name: "InvokeTfLite"
op: "TfLiteSubgraphExecute"
input: "SubgraphResourceKey:output:0"
input: "args_0"
input: "args_1"
attr {
key: "Tin"
value { list { type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
}
ret { key: "res_0" value: "InvokeTfLite:output:0" })pb";
const string mul_fdef_txt = R"pb(
signature {
name: "mul_subgraph"
input_arg { name: "args_0" type: DT_INT32 }
input_arg { name: "args_1" type: DT_INT32 }
output_arg { name: "res_0" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "SubgraphResourceKey"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "mul_subgraph"
}
}
}
}
node_def {
name: "InvokeTfLite"
op: "TfLiteSubgraphExecute"
input: "SubgraphResourceKey:output:0"
input: "args_0"
input: "args_1"
attr {
key: "Tin"
value { list { type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
}
ret { key: "res_0" value: "InvokeTfLite:output:0" })pb";
tensorflow::FunctionDef add_fdef, mul_fdef;
ASSERT_TRUE(TextFormat::ParseFromString(add_fdef_txt, &add_fdef));
ASSERT_TRUE(TextFormat::ParseFromString(mul_fdef_txt, &mul_fdef));
EXPECT_EQ(eager_context->GetFunctionDef("main"), nullptr);
ASSERT_NE(eager_context->GetFunctionDef("add_subgraph"), nullptr);
ASSERT_NE(eager_context->GetFunctionDef("mul_subgraph"), nullptr);
EXPECT_TRUE(MessageDifferencer::Equals(
*(eager_context->GetFunctionDef("add_subgraph")), add_fdef));
EXPECT_TRUE(MessageDifferencer::Equals(
*(eager_context->GetFunctionDef("mul_subgraph")), mul_fdef));
eager_context->Unref();
}
TEST(DelegateDataTest, CheckFunctionDefWithOnlyMainGraph) {
tensorflow::StaticDeviceMgr device_mgr(tensorflow::DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
tensorflow::SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr, false, nullptr,
nullptr);
auto select_subgraphs_to_register =
[](const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
std::set<std::string>* result) {
result->insert("add_subgraph");
result->insert("mul_subgraph");
return absl::OkStatus();
};
subgraph_test_util::SubgraphBuilder builder;
std::unique_ptr<ErrorReporter> error_reporter =
std::make_unique<TestErrorReporter>();
Subgraph main_subgraph(error_reporter.get(), nullptr,
nullptr, nullptr,
nullptr,
nullptr);
main_subgraph.SetName("main");
TF_ASSERT_OK(RegisterFunctionDefForSubgraphs(
main_subgraph, select_subgraphs_to_register,
eager_context->HostCPU()->resource_manager(), eager_context,
nullptr));
EXPECT_EQ(eager_context->GetFunctionDef("main"), nullptr);
eager_context->Unref();
}
}
}
} |
981 | cpp | tensorflow/tensorflow | buffer_map | tensorflow/lite/delegates/flex/buffer_map.cc | tensorflow/lite/delegates/flex/buffer_map_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_BUFFER_MAP_H_
#define TENSORFLOW_LITE_DELEGATES_FLEX_BUFFER_MAP_H_
#include <map>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace flex {
class BufferMap {
public:
BufferMap();
~BufferMap();
bool HasTensor(int tensor_index) const;
tensorflow::Tensor GetTensor(int tensor_index) const;
const tensorflow::Tensor* GetTensorPtr(int tensor_index) const;
void SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor);
void SetFromTfLite(int tensor_index, const TfLiteTensor* tensor,
bool allow_reusing = true);
private:
std::map<int, tensorflow::Tensor> id_to_tensor_;
};
}
}
#endif
#include "tensorflow/lite/delegates/flex/buffer_map.h"
#include <utility>
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace flex {
BufferMap::BufferMap() {}
BufferMap::~BufferMap() {}
bool BufferMap::HasTensor(int tensor_index) const {
return id_to_tensor_.count(tensor_index) != 0;
}
tensorflow::Tensor BufferMap::GetTensor(int tensor_index) const {
return id_to_tensor_.at(tensor_index);
}
const tensorflow::Tensor* BufferMap::GetTensorPtr(int tensor_index) const {
auto& tensor = id_to_tensor_.at(tensor_index);
return &tensor;
}
void BufferMap::SetFromTfLite(int tensor_index, const TfLiteTensor* tensor,
bool allow_reusing) {
TFLITE_CHECK(
SetTfTensorFromTfLite(tensor, &id_to_tensor_[tensor_index], allow_reusing)
.ok());
}
void BufferMap::SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor) {
id_to_tensor_[tensor_index] = std::move(tensor);
}
}
} | #include "tensorflow/lite/delegates/flex/buffer_map.h"
#include <sys/types.h>
#include <functional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
namespace {
using ::testing::ElementsAre;
using UniqueTfLiteTensor =
std::unique_ptr<TfLiteTensor, std::function<void(TfLiteTensor*)>>;
template <typename T>
UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
const std::vector<T>& data) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
});
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<T>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
return tensor;
}
template <>
UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
const std::vector<string>& data) {
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
});
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<string>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
TfLiteTensorRealloc(data.size() * sizeof(string), tensor.get());
DynamicBuffer b;
for (const string& s : data) {
b.AddString(s.data(), s.size());
}
b.WriteToTensor(tensor.get(), ConvertVectorToTfLiteIntArray(shape));
return tensor;
}
template <typename T>
tensorflow::Tensor MakeTensor(const std::vector<int64_t>& shape,
const std::vector<T>& data,
tensorflow::DataType dtype) {
tensorflow::Tensor tensor(dtype, tensorflow::TensorShape(shape));
memcpy(tensor.data(), data.data(), data.size() * sizeof(T));
return tensor;
}
std::vector<int64_t> GetTensorShape(const tensorflow::Tensor& t) {
std::vector<int64_t> shape(t.dims());
for (int i = 0; i < t.dims(); ++i) {
shape[i] = t.dim_size(i);
}
return shape;
}
template <typename T>
std::vector<T> GetTensorData(const tensorflow::Tensor& t) {
const T* data = t.flat<T>().data();
return std::vector<T>(data, data + t.NumElements());
}
TEST(BufferMapTest, EmptyBuffer) {
BufferMap buffer_map;
EXPECT_FALSE(buffer_map.HasTensor(0));
}
TEST(BufferMapTest, SetFromTfLite) {
BufferMap buffer_map;
UniqueTfLiteTensor t =
MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
buffer_map.SetFromTfLite(0, t.get());
ASSERT_TRUE(buffer_map.HasTensor(0));
EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 0.123f, 0, 0));
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
ASSERT_EQ(out_tensor.NumElements(), 6);
ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
TEST(BufferMapTest, SetFromTfLiteString) {
BufferMap buffer_map;
UniqueTfLiteTensor t =
MakeLiteTensor<string>({1, 2, 1, 3}, {"", "", "", "str1", "", ""});
buffer_map.SetFromTfLite(0, t.get());
ASSERT_TRUE(buffer_map.HasTensor(0));
EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
ElementsAre("", "", "", "str1", "", ""));
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_STRING);
ASSERT_EQ(out_tensor.NumElements(), 6);
ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
TEST(BufferMapTest, SetFromTfLiteTwice) {
UniqueTfLiteTensor t1 =
MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
UniqueTfLiteTensor t2 =
MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
BufferMap buffer_map;
buffer_map.SetFromTfLite(0, t1.get());
buffer_map.SetFromTfLite(0, t2.get());
EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
TEST(BufferMapTest, SetFromTfLiteStringTwice) {
UniqueTfLiteTensor t1 =
MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
UniqueTfLiteTensor t2 =
MakeLiteTensor<string>({1, 2, 4}, {"", "", "", "s3", "", "", "s1", "s2"});
BufferMap buffer_map;
buffer_map.SetFromTfLite(0, t1.get());
buffer_map.SetFromTfLite(0, t2.get());
EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
ElementsAre("", "", "", "s3", "", "", "s1", "s2"));
}
TEST(BufferMapTest, SetFromTfLiteBuiltinResource) {
BufferMap buffer_map;
auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
});
tensor->allocation_type = kTfLiteDynamic;
tensor->type = kTfLiteResource;
tensor->dims = ConvertVectorToTfLiteIntArray({1});
TfLiteTensorRealloc(sizeof(int32_t), tensor.get());
tensor->delegate = nullptr;
tensor->data.i32[0] = 1;
buffer_map.SetFromTfLite(0, tensor.get());
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_RESOURCE);
ASSERT_EQ(out_tensor.NumElements(), 1);
tensorflow::ResourceHandle handle =
out_tensor.flat<tensorflow::ResourceHandle>()(0);
EXPECT_EQ(handle.name(), "tflite_resource_variable:1");
}
TEST(BufferMapTest, SetFromTensorFlow) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
BufferMap buffer_map;
buffer_map.SetFromTensorFlow(0, t1);
EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 0.123f, 0, 0));
tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
ASSERT_EQ(out_tensor.NumElements(), 6);
ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
TEST(BufferMapTest, SetFromTensorFlowTwice) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
tensorflow::Tensor t2 = MakeTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2},
tensorflow::DT_INT32);
BufferMap buffer_map;
buffer_map.SetFromTensorFlow(0, t1);
buffer_map.SetFromTensorFlow(0, t2);
EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
TEST(BufferMapTest, TfLiteOverwritesTensorFlow) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
UniqueTfLiteTensor t2 =
MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
BufferMap buffer_map;
buffer_map.SetFromTensorFlow(0, t1);
buffer_map.SetFromTfLite(0, t2.get());
EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
TEST(BufferMapTest, TensorFlowOverwritesTfLite) {
tensorflow::Tensor t1 = MakeTensor<float>(
{1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
UniqueTfLiteTensor t2 =
MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
BufferMap buffer_map;
buffer_map.SetFromTfLite(0, t2.get());
buffer_map.SetFromTensorFlow(0, t1);
EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
ElementsAre(0, 0, 0, 0.123f, 0, 0));
}
TEST(BufferMapTest, TensorflowBufferReuse) {
TfLiteTensor tensor;
tensor.allocation_type = kTfLiteDynamic;
tensor.data.raw = nullptr;
TfLiteTensorRealloc(10, &tensor);
CHECK(tensor.data.raw);
EXPECT_EQ(tensor.bytes, 10);
TfLiteTensorBuffer* tensor_buffer_reused = new TfLiteTensorBuffer(&tensor);
EXPECT_TRUE(tensor_buffer_reused->BufferReusedFromTfLiteTensor());
EXPECT_EQ(tensor_buffer_reused->data(), tensor.data.raw);
tensor_buffer_reused->Unref();
TfLiteTensorDataFree(&tensor);
}
TEST(BufferMapTest, ExplicitlyDisableBufferReuse) {
TfLiteTensor tensor;
tensor.allocation_type = kTfLiteDynamic;
tensor.data.raw = nullptr;
TfLiteTensorRealloc(10, &tensor);
CHECK(tensor.data.raw);
EXPECT_EQ(tensor.bytes, 10);
TfLiteTensorBuffer* tensor_buffer =
new TfLiteTensorBuffer(&tensor, false);
EXPECT_FALSE(tensor_buffer->BufferReusedFromTfLiteTensor());
EXPECT_NE(tensor_buffer->data(), tensor.data.raw);
tensor_buffer->Unref();
TfLiteTensorDataFree(&tensor);
}
}
}
} |
982 | cpp | tensorflow/tensorflow | kernel | third_party/xla/xla/stream_executor/kernel.cc | third_party/xla/xla/stream_executor/kernel_test.cc | #include "absl/base/attributes.h"
#ifndef XLA_STREAM_EXECUTOR_KERNEL_H_
#define XLA_STREAM_EXECUTOR_KERNEL_H_
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "tsl/platform/logging.h"
namespace stream_executor {
class Kernel;
enum class KernelCacheConfig {
kNoPreference,
kPreferShared,
kPreferL1,
kPreferEqual,
};
class KernelMetadata {
public:
KernelMetadata() = default;
std::optional<int64_t> registers_per_thread() const;
std::optional<int64_t> shared_memory_bytes() const;
void set_registers_per_thread(int registers_per_thread);
void set_shared_memory_bytes(int shared_memory_bytes);
private:
std::optional<int64_t> registers_per_thread_;
std::optional<int64_t> shared_memory_bytes_;
};
class KernelArgs {
public:
template <typename T>
using IsKernelArgs = std::enable_if_t<std::is_base_of<KernelArgs, T>::value>;
enum class Kind {
kDeviceMemoryArray,
kPackedArray
};
virtual ~KernelArgs() = default;
virtual size_t number_of_arguments() const = 0;
virtual uint64_t number_of_shared_bytes() const = 0;
virtual Kind kind() const = 0;
};
class KernelArgsPackedArrayBase : public KernelArgs {
public:
virtual absl::Span<const void *const> argument_addresses() const = 0;
static bool classof(const KernelArgs *args) {
return args->kind() == Kind::kPackedArray;
}
Kind kind() const final { return Kind::kPackedArray; }
};
class Kernel {
public:
using KernelArgsPacking =
std::function<absl::StatusOr<std::unique_ptr<KernelArgsPackedArrayBase>>(
const Kernel &kernel, const KernelArgs &args)>;
Kernel() = default;
virtual ~Kernel() = default;
Kernel(const Kernel &) = delete;
void operator=(const Kernel &) = delete;
virtual unsigned Arity() const = 0;
virtual absl::StatusOr<int32_t> GetMaxOccupiedBlocksPerCore(
ThreadDim threads, size_t dynamic_shared_memory_bytes) const = 0;
KernelCacheConfig cache_config() const { return cache_config_; }
void set_cache_config(KernelCacheConfig cache_config) {
cache_config_ = std::move(cache_config);
}
const KernelMetadata &metadata() const { return metadata_; }
void set_metadata(KernelMetadata metadata) {
metadata_ = std::move(metadata);
}
const KernelArgsPacking &args_packing() const { return args_packing_; }
void set_args_packing(KernelArgsPacking args_packing) {
args_packing_ = std::move(args_packing);
}
std::string_view name() const { return name_; }
void set_name(absl::string_view name);
std::string_view demangled_name() const { return demangled_name_; }
private:
std::string name_;
std::string demangled_name_;
KernelCacheConfig cache_config_ = KernelCacheConfig::kNoPreference;
KernelMetadata metadata_;
KernelArgsPacking args_packing_;
};
template <typename... Params>
class TypedKernelFactory;
template <typename... Params>
class TypedKernel {
public:
static constexpr size_t kNumberOfParameters = sizeof...(Params);
TypedKernel() = default;
Kernel &operator*() { return *kernel_; }
const Kernel &operator*() const { return *kernel_; }
Kernel *operator->() { return kernel_.get(); }
const Kernel *operator->() const { return kernel_.get(); }
operator bool() const { return static_cast<bool>(kernel_); }
using FactoryType = TypedKernelFactory<Params...>;
private:
friend class TypedKernelFactory<Params...>;
explicit TypedKernel(std::unique_ptr<Kernel> kernel)
: kernel_(std::move(kernel)) {}
std::unique_ptr<Kernel> kernel_;
};
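// LLVM-style casting helpers for KernelArgs: Cast CHECK-fails on a type
// mismatch, DynCast returns nullptr instead, and DynCastOrNull additionally
// tolerates a null input.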
template <class T, KernelArgs::IsKernelArgs<T> * = nullptr>
T *Cast(KernelArgs *args) {
CHECK(T::classof(args)) << "Invalid arguments casting to a destination type: "
<< typeid(T).name();
CHECK(args != nullptr) << "Casted arguments must be not null";
  return static_cast<T *>(args);
}
template <class T, KernelArgs::IsKernelArgs<T> * = nullptr>
const T *Cast(const KernelArgs *args) {
CHECK(T::classof(args)) << "Invalid arguments casting to a destination type: "
<< typeid(T).name();
CHECK(args != nullptr) << "Casted arguments must be not null";
return static_cast<const T *>(args);
}
template <class T, KernelArgs::IsKernelArgs<T> * = nullptr>
const T *DynCast(const KernelArgs *args) {
CHECK(args != nullptr) << "Casted arguments must be not null";
return T::classof(args) ? static_cast<const T *>(args) : nullptr;
}
template <class T, KernelArgs::IsKernelArgs<T> * = nullptr>
const T *DynCastOrNull(const KernelArgs *args) {
return args && T::classof(args) ? static_cast<const T *>(args) : nullptr;
}
class KernelArgsDeviceMemoryArray : public KernelArgs {
public:
KernelArgsDeviceMemoryArray(absl::Span<const DeviceMemoryBase> args,
size_t shared_memory_bytes)
: device_memory_args_(args.begin(), args.end()),
shared_memory_bytes_(shared_memory_bytes) {}
static bool classof(const KernelArgs *args) {
return args->kind() == Kind::kDeviceMemoryArray;
}
Kind kind() const final { return Kind::kDeviceMemoryArray; }
size_t number_of_arguments() const final {
return device_memory_args_.size() + (shared_memory_bytes_ > 0);
}
uint64_t number_of_shared_bytes() const final { return shared_memory_bytes_; }
absl::Span<const DeviceMemoryBase> device_memory_args() const {
return device_memory_args_;
}
const void *device_memory_ptr(size_t index) const {
return device_memory_args_[index].opaque();
}
size_t device_memory_size(size_t index) const {
return device_memory_args_[index].size();
}
private:
absl::InlinedVector<DeviceMemoryBase, 4> device_memory_args_;
size_t shared_memory_bytes_ = 0;
};
namespace internal {
class EmptyArgs {};
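// Fixed-capacity storage for trivially copyable ("POD") kernel arguments; each
// argument is copied into its own aligned slot of `size` bytes, and at most
// `capacity` arguments are supported.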
template <size_t capacity, size_t size = 8,
size_t alignment = alignof(std::max_align_t)>
class PodArgs {
protected:
template <typename T>
const std::byte *add_pod_argument(const T &arg) {
static_assert(
        std::is_pod_v<T> && sizeof(T) <= size && alignof(T) <= alignment,
"Type is not compatible with POD arguments storage");
assert(num_args_ < capacity && "pod args overflow");
std::byte *arg_storage = args_storage_[num_args_++].storage;
std::memcpy(arg_storage, &arg, sizeof(T));
return arg_storage;
}
private:
struct Arg {
alignas(alignment) std::byte storage[size];
};
size_t num_args_ = 0;
std::array<Arg, capacity> args_storage_;
};
template <typename ArgsStorage>
static constexpr bool is_pod_args_v = false;
template <size_t capacity, size_t size, size_t alignment>
static constexpr bool is_pod_args_v<PodArgs<capacity, size, alignment>> = true;
}
template <size_t num_args, typename ArgsStorage = internal::PodArgs<num_args>>
class KernelArgsPackedArray : public KernelArgsPackedArrayBase, ArgsStorage {
public:
KernelArgsPackedArray() = default;
KernelArgsPackedArray(const KernelArgsPackedArray &) = delete;
KernelArgsPackedArray &operator=(const KernelArgsPackedArray &) = delete;
template <typename T>
void add_argument(const T &arg) {
if constexpr (internal::is_pod_args_v<ArgsStorage>) {
argument_addresses_[number_of_argument_addresses_++] =
ArgsStorage::add_pod_argument(arg);
} else {
static_assert(sizeof(T) == 0, "Arguments storage is not supported");
}
}
void add_device_memory_argument(const DeviceMemoryBase &arg) {
const void **copy_ptr =
&device_memory_opaque_pointers_[number_of_argument_addresses_];
*copy_ptr = arg.opaque();
argument_addresses_[number_of_argument_addresses_] = copy_ptr;
++number_of_argument_addresses_;
}
void add_shared_bytes(size_t number_of_bytes) {
shared_memory_bytes_ += number_of_bytes;
}
size_t number_of_arguments() const final {
return number_of_argument_addresses_ + (shared_memory_bytes_ > 0);
}
uint64_t number_of_shared_bytes() const final { return shared_memory_bytes_; }
absl::Span<const void *const> argument_addresses() const final {
return absl::Span<const void *const>(argument_addresses_.data(),
number_of_argument_addresses_);
}
private:
std::array<const void *, num_args> device_memory_opaque_pointers_;
std::array<const void *, num_args> argument_addresses_;
size_t shared_memory_bytes_ = 0;
size_t number_of_argument_addresses_ = 0;
};
namespace internal {
template <int n>
std::unique_ptr<KernelArgsPackedArrayBase> PackKernelArgs(
absl::Span<const DeviceMemoryBase> args, uint32_t shared_mem_bytes) {
auto packed = std::make_unique<KernelArgsPackedArray<n, EmptyArgs>>();
for (const DeviceMemoryBase &buf : args) {
packed->add_device_memory_argument(buf);
}
if (shared_mem_bytes > 0) {
packed->add_shared_bytes(shared_mem_bytes);
}
return packed;
}
}
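// Packs a span of device memory arguments into the smallest supported
// fixed-capacity container, up to a limit of 1024 arguments.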
inline absl::StatusOr<std::unique_ptr<KernelArgsPackedArrayBase>>
PackKernelArgs(absl::Span<const DeviceMemoryBase> args,
uint32_t shared_mem_bytes) {
static constexpr int kKernelArgsLimit = 1024;
if (args.size() > kKernelArgsLimit)
return absl::InvalidArgumentError(absl::StrCat(
"Can't pack device memory arguments array of size ", args.size(),
" which is larger than the maximum supported size of ",
kKernelArgsLimit));
if (args.size() <= 4) {
return internal::PackKernelArgs<4>(args, shared_mem_bytes);
} else if (args.size() <= 8) {
return internal::PackKernelArgs<8>(args, shared_mem_bytes);
} else if (args.size() <= 16) {
return internal::PackKernelArgs<16>(args, shared_mem_bytes);
} else if (args.size() <= 32) {
return internal::PackKernelArgs<32>(args, shared_mem_bytes);
} else if (args.size() <= 64) {
return internal::PackKernelArgs<64>(args, shared_mem_bytes);
} else if (args.size() <= 256) {
return internal::PackKernelArgs<256>(args, shared_mem_bytes);
} else if (args.size() <= 512) {
return internal::PackKernelArgs<512>(args, shared_mem_bytes);
}
return internal::PackKernelArgs<kKernelArgsLimit>(args, shared_mem_bytes);
}
inline absl::StatusOr<std::unique_ptr<KernelArgsPackedArrayBase>>
PackKernelArgs(absl::Span<const DeviceMemoryBase> args,
const KernelMetadata &metadata) {
return PackKernelArgs(args, metadata.shared_memory_bytes().value_or(0));
}
namespace internal {
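// Maps an argument type to the representation passed to the device: device
// memory arguments (and pointers to them) decay to opaque `const void*`,
// other raw pointers are rejected at compile time, and everything else is
// passed by value.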
template <typename T>
struct PackedArgType {
static_assert(!std::is_pointer_v<T>, "cannot pass raw pointer to the device");
using Type = T;
};
template <>
struct PackedArgType<DeviceMemoryBase> {
using Type = const void *;
};
template <typename T>
struct PackedArgType<DeviceMemory<T>> {
using Type = typename PackedArgType<DeviceMemoryBase>::Type;
};
template <>
struct PackedArgType<DeviceMemoryBase *> {
using Type = typename PackedArgType<DeviceMemoryBase>::Type;
};
template <>
struct PackedArgType<const DeviceMemoryBase *> {
using Type = typename PackedArgType<DeviceMemoryBase>::Type;
};
template <typename T>
struct PackedArgType<DeviceMemory<T> *> {
using Type = typename PackedArgType<DeviceMemoryBase>::Type;
};
template <typename T>
struct PackedArgType<const DeviceMemory<T> *> {
using Type = typename PackedArgType<DeviceMemoryBase>::Type;
};
template <typename T, std::enable_if_t<!std::is_pointer_v<T>> * = nullptr>
T PackArg(const T &arg) {
return arg;
}
inline const void *PackArg(const DeviceMemoryBase &arg) { return arg.opaque(); }
inline const void *PackArg(const DeviceMemoryBase *arg) {
return PackArg(*arg);
}
template <typename T>
const void *PackArg(const DeviceMemory<T> &arg) {
return arg.opaque();
}
template <typename T>
const void *PackArg(const DeviceMemory<T> *arg) {
return PackArg(*arg);
}
}
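// Kernel arguments packed into a std::tuple, with each element's address
// recorded up front; device memory arguments are stored as opaque device
// pointers.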
template <typename... Args>
class KernelArgsPackedTuple : public KernelArgsPackedArrayBase {
public:
static constexpr size_t kSize = sizeof...(Args);
using Storage = std::tuple<
typename internal::PackedArgType<absl::remove_cvref_t<Args>>::Type...>;
explicit KernelArgsPackedTuple(Args... args, size_t shared_memory_bytes)
: storage_(internal::PackArg(std::forward<Args>(args))...),
shared_memory_bytes_(shared_memory_bytes) {
InitializeArgumentAddresses(std::make_index_sequence<kSize>{});
}
KernelArgsPackedTuple(const KernelArgsPackedTuple &) = delete;
KernelArgsPackedTuple &operator=(const KernelArgsPackedTuple &) = delete;
size_t number_of_arguments() const final {
return kSize + (shared_memory_bytes_ > 0);
}
uint64_t number_of_shared_bytes() const final { return shared_memory_bytes_; }
absl::Span<const void *const> argument_addresses() const final {
return absl::Span<const void *const>(argument_addresses_.data(), kSize);
}
template <typename... OtherArgs>
static void CheckCompatibleStaticAssert() {
static constexpr size_t kOtherSize = sizeof...(OtherArgs);
static_assert(kSize == kOtherSize, "length of arguments packs must match");
using StrippedArgs = std::tuple<absl::remove_cvref_t<Args>...>;
using StrippedOtherArgs = std::tuple<absl::remove_cvref_t<OtherArgs>...>;
static_assert(std::is_same_v<StrippedArgs, StrippedOtherArgs>,
"arguments types do not match");
}
private:
template <size_t... Is>
void InitializeArgumentAddresses(std::index_sequence<Is...>) {
((argument_addresses_[Is] = &std::get<Is>(storage_)), ...);
}
Storage storage_;
size_t shared_memory_bytes_ = 0;
std::array<const void *, kSize> argument_addresses_;
};
template <typename... Args>
std::unique_ptr<KernelArgsPackedArrayBase> PackKernelArgs(int64_t shmem_bytes,
Args... args) {
using PackedArgs = KernelArgsPackedTuple<Args...>;
return std::make_unique<PackedArgs>(std::forward<Args>(args)..., shmem_bytes);
}
template <typename... Params, typename... Args>
std::unique_ptr<KernelArgsPackedArrayBase> PackKernelArgs(
const TypedKernel<Params...> &kernel, Args... args) {
using PackedParams = KernelArgsPackedTuple<Params...>;
using PackedArgs = KernelArgsPackedTuple<Args...>;
PackedParams::template CheckCompatibleStaticAssert<Args...>();
int64_t shmem_bytes = kernel->metadata().shared_memory_bytes().value_or(0);
return std::make_unique<PackedArgs>(std::forward<Args>(args)..., shmem_bytes);
}
}
#endif
#include "xla/stream_executor/kernel.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tsl/platform/demangle.h"
namespace stream_executor {
std::optional<int64_t> KernelMetadata::registers_per_thread() const {
return registers_per_thread_;
}
std::optional<int64_t> KernelMetadata::shared_memory_bytes() const {
return shared_memory_bytes_;
}
void KernelMetadata::set_registers_per_thread(int registers_per_thread) {
registers_per_thread_ = registers_per_thread;
}
void KernelMetadata::set_shared_memory_bytes(int shared_memory_bytes) {
shared_memory_bytes_ = shared_memory_bytes;
}
void Kernel::set_name(absl::string_view name) {
name_ = std::string(name);
demangled_name_ =
      tsl::port::Demangle(absl::StripPrefix(name_, "__device_stub_").data());
}
} | #include "xla/stream_executor/kernel.h"
#include <cstdint>
#include <memory>
#include <tuple>
#include <type_traits>
#include <vector>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace stream_executor {
struct Data {};
template <typename... Args>
using ArgsStorage = typename KernelArgsPackedTuple<Args...>::Storage;
static_assert(
std::is_same_v<ArgsStorage<int32_t, const int32_t, int32_t&, const int32_t>,
std::tuple<int32_t, int32_t, int32_t, int32_t>>);
static_assert(std::is_same_v<ArgsStorage<Data, const Data, Data&, const Data>,
std::tuple<Data, Data, Data, Data>>);
static_assert(std::is_same_v<
ArgsStorage<DeviceMemoryBase, const DeviceMemoryBase,
DeviceMemoryBase&, const DeviceMemoryBase&>,
std::tuple<const void*, const void*, const void*, const void*>>);
static_assert(std::is_same_v<
ArgsStorage<DeviceMemory<float>, const DeviceMemory<float>,
DeviceMemory<float>&, const DeviceMemory<float>&>,
std::tuple<const void*, const void*, const void*, const void*>>);
static_assert(
std::is_same_v<ArgsStorage<DeviceMemoryBase*, const DeviceMemoryBase*>,
std::tuple<const void*, const void*>>);
static std::unique_ptr<StreamExecutor> NewStreamExecutor() {
Platform* platform = PlatformManager::PlatformWithName("Host").value();
StreamExecutorConfig config(0);
return platform->GetUncachedExecutor(config).value();
}
TEST(KernelTest, PackDeviceMemoryArguments) {
auto executor = NewStreamExecutor();
DeviceMemoryBase a(reinterpret_cast<void*>(0x12345678));
DeviceMemoryBase b(reinterpret_cast<void*>(0x87654321));
auto args = PackKernelArgs({a, b}, 0).value();
ASSERT_EQ(args->number_of_arguments(), 2);
auto packed = args->argument_addresses();
const void* ptr0 = *reinterpret_cast<const void* const*>(packed[0]);
const void* ptr1 = *reinterpret_cast<const void* const*>(packed[1]);
ASSERT_EQ(ptr0, a.opaque());
ASSERT_EQ(ptr1, b.opaque());
}
TEST(KernelTest, PackPodArguments) {
auto args = std::make_unique<KernelArgsPackedArray<4>>();
args->add_argument(1);
args->add_argument(2.0f);
args->add_argument(3.0);
ASSERT_EQ(args->number_of_arguments(), 3);
auto packed = args->argument_addresses();
int32_t i32 = *reinterpret_cast<const int32_t*>(packed[0]);
float f32 = *reinterpret_cast<const float*>(packed[1]);
double f64 = *reinterpret_cast<const double*>(packed[2]);
ASSERT_EQ(i32, 1);
ASSERT_EQ(f32, 2.0f);
ASSERT_EQ(f64, 3.0);
}
TEST(KernelTest, PackTupleArguments) {
auto args = PackKernelArgs(0, 1, 2.0f, 3.0);
ASSERT_EQ(args->number_of_arguments(), 3);
auto packed = args->argument_addresses();
int32_t i32 = *reinterpret_cast<const int32_t*>(packed[0]);
float f32 = *reinterpret_cast<const float*>(packed[1]);
double f64 = *reinterpret_cast<const double*>(packed[2]);
ASSERT_EQ(i32, 1);
ASSERT_EQ(f32, 2.0f);
ASSERT_EQ(f64, 3.0);
}
TEST(KernelTest, FailToCreateTypedKernelFromEmptySpec) {
MultiKernelLoaderSpec empty_spec(0);
auto executor = NewStreamExecutor();
auto kernel = TypedKernelFactory<>::Create(executor.get(), empty_spec);
EXPECT_FALSE(kernel.ok());
}
static void BM_PackDeviceMemoryArgs(benchmark::State& state) {
std::vector<DeviceMemoryBase> args(state.range(0));
for (int i = 0; i < state.range(0); ++i) {
args[i] = DeviceMemoryBase(reinterpret_cast<void*>(0x12345678), 42);
}
for (auto s : state) {
auto packed = PackKernelArgs(args, 0);
benchmark::DoNotOptimize(packed);
}
}
BENCHMARK(BM_PackDeviceMemoryArgs)
->Arg(4)
->Arg(8)
->Arg(32)
->Arg(64)
->Arg(128)
->Arg(256)
->Arg(512)
->Arg(1024);
} |
983 | cpp | tensorflow/tensorflow | weight_cache | tensorflow/lite/delegates/xnnpack/weight_cache.cc | tensorflow/lite/delegates/xnnpack/weight_cache_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_WEIGHT_CACHE_H_
#define TENSORFLOW_LITE_DELEGATES_XNNPACK_WEIGHT_CACHE_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "xnnpack.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h"
namespace tflite {
namespace xnnpack {
struct XNNPackCacheHeader {
enum : uint64_t { kInvalidHeader = 0, kVersion = 1 };
uint64_t version;
uint8_t xnnpack_build_identifier[32];
uint64_t buffer_list_offset;
uint64_t buffer_list_size;
};
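// Identifies a packed buffer by the packing algorithm and the source
// weights/bias buffers. Used as the key into the cache index.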
struct PackIdentifier {
enum { kNoId = SIZE_MAX };
uint64_t pack_algorithm_id = kNoId;
uint64_t weights_id = kNoId;
uint64_t bias_id = kNoId;
friend bool operator==(const PackIdentifier& a, const PackIdentifier& b) {
return a.pack_algorithm_id == b.pack_algorithm_id &&
a.weights_id == b.weights_id && a.bias_id == b.bias_id;
}
struct Hash {
size_t operator()(const PackIdentifier& p) const {
std::hash<uint64_t> hasher;
return hasher(p.pack_algorithm_id) ^ hasher(p.weights_id) ^
hasher(p.bias_id);
}
};
};
struct BufferLocation {
uint64_t offset;
uint64_t size;
};
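// RAII handle over a read-only view of the cache file. On most platforms the
// file is memory-mapped; on MSVC it is read into a heap-allocated buffer.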
class MMapHandle {
public:
using value_type = uint8_t;
MMapHandle() = default;
~MMapHandle();
MMapHandle(const MMapHandle&) = delete;
MMapHandle& operator=(const MMapHandle&) = delete;
MMapHandle(MMapHandle&&);
MMapHandle& operator=(MMapHandle&&);
  [[nodiscard]]
  [[nodiscard]]
bool Map(const char* path);
void UnMap();
bool IsMapped() const { return data_ != nullptr; }
uint8_t* data() { return data_; }
const uint8_t* data() const { return data_; }
size_t size() const { return size_; }
uint8_t* begin() { return data(); }
const uint8_t* begin() const { return data(); }
uint8_t* end() { return data() + size(); }
const uint8_t* end() const { return data() + size(); }
friend void swap(MMapHandle& a, MMapHandle& b);
private:
size_t size_ = 0;
uint8_t* data_ = nullptr;
};
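// Incrementally appends packed buffers to the cache file and records their
// locations in a flatbuffer index that is written out by Finalize().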
class WeightCacheBuilder {
public:
WeightCacheBuilder() = default;
~WeightCacheBuilder();
WeightCacheBuilder(const WeightCacheBuilder&) = delete;
WeightCacheBuilder& operator=(const WeightCacheBuilder&) = delete;
WeightCacheBuilder(WeightCacheBuilder&&);
WeightCacheBuilder& operator=(WeightCacheBuilder&&);
  [[nodiscard]]
bool Start(const char* path);
[[nodiscard]]
bool IsStarted() const {
return fd_ != -1;
}
void Reset();
  [[nodiscard]]
void* Reserve(size_t size);
  [[nodiscard]]
BufferLocation Append(PackIdentifier pack_id, const void* data,
uint64_t size);
bool ShouldFinalize() const;
  [[nodiscard]]
bool Finalize();
size_t capacity() const { return capacity_; }
uint8_t* data() const { return data_.get(); }
friend void swap(WeightCacheBuilder& a, WeightCacheBuilder& b);
private:
std::unique_ptr<uint8_t[]> data_ = nullptr;
cache::schema::BufferListT schema_;
size_t capacity_ = 0;
int fd_ = -1;
std::string file_path_;
};
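// Implements the xnn_weights_cache_provider callbacks. While building, packed
// buffers are routed through WeightCacheBuilder; once the cache is finalized,
// look-ups are served directly from the memory-mapped file.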
class MMapWeightCacheProvider {
public:
MMapWeightCacheProvider() = default;
MMapWeightCacheProvider(const MMapWeightCacheProvider&) = delete;
MMapWeightCacheProvider& operator=(const MMapWeightCacheProvider&) = delete;
MMapWeightCacheProvider(MMapWeightCacheProvider&&);
MMapWeightCacheProvider& operator=(MMapWeightCacheProvider&&);
void SetFilePath(const char* file_path);
const std::string& GetFilePath() const { return file_path_; }
  [[nodiscard]]
bool LoadOrStartBuild(const char* file_path);
  [[nodiscard]]
bool StartBuild(const char* file_path);
  [[nodiscard]]
bool Load(const std::string& path);
  [[nodiscard]]
bool Load();
void MapTensorIdentifiers(
const TfLiteTensor* tensors, size_t size,
const std::unordered_map<size_t, size_t>& tensor_index_to_identifier);
[[nodiscard]]
size_t LookUp(const xnn_weights_cache_look_up_key* cache_key);
[[nodiscard]]
void* ReserveSpace(size_t size);
[[nodiscard]]
size_t LookUpOrInsert(const xnn_weights_cache_look_up_key* cache_key,
void* ptr, size_t size);
void* OffsetToAddr(size_t offset);
void Release();
  [[nodiscard]]
bool Finalize();
bool IsFinalized() const;
  bool IsBuilding() const { return !IsFinalized() && !file_path_.empty(); }
  bool IsActive() const { return IsFinalized() || !file_path_.empty(); }
xnn_weights_cache_provider& GetCacheProvider() { return cache_provider_; }
static size_t look_up(void* context,
const xnn_weights_cache_look_up_key* cache_key);
static void* reserve_space(void* context, size_t n);
static size_t look_up_or_insert(
void* context, const xnn_weights_cache_look_up_key* cache_key, void* ptr,
size_t size);
static bool is_finalized(void* context);
static void* offset_to_addr(void* context, size_t offset);
static enum xnn_status delete_cache(void* context);
private:
PackIdentifier BuildPackIdentifier(const xnn_weights_cache_look_up_key& key);
xnn_weights_cache_provider cache_provider_{
.context = this,
.look_up = MMapWeightCacheProvider::look_up,
.reserve_space = MMapWeightCacheProvider::reserve_space,
.look_up_or_insert = MMapWeightCacheProvider::look_up_or_insert,
.is_finalized = MMapWeightCacheProvider::is_finalized,
.offset_to_addr = MMapWeightCacheProvider::offset_to_addr,
.delete_cache = MMapWeightCacheProvider::delete_cache};
std::string file_path_;
std::unordered_map<const void*, uint64_t> buffer_address_to_identifier_;
std::unordered_multimap<PackIdentifier, BufferLocation, PackIdentifier::Hash>
cache_key_to_offset_;
MMapHandle mmap_handle_;
size_t mmap_buffer_base_offset_;
WeightCacheBuilder builder_;
};
}
}
#endif
#include "tensorflow/lite/delegates/xnnpack/weight_cache.h"
#include <fcntl.h>
#include <sys/stat.h>
#if defined(_MSC_VER)
#include <io.h>
#define F_OK 0
#else
#include <sys/mman.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "xnnpack.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#define XNNPACK_ABORT_CHECK(TEST, ...) \
if (!(TEST)) { \
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, __VA_ARGS__); \
std::abort(); \
}
namespace tflite::xnnpack {
namespace {
constexpr size_t kMinAlignment = 64;
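// Rounds `offset` up to the next multiple of `alignment`.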
size_t Align(size_t offset, const size_t alignment) {
const size_t misalign = offset % alignment;
return offset + (misalign ? alignment - misalign : 0);
}
template <class F>
class ScopeGuard {
public:
explicit ScopeGuard(F&& callback) : callback_(std::forward<F>(callback)) {}
ScopeGuard(const ScopeGuard&) = delete;
ScopeGuard& operator=(const ScopeGuard&) = delete;
ScopeGuard(ScopeGuard&& other)
: active_(other.active_), callback_(std::move(other.callback_)) {
other.Deactivate();
}
ScopeGuard& operator=(ScopeGuard&& other) {
if (this != &other) {
active_ = std::move(other.active_);
callback_ = std::move(other.callback_);
other.Deactivate();
}
    return *this;
  }
~ScopeGuard() {
if (active_) {
callback_();
}
}
void Deactivate() { active_ = false; }
private:
F callback_;
bool active_ = true;
};
template <class F>
ScopeGuard(F&&) -> ScopeGuard<F>;
[[nodiscard]]
bool FileExists(const char* path) {
return access(path, F_OK) != -1;
}
}
void swap(MMapHandle& a, MMapHandle& b) {
using std::swap;
swap(a.size_, b.size_);
swap(a.data_, b.data_);
}
MMapHandle::~MMapHandle() { UnMap(); }
MMapHandle::MMapHandle(MMapHandle&& other) { swap(*this, other); }
MMapHandle& MMapHandle::operator=(MMapHandle&& other) {
swap(*this, other);
return *this;
}
bool MMapHandle::Map(const char* path) {
this->UnMap();
const int fd = open(path, O_RDONLY);
if (fd == -1) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: could not open file to mmap ('%s'): %s.", path,
strerror(errno))
return false;
}
const ScopeGuard close_fd_on_return([&fd] {
if (fd >= 0) {
close(fd);
}
});
struct stat file_stats;
if (fstat(fd, &file_stats)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: could not access file stats to get "
"size ('%s'): %s.",
path, strerror(errno))
return false;
}
size_ = file_stats.st_size;
#if defined(_MSC_VER)
data_ = new uint8_t[size_];
{
uint8_t* data_reader = data_;
size_t remaining_bytes = size_;
while (remaining_bytes > 0) {
const auto bytes = read(fd, data_reader, remaining_bytes);
if (bytes == -1) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: could not read file ('%s'): %s.",
path, strerror(errno))
UnMap();
return false;
}
remaining_bytes -= bytes;
data_reader += bytes;
}
}
#else
data_ = static_cast<uint8_t*>(
mmap(nullptr, size_, PROT_READ, MAP_SHARED, fd, 0));
if (data_ == MAP_FAILED) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: could not mmap file (%s): %s.", path,
strerror(errno));
data_ = nullptr;
size_ = 0;
return false;
}
#endif
return true;
}
void MMapHandle::UnMap() {
if (data_) {
#if defined(_MSC_VER)
delete[] data_;
#else
munmap(data_, size_);
#endif
data_ = nullptr;
size_ = 0;
}
}
void swap(WeightCacheBuilder& a, WeightCacheBuilder& b) {
using std::swap;
swap(a.schema_, b.schema_);
swap(a.data_, b.data_);
swap(a.capacity_, b.capacity_);
swap(a.fd_, b.fd_);
swap(a.file_path_, b.file_path_);
}
WeightCacheBuilder::WeightCacheBuilder(WeightCacheBuilder&& other) {
swap(*this, other);
}
WeightCacheBuilder& WeightCacheBuilder::operator=(WeightCacheBuilder&& other) {
Reset();
swap(*this, other);
return *this;
}
WeightCacheBuilder::~WeightCacheBuilder() { Reset(); }
namespace {
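// Writes `size` bytes from `data` to `fd`, looping over partial writes and
// failing on write errors.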
bool WriteData(const int fd, const uint8_t* data, size_t size,
const char* const file_path, const char* step_description) {
for (size_t bytes = 0; bytes < size;) {
const auto written_bytes = write(fd, data + bytes, size - bytes);
if (written_bytes == -1) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: file write incomplete (%s). %s: %s.",
file_path, step_description, strerror(errno))
      return false;
    }
bytes += written_bytes;
}
return true;
}
}
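// Creates (or truncates) the cache file and writes a placeholder header with
// an invalid version so a partially written cache can never be loaded;
// Finalize() rewrites the header with valid offsets.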
bool WeightCacheBuilder::Start(const char* path) {
Reset();
ScopeGuard reset_on_error([this] { Reset(); });
file_path_ = path;
fd_ = open(file_path_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (fd_ == -1) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR, "Could not open file ('%s'): %s.",
file_path_.c_str(), strerror(errno));
return false;
}
const XNNPackCacheHeader header{
.version = XNNPackCacheHeader::kInvalidHeader,
.buffer_list_offset = 0,
.buffer_list_size = 0,
};
if (!WriteData(fd_, (const uint8_t*)&header, sizeof(header),
file_path_.c_str(), "padding for flatbuffer offset")) {
return false;
}
schema_.base_offset = Align(sizeof(header), kMinAlignment);
reset_on_error.Deactivate();
return true;
}
void WeightCacheBuilder::Reset() {
if (fd_ != -1) {
close(fd_);
fd_ = -1;
file_path_.clear();
}
data_.reset(nullptr);
capacity_ = 0;
}
void* WeightCacheBuilder::Reserve(size_t size) {
if (size > capacity_) {
data_.reset(nullptr);
data_ = std::make_unique<uint8_t[]>(size);
capacity_ = size;
}
return data_.get();
}
BufferLocation WeightCacheBuilder::Append(PackIdentifier pack_id,
const void* data, uint64_t size) {
XNNPACK_ABORT_CHECK(IsStarted(),
"Cannot append data to an unstarted builder.");
const size_t offset = Align(lseek(fd_, 0, SEEK_CUR), kMinAlignment);
if (lseek(fd_, offset, SEEK_SET) != offset) {
return BufferLocation{};
}
BufferLocation loc{.offset = offset - schema_.base_offset, .size = size};
schema_.buffers.push_back(std::make_unique<cache::schema::BufferT>(
cache::schema::BufferT{.packing_algorithm_id = pack_id.pack_algorithm_id,
.weights_id = pack_id.weights_id,
.bias_id = pack_id.bias_id,
.offset = loc.offset,
.size = loc.size}));
WriteData(fd_, reinterpret_cast<const uint8_t*>(data), size,
file_path_.c_str(), "Append buffer to cache file");
return loc;
}
bool WeightCacheBuilder::ShouldFinalize() const { return fd_ != -1; }
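// Serializes the buffer list flatbuffer, appends it at an aligned offset, and
// rewrites the header with the XNNPack build identifier and the list's offset
// and size.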
bool WeightCacheBuilder::Finalize() {
if (fd_ == -1) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: cache file ('%s') is not open for writing: %s.",
file_path_.c_str(), strerror(errno))
return false;
}
flatbuffers::FlatBufferBuilder builder;
cache::schema::FinishBufferListBuffer(
builder, cache::schema::BufferList::Pack(builder, &schema_));
const size_t layout_offset = Align(lseek(fd_, 0, SEEK_CUR), kMinAlignment);
if (lseek(fd_, layout_offset, SEEK_SET) != layout_offset) {
return false;
}
if (sizeof(XNNPackCacheHeader::xnnpack_build_identifier) !=
xnn_experimental_get_build_identifier_size()) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: cache file ('%s') header cannot "
"hold XNNPack's build identifier: %s.",
file_path_.c_str(), strerror(errno))
return false;
}
XNNPackCacheHeader header{XNNPackCacheHeader::kVersion};
memcpy(header.xnnpack_build_identifier,
xnn_experimental_get_build_identifier_data(),
xnn_experimental_get_build_identifier_size());
header.buffer_list_offset = lseek(fd_, 0, SEEK_CUR);
header.buffer_list_size = builder.GetSize();
if (!WriteData(fd_, builder.GetBufferPointer(), builder.GetSize(),
file_path_.c_str(), "Buffer list")) {
return false;
}
lseek(fd_, 0, SEEK_SET);
WriteData(fd_, (const uint8_t*)&header, sizeof(header), file_path_.c_str(),
"Writing header");
TFLITE_LOG_PROD(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache: written to '%s'.", file_path_.c_str());
Reset();
return true;
}
MMapWeightCacheProvider::MMapWeightCacheProvider(
MMapWeightCacheProvider&& other) {
*this = std::move(other);
}
MMapWeightCacheProvider& MMapWeightCacheProvider::operator=(
MMapWeightCacheProvider&& other) {
using std::swap;
swap(cache_provider_, other.cache_provider_);
cache_provider_.context = this;
other.cache_provider_.context = &other;
swap(file_path_, other.file_path_);
swap(buffer_address_to_identifier_, other.buffer_address_to_identifier_);
swap(cache_key_to_offset_, other.cache_key_to_offset_);
swap(mmap_handle_, other.mmap_handle_);
swap(mmap_buffer_base_offset_, other.mmap_buffer_base_offset_);
swap(builder_, other.builder_);
return *this;
}
void MMapWeightCacheProvider::SetFilePath(const char* path) {
XNNPACK_ABORT_CHECK(
!IsFinalized(),
"Cannot change the path of a cache that has already been loaded.");
if (file_path_ != path) {
file_path_ = path;
}
}
bool MMapWeightCacheProvider::LoadOrStartBuild(const char* path) {
if (Load(path)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache loaded from '%s'.", path);
return true;
} else if (StartBuild(path)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache build for '%s' started.", path);
return true;
}
return false;
}
bool MMapWeightCacheProvider::StartBuild(const char* path) {
SetFilePath(path);
return builder_.Start(path);
}
bool MMapWeightCacheProvider::Load(const std::string& path) {
SetFilePath(path.c_str());
return Load();
}
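// Maps the cache file and validates it (header size and version, XNNPack
// build identifier, buffer list bounds and flatbuffer verification) before
// indexing every stored buffer by its PackIdentifier.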
bool MMapWeightCacheProvider::Load() {
XNNPACK_ABORT_CHECK(!file_path_.empty(),
"Path wasn't provided to weight cache provider.");
mmap_buffer_base_offset_ = 0;
cache_key_to_offset_.clear();
if (!FileExists(file_path_.c_str())) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"XNNPack weight cache: could not load '%s': %s.",
file_path_.c_str(), strerror(errno));
return false;
}
if (!mmap_handle_.Map(file_path_.c_str())) {
return false;
}
ScopeGuard unmap_on_fail([this] { mmap_handle_.UnMap(); });
if (mmap_handle_.size() < sizeof(XNNPackCacheHeader)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: invalid cache file size.");
return false;
}
const XNNPackCacheHeader header = [this] {
XNNPackCacheHeader header;
memcpy(&header, mmap_handle_.data(), sizeof(header));
return header;
}();
if (header.version != XNNPackCacheHeader::kVersion) {
TFLITE_LOG(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache: incompatible header version. Cache "
"needs to be built again.");
return false;
}
if (!xnn_experimental_check_build_identifier(
header.xnnpack_build_identifier,
sizeof(header.xnnpack_build_identifier))) {
TFLITE_LOG(tflite::TFLITE_LOG_VERBOSE,
"XNNPack weight cache: incompatible XNNPack version. Cache "
"needs to be built again.");
return false;
}
if (header.buffer_list_offset >= mmap_handle_.size()) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: invalid offset for buffer list descriptor.");
return false;
}
if (header.buffer_list_size !=
mmap_handle_.size() - header.buffer_list_offset) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: invalid size for buffer list descriptor.");
return false;
}
flatbuffers::Verifier verifier(
mmap_handle_.data() + header.buffer_list_offset, header.buffer_list_size);
if (!cache::schema::VerifyBufferListBuffer(verifier)) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: buffer list validation failed.");
return false;
}
const cache::schema::BufferList* buffer_list = cache::schema::GetBufferList(
mmap_handle_.data() + header.buffer_list_offset);
if (!buffer_list) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: could not get packed weights from flatbuffer.");
return false;
}
mmap_buffer_base_offset_ = buffer_list->base_offset();
if (const auto buffers = buffer_list->buffers(); buffers) {
for (auto* buffer : *buffers) {
if (!buffer) {
TFLITE_LOG_PROD(
tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: Invalid buffer address in buffer list.");
return false;
}
cache_key_to_offset_.emplace(
PackIdentifier{.pack_algorithm_id = buffer->packing_algorithm_id(),
.weights_id = buffer->weights_id(),
.bias_id = buffer->bias_id()},
BufferLocation{.offset = buffer->offset(), .size = buffer->size()});
}
}
unmap_on_fail.Deactivate();
return true;
}
void MMapWeightCacheProvider::MapTensorIdentifiers(
const TfLiteTensor* tensors, const size_t size,
const std::unordered_map<size_t, size_t>& tensor_index_to_identifier) {
for (const auto [index, identifier] : tensor_index_to_identifier) {
XNNPACK_ABORT_CHECK(index < size,
"Tensor index corresponds to a non existing tensor.");
buffer_address_to_identifier_[tensors[index].data.data] = identifier;
}
}
size_t MMapWeightCacheProvider::LookUp(
const xnn_weights_cache_look_up_key* cache_key) {
if (!cache_key) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: a null cache key was provided.");
return SIZE_MAX;
}
const PackIdentifier pack_id = BuildPackIdentifier(*cache_key);
if (auto offset_it = cache_key_to_offset_.find(pack_id);
offset_it != cache_key_to_offset_.end()) {
return offset_it->second.offset;
}
return SIZE_MAX;
}
void* MMapWeightCacheProvider::ReserveSpace(size_t size) {
XNNPACK_ABORT_CHECK(!IsFinalized(),
"Cannot reserve space in a finalized cache.");
return builder_.Reserve(size);
}
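// Returns the offset of an already packed buffer when the key is known;
// otherwise appends the data through the builder and records its location.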
size_t MMapWeightCacheProvider::LookUpOrInsert(
const xnn_weights_cache_look_up_key* cache_key, void* ptr, size_t size) {
XNNPACK_ABORT_CHECK(cache_key, "A null cache key was provided.");
const PackIdentifier pack_id = BuildPackIdentifier(*cache_key);
if (auto offset_it = cache_key_to_offset_.find(pack_id);
offset_it != cache_key_to_offset_.end()) {
return offset_it->second.offset;
}
XNNPACK_ABORT_CHECK(!IsFinalized(),
"Cannot insert a buffer in a finalized cache.");
const BufferLocation location = builder_.Append(pack_id, ptr, size);
cache_key_to_offset_.emplace(pack_id, location);
return location.offset;
}
void* MMapWeightCacheProvider::OffsetToAddr(const size_t offset) {
XNNPACK_ABORT_CHECK(
IsFinalized(),
"Cannot get the address of a buffer in a non finalized cache.");
return mmap_handle_.data() + mmap_buffer_base_offset_ + offset;
}
void MMapWeightCacheProvider::Release() {
buffer_address_to_identifier_.clear();
cache_key_to_offset_.clear();
mmap_handle_ = MMapHandle();
mmap_buffer_base_offset_ = 0;
builder_ = WeightCacheBuilder();
}
bool MMapWeightCacheProvider::Finalize() {
if (IsFinalized()) {
return true;
}
if (file_path_.empty()) {
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: file path wasn't set. Cannot "
"finalize the cache.");
return false;
}
if (!builder_.Finalize()) {
return false;
}
builder_ = WeightCacheBuilder();
return Load();
}
bool MMapWeightCacheProvider::IsFinalized() const {
return mmap_handle_.IsMapped();
}
size_t MMapWeightCacheProvider::look_up(
void* context, const xnn_weights_cache_look_up_key* cache_key) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->LookUp(cache_key);
}
void* MMapWeightCacheProvider::reserve_space(void* context, size_t n) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->ReserveSpace(n);
}
size_t MMapWeightCacheProvider::look_up_or_insert(
void* context, const xnn_weights_cache_look_up_key* cache_key, void* ptr,
size_t size) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->LookUpOrInsert(
cache_key, ptr, size);
}
bool MMapWeightCacheProvider::is_finalized(void* context) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->IsFinalized();
}
void* MMapWeightCacheProvider::offset_to_addr(void* context, size_t offset) {
return reinterpret_cast<MMapWeightCacheProvider*>(context)->OffsetToAddr(
offset);
}
enum xnn_status MMapWeightCacheProvider::delete_cache(void* context) {
reinterpret_cast<MMapWeightCacheProvider*>(context)->Release();
return xnn_status_success;
}
PackIdentifier MMapWeightCacheProvider::BuildPackIdentifier(
const xnn_weights_cache_look_up_key& key) {
const auto get_buffer_id = [&](const void* buffer) -> size_t {
if (buffer) {
const auto identifier_it = buffer_address_to_identifier_.find(buffer);
XNNPACK_ABORT_CHECK(identifier_it != buffer_address_to_identifier_.end(), | #include "tensorflow/lite/delegates/xnnpack/weight_cache.h"
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iterator>
#include <map>
#include <ostream>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xnnpack.h"
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/weight_cache_schema_generated.h"
namespace tflite::xnnpack {
std::ostream& operator<<(std::ostream& os, const PackIdentifier& p) {
return os << "PackIdentifier{pack_algo: " << p.pack_algorithm_id
<< ", weights_id: " << p.weights_id << ", bias_id: " << p.bias_id
<< "}";
}
namespace {
using testing::ElementsAreArray;
using testing::Ge;
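// Test helper that creates a temporary file (tmpnam_s + open on MSVC,
// mkstemp elsewhere) and closes the descriptor on destruction.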
class TempFileDesc {
public:
static constexpr struct AutoClose {
} kAutoCLose{};
#if defined(_MSC_VER)
TempFileDesc() : fd_() {
char filename[L_tmpnam_s];
errno_t err = tmpnam_s(filename, L_tmpnam_s);
if (err) {
fprintf(stderr, "Could not create temporary filename.\n");
std::abort();
}
path_ = filename;
fd_ = open(path_.c_str(), O_CREAT | O_EXCL | O_RDWR, 0644);
if (fd_ < 0) {
fprintf(stderr, "Could not create temporary filename.\n");
std::abort();
}
}
#else
TempFileDesc() : fd_(mkstemp(path_.data())) {
if (GetFd() < 0) {
perror("Could not create temporary file");
}
}
#endif
explicit TempFileDesc(AutoClose) : TempFileDesc() { Close(); }
TempFileDesc(const TempFileDesc&) = delete;
TempFileDesc& operator=(const TempFileDesc&) = delete;
friend void swap(TempFileDesc& a, TempFileDesc& b) {
std::swap(a.path_, b.path_);
std::swap(a.fd_, b.fd_);
}
TempFileDesc(TempFileDesc&& other) { swap(*this, other); }
TempFileDesc& operator=(TempFileDesc&& other) {
swap(*this, other);
return *this;
}
~TempFileDesc() { Close(); }
void Close() {
if (GetFd() >= 0) {
close(fd_);
fd_ = -1;
}
}
const std::string& GetPath() const { return path_; }
const char* GetCPath() const { return path_.c_str(); }
int GetFd() const { return fd_; }
bool IsOpen() const { return fd_ >= 0; }
private:
std::string path_ = testing::TempDir() + "/weight_cache_test_file.XXXXXX";
int fd_ = -1;
};
TEST(MMapHandleTest, DefaultConstructs) {
MMapHandle handle;
EXPECT_FALSE(handle.IsMapped());
EXPECT_EQ(handle.data(), nullptr);
EXPECT_EQ(handle.size(), 0);
}
TEST(MMapHandleTest, MapNonExistingFileFails) {
const char* file_path = "sdbgfd";
MMapHandle handle;
EXPECT_FALSE(handle.Map(file_path));
}
TEST(MMapHandleTest, MapExistingFileWorks) {
using std::size;
const std::string payload = "This is some data in the file.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
tmp_file.Close();
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
EXPECT_TRUE(handle.IsMapped());
EXPECT_NE(handle.data(), nullptr);
EXPECT_THAT(handle.size(), Ge(size(payload)));
EXPECT_THAT(handle, ElementsAreArray(payload));
handle.UnMap();
EXPECT_FALSE(handle.IsMapped());
EXPECT_EQ(handle.data(), nullptr);
EXPECT_EQ(handle.size(), 0);
}
TEST(MMapHandleTest, MoveConstructs) {
const std::string payload = "This is some data in the file.";
TempFileDesc tmp_file;
ASSERT_TRUE(tmp_file.IsOpen());
ASSERT_EQ(write(tmp_file.GetFd(), payload.c_str(), size(payload)),
size(payload));
tmp_file.Close();
MMapHandle handle;
ASSERT_TRUE(handle.Map(tmp_file.GetCPath()));
MMapHandle handle2(std::move(handle));
EXPECT_FALSE(handle.IsMapped());
EXPECT_EQ(handle.data(), nullptr);
EXPECT_EQ(handle.size(), 0);
EXPECT_TRUE(handle2.IsMapped());
EXPECT_NE(handle2.data(), nullptr);
EXPECT_THAT(handle2.size(), Ge(size(payload)));
EXPECT_THAT(handle2, ElementsAreArray(payload));
}
TEST(WeightCacheBuilderTest, ReserveAppendWriteWorks) {
using std::size;
const std::string payload = "This is some data in the file.";
const PackIdentifier dummy_id{1, 2, 3};
WeightCacheBuilder builder;
const std::string cache_path = testing::TempDir() + "/cache";
ASSERT_TRUE(builder.Start(cache_path.c_str()));
const size_t payload_size = size(payload);
void* buffer = builder.Reserve(payload_size);
std::memcpy(buffer, payload.c_str(), payload_size);
auto loc = builder.Append(dummy_id, buffer, payload_size);
EXPECT_EQ(loc.size, payload_size);
EXPECT_GE(builder.capacity(), payload_size);
EXPECT_TRUE(builder.ShouldFinalize());
ASSERT_TRUE(builder.Finalize());
MMapHandle handle;
ASSERT_TRUE(handle.Map(cache_path.c_str()));
const XNNPackCacheHeader& header =
*reinterpret_cast<const XNNPackCacheHeader*>(handle.data());
ASSERT_EQ(header.version, XNNPackCacheHeader::kVersion);
ASSERT_NE(header.buffer_list_offset, 0);
ASSERT_NE(header.buffer_list_size, 0);
ASSERT_LE(header.buffer_list_offset + header.buffer_list_size, handle.size());
const cache::schema::BufferList* const packed_weights =
cache::schema::GetBufferList(handle.data() + header.buffer_list_offset);
ASSERT_NE(packed_weights, nullptr);
ASSERT_NE(packed_weights->buffers(), nullptr);
ASSERT_EQ(packed_weights->buffers()->size(), 1);
ASSERT_NE(packed_weights->buffers()->Get(0), nullptr);
ASSERT_EQ(packed_weights->buffers()->Get(0)->size(), size(payload));
ASSERT_EQ(packed_weights->buffers()->Get(0)->packing_algorithm_id(),
dummy_id.pack_algorithm_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->weights_id(),
dummy_id.weights_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->bias_id(), dummy_id.bias_id);
flatbuffers::Verifier verifier(handle.data() + header.buffer_list_offset,
header.buffer_list_size);
EXPECT_TRUE(cache::schema::VerifyBufferListBuffer(verifier));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset(),
size(handle));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset() +
packed_weights->buffers()->Get(0)->size(),
size(handle));
std::tuple<const char*, size_t> cache_data(
reinterpret_cast<const char*>(
handle.data() + packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset()),
packed_weights->buffers()->Get(0)->size());
EXPECT_THAT(cache_data, ElementsAreArray(payload));
}
TEST(WeightCacheBuilderTest, AppendWithoutReserveWriteWorks) {
using std::size;
const std::string payload = "This is some data in the file.";
const PackIdentifier dummy_id{1, 2, 3};
const std::string cache_path = testing::TempDir() + "/cache";
WeightCacheBuilder builder;
ASSERT_TRUE(builder.Start(cache_path.c_str()));
const size_t payload_size = size(payload);
auto loc = builder.Append(dummy_id, payload.c_str(), payload_size);
EXPECT_EQ(loc.size, payload_size);
EXPECT_TRUE(builder.ShouldFinalize());
ASSERT_TRUE(builder.Finalize());
MMapHandle handle;
ASSERT_TRUE(handle.Map(cache_path.c_str()));
const XNNPackCacheHeader& header =
*reinterpret_cast<const XNNPackCacheHeader*>(handle.data());
ASSERT_EQ(header.version, XNNPackCacheHeader::kVersion);
ASSERT_NE(header.buffer_list_offset, 0);
ASSERT_NE(header.buffer_list_size, 0);
ASSERT_LE(header.buffer_list_offset + header.buffer_list_size, handle.size());
const cache::schema::BufferList* const packed_weights =
cache::schema::GetBufferList(handle.data() + header.buffer_list_offset);
ASSERT_NE(packed_weights, nullptr);
ASSERT_NE(packed_weights->buffers(), nullptr);
ASSERT_EQ(packed_weights->buffers()->size(), 1);
ASSERT_NE(packed_weights->buffers()->Get(0), nullptr);
ASSERT_EQ(packed_weights->buffers()->Get(0)->size(), size(payload));
ASSERT_EQ(packed_weights->buffers()->Get(0)->packing_algorithm_id(),
dummy_id.pack_algorithm_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->weights_id(),
dummy_id.weights_id);
ASSERT_EQ(packed_weights->buffers()->Get(0)->bias_id(), dummy_id.bias_id);
flatbuffers::Verifier verifier(handle.data() + header.buffer_list_offset,
header.buffer_list_size);
EXPECT_TRUE(cache::schema::VerifyBufferListBuffer(verifier));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset(),
size(handle));
ASSERT_LE(packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset() +
packed_weights->buffers()->Get(0)->size(),
size(handle));
std::tuple<const char*, size_t> cache_data(
reinterpret_cast<const char*>(
handle.data() + packed_weights->base_offset() +
packed_weights->buffers()->Get(0)->offset()),
packed_weights->buffers()->Get(0)->size());
EXPECT_THAT(cache_data, ElementsAreArray(payload));
}
TEST(WeightCacheBuilderTest, NonExistingPathFails) {
using std::size;
WeightCacheBuilder builder;
EXPECT_FALSE(builder.Start(""));
EXPECT_FALSE(builder.Start("/seldf/sedsft"));
}
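// Fake packing context for the tests: tensors are backed by in-memory buffers
// and "packing" XORs the selected buffers together, so expected cache
// contents can be recomputed and compared against what the provider returns.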
struct FakeContext {
void AddTensor(int buffer_identifier, size_t size) {
buffers.emplace_back(size, buffer_identifier);
tensors.push_back({});
tensors.back().allocation_type = kTfLiteMmapRo;
tensor_buffer_identifiers[tensors.size() - 1] = buffer_identifier;
}
void FinalizeTensors() {
for (size_t i = 0; i < tensors.size(); ++i) {
tensors[i].data.data = buffers[i].data();
tensors[i].bytes = buffers[i].size();
}
}
xnn_weights_cache_look_up_key LookUpKey(const uint32_t algorithm_seed,
const int weights_index) const {
return {.seed = algorithm_seed,
.kernel = buffers[weights_index].data(),
.bias = nullptr};
}
xnn_weights_cache_look_up_key LookUpKey(const uint32_t algorithm_seed,
const int weights_index,
const int bias_index) const {
return {.seed = algorithm_seed,
.kernel = buffers[weights_index].data(),
.bias = buffers[bias_index].data()};
}
void AddTensorToPack(std::vector<uint8_t>& pack_buffer, int index) {
const std::vector<uint8_t>& buffer = buffers[index];
pack_buffer.resize(std::max(size(pack_buffer), size(buffer)));
for (size_t i = 0; i < size(buffer); ++i) {
pack_buffer[i] ^= buffer[i];
}
}
template <class... Ids>
PackIdentifier PackTensors(xnn_weights_cache_t weight_cache,
const uint32_t algorithm_seed,
const Ids... tensor_indices) {
PackIdentifier pack_id{algorithm_seed,
tensor_buffer_identifiers[tensor_indices]...};
PackedBuffer& packed =
packed_buffers.emplace(pack_id, PackedBuffer{})->second;
(AddTensorToPack(packed.buffer, tensor_indices), ...);
xnn_weights_cache_look_up_key look_up_key =
LookUpKey(algorithm_seed, tensor_indices...);
packed.offset = weight_cache->look_up_or_insert(
weight_cache->context, &look_up_key, packed.buffer.data(),
packed.buffer.size());
return pack_id;
}
struct PackedBuffer {
size_t offset;
std::vector<uint8_t> buffer;
};
std::vector<TfLiteTensor> tensors;
std::vector<std::vector<uint8_t>> buffers;
std::unordered_multimap<PackIdentifier, PackedBuffer, PackIdentifier::Hash>
packed_buffers;
std::unordered_map<size_t, size_t> tensor_buffer_identifiers;
};
struct BuildMMapWeightCacheProviderTest : testing::Test {
enum { kAlgoSeed1, kAlgoSeed2, kAlgoSeed3 };
enum { kBufferId1, kBufferId2, kBufferId3, kBufferId4 };
void SetUp() override {
AddTensors();
EndSetup();
}
void AddTensors() {
ctx.AddTensor(kBufferId1, 12);
ctx.AddTensor(kBufferId2, 43);
ctx.AddTensor(kBufferId3, 64);
ctx.AddTensor(kBufferId4, 8);
}
void EndSetup() {
ctx.FinalizeTensors();
cache_provider.MapTensorIdentifiers(ctx.tensors.data(), ctx.tensors.size(),
ctx.tensor_buffer_identifiers);
const std::string cache_path = testing::TempDir() + "/cache";
ASSERT_TRUE(cache_provider.StartBuild(cache_path.c_str()));
}
FakeContext ctx;
MMapWeightCacheProvider cache_provider;
};
TEST_F(BuildMMapWeightCacheProviderTest, LookUpFailsIfKeyDoesntMatch) {
xnn_weights_cache_look_up_key look_up_key{};
EXPECT_EQ(cache_provider.LookUp(&look_up_key), SIZE_MAX);
}
TEST_F(BuildMMapWeightCacheProviderTest, LookUpSucceeds) {
enum { kWeightIndex, kBiasIndex };
const auto pack_id = ctx.PackTensors(&cache_provider.GetCacheProvider(),
kAlgoSeed1, kWeightIndex, kBiasIndex);
const xnn_weights_cache_look_up_key look_up_key =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex, kBiasIndex);
EXPECT_EQ(cache_provider.LookUp(&look_up_key),
ctx.packed_buffers.find(pack_id)->second.offset);
}
TEST_F(BuildMMapWeightCacheProviderTest,
DifferentAlgoSeedsSameTensorsDontConflict) {
enum { kWeightIndex, kBiasIndex };
const auto pack_id_1 = ctx.PackTensors(&cache_provider.GetCacheProvider(),
kAlgoSeed1, kWeightIndex, kBiasIndex);
const auto pack_id_2 = ctx.PackTensors(&cache_provider.GetCacheProvider(),
kAlgoSeed2, kWeightIndex, kBiasIndex);
const xnn_weights_cache_look_up_key look_up_key_1 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex, kBiasIndex);
const xnn_weights_cache_look_up_key look_up_key_2 =
ctx.LookUpKey(kAlgoSeed2, kWeightIndex, kBiasIndex);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_1),
ctx.packed_buffers.find(pack_id_1)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_2),
ctx.packed_buffers.find(pack_id_2)->second.offset);
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_2));
}
TEST_F(BuildMMapWeightCacheProviderTest,
SameAlgoSeedDifferentTensorsDontConflict) {
enum { kWeightIndex1, kWeightIndex2, kBiasIndex1, kBiasIndex2 };
const auto pack_id_1 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex1, kBiasIndex1);
const auto pack_id_2 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex2, kBiasIndex1);
const auto pack_id_3 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex1, kBiasIndex2);
const auto pack_id_4 =
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex2, kBiasIndex2);
const xnn_weights_cache_look_up_key look_up_key_1 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex1);
const xnn_weights_cache_look_up_key look_up_key_2 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex2, kBiasIndex1);
const xnn_weights_cache_look_up_key look_up_key_3 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex2);
const xnn_weights_cache_look_up_key look_up_key_4 =
ctx.LookUpKey(kAlgoSeed1, kWeightIndex2, kBiasIndex2);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_1),
ctx.packed_buffers.find(pack_id_1)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_2),
ctx.packed_buffers.find(pack_id_2)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_3),
ctx.packed_buffers.find(pack_id_3)->second.offset);
EXPECT_EQ(cache_provider.LookUp(&look_up_key_4),
ctx.packed_buffers.find(pack_id_4)->second.offset);
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_2));
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_3));
EXPECT_NE(cache_provider.LookUp(&look_up_key_1),
cache_provider.LookUp(&look_up_key_4))
<< pack_id_1 << " " << pack_id_4;
EXPECT_NE(cache_provider.LookUp(&look_up_key_2),
cache_provider.LookUp(&look_up_key_3));
EXPECT_NE(cache_provider.LookUp(&look_up_key_2),
cache_provider.LookUp(&look_up_key_4));
EXPECT_NE(cache_provider.LookUp(&look_up_key_3),
cache_provider.LookUp(&look_up_key_4));
}
TEST_F(BuildMMapWeightCacheProviderTest, FinalizeWorks) {
enum { kWeightIndex1, kBiasIndex, kWeightIndex2 };
TempFileDesc tmp_file(TempFileDesc::kAutoCLose);
ASSERT_TRUE(cache_provider.StartBuild(tmp_file.GetCPath()));
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1, kWeightIndex1,
kBiasIndex);
ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed2,
kWeightIndex2);
EXPECT_TRUE(cache_provider.IsActive());
EXPECT_TRUE(cache_provider.IsBuilding());
ASSERT_TRUE(cache_provider.Finalize());
ASSERT_TRUE(cache_provider.IsFinalized());
}
struct LoadMMapWeightCacheProviderTest : BuildMMapWeightCacheProviderTest {
enum { kWeightIndex1, kBiasIndex, kWeightIndex2 };
void SetUp() override {
BuildMMapWeightCacheProviderTest::SetUp();
ASSERT_TRUE(cache_provider.StartBuild(tmp_file.GetCPath()));
pack_id_1 = ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed1,
kWeightIndex1, kBiasIndex);
pack_id_2 = ctx.PackTensors(&cache_provider.GetCacheProvider(), kAlgoSeed2,
kWeightIndex2);
ASSERT_TRUE(cache_provider.Finalize());
ASSERT_TRUE(cache_provider.IsFinalized());
}
xnn_weights_cache_look_up_key LookUpKey1() const {
return ctx.LookUpKey(kAlgoSeed1, kWeightIndex1, kBiasIndex);
}
xnn_weights_cache_look_up_key LookUpKey2() const {
return ctx.LookUpKey(kAlgoSeed2, kWeightIndex2);
}
TempFileDesc tmp_file;
PackIdentifier pack_id_1;
PackIdentifier pack_id_2;
};
TEST_F(LoadMMapWeightCacheProviderTest, LookUpFailsIfKeyDoesntMatch) {
xnn_weights_cache_look_up_key look_up_key{};
EXPECT_EQ(cache_provider.LookUp(&look_up_key), SIZE_MAX);
}
template <class T>
class LightSpan {
public:
using value_type = T;
LightSpan(const void* data, const size_t size)
: ptr_(reinterpret_cast<T*>(data)), size_(size) {}
  size_t size() const { return size_; }
const T* begin() const { return ptr_; }
const T* end() const { return ptr_ + size_; }
friend std::ostream& operator<<(std::ostream& os, const LightSpan<T>& s) {
os << '[';
auto it = s.begin();
if (it != s.end()) {
os << +*it;
      ++it;
    }
for (; it != s.end(); ++it) {
os << ", " << +*it;
}
return os << ']';
}
private:
T* ptr_;
size_t size_;
};
TEST_F(LoadMMapWeightCacheProviderTest, LookUpSucceeds) {
const auto& reference_1 = ctx.packed_buffers.find(pack_id_1)->second;
const auto& reference_2 = ctx.packed_buffers.find(pack_id_2)->second;
const xnn_weights_cache_look_up_key look_up_key_1 = LookUpKey1();
const xnn_weights_cache_look_up_key look_up_key_2 = LookUpKey2();
const uint64_t offset_1 = cache_provider.LookUp(&look_up_key_1);
const uint64_t offset_2 = cache_provider.LookUp(&look_up_key_2);
ASSERT_EQ(offset_1, reference_1.offset);
ASSERT_EQ(offset_2, reference_2.offset);
const void* const addr_1 = cache_provider.OffsetToAddr(offset_1);
const void* const addr_2 = cache_provider.OffsetToAddr(offset_2);
ASSERT_NE(addr_1, nullptr);
ASSERT_NE(addr_2, nullptr);
EXPECT_THAT(LightSpan<const uint8_t>(addr_1, reference_1.buffer.size()),
ElementsAreArray(reference_1.buffer));
EXPECT_THAT(LightSpan<const uint8_t>(addr_2, reference_2.buffer.size()),
ElementsAreArray(reference_2.buffer));
}
TEST(MMapWeightCacheProviderTest, XnnpackCApiJourney) {
using std::size;
TempFileDesc temp_fd(TempFileDesc::kAutoCLose);
const int32_t fake_packing_algo_seed = 0xBA0BAB;
const char packed_data_ref_1[] = "abcdefghij";
const char packed_data_ref_2[] = "klmnopqr";
auto bytes = [](const auto& array) { return size(array) * sizeof(array[0]); };
constexpr int kBufferCount = 10;
char fake_buffer_pointer[kBufferCount] = {0};
{
TfLiteTensor tensors[kBufferCount];
std::unordered_map<size_t, size_t> tensor_buffer_identifiers;
for (int i = 0; i < kBufferCount; ++i) {
tensors[i].data.data = (void*)(fake_buffer_pointer + i);
tensor_buffer_identifiers[i] = i;
}
MMapWeightCacheProvider cache_provider;
ASSERT_TRUE(cache_provider.StartBuild(temp_fd.GetCPath()));
xnn_weights_cache_t cache = &cache_provider.GetCacheProvider();
cache_provider.MapTensorIdentifiers(tensors, size(tensors),
tensor_buffer_identifiers);
const xnn_weights_cache_look_up_key look_up_key_1{
.seed = fake_packing_algo_seed,
.kernel = tensors[0].data.data,
.bias = tensors[1].data.data};
ASSERT_EQ(cache->look_up(cache, &look_up_key_1), SIZE_MAX);
void* const reserved_ptr =
cache->reserve_space(cache, bytes(packed_data_ref_1));
ASSERT_NE(reserved_ptr, nullptr);
std::memcpy(reserved_ptr, packed_data_ref_1, bytes(packed_data_ref_1));
const size_t build_offset_1 = cache->look_up_or_insert(
cache, &look_up_key_1, reserved_ptr, bytes(packed_data_ref_1));
const size_t build_offset_redundant = cache->look_up_or_insert(
cache, &look_up_key_1, reserved_ptr, bytes(packed_data_ref_1));
EXPECT_EQ(build_offset_1, build_offset_redundant);
ASSERT_EQ(cache->look_up(cache, &look_up_key_1), build_offset_1);
const xnn_weights_cache_look_up_key look_up_key_2{
.seed = fake_packing_algo_seed,
.kernel = tensors[2].data.data,
.bias = tensors[3].data.data};
const size_t build_offset_2 = cache->look_up_or_insert(
cache, &look_up_key_2, (void*)packed_data_ref_2,
bytes(packed_data_ref_2));
ASSERT_TRUE(cache_provider.Finalize());
ASSERT_TRUE(cache->is_finalized(cache));
const size_t reload_offset_1 = cache->look_up(cache, &look_up_key_1);
ASSERT_EQ(reload_offset_1, build_offset_1);
const void* const loaded_packed_data_1 =
cache->offset_to_addr(cache, reload_offset_1);
ASSERT_NE(loaded_packed_data_1, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_1, size(packed_data_ref_1)),
ElementsAreArray(packed_data_ref_1));
const size_t reload_offset_2 = cache->look_up(cache, &look_up_key_2);
ASSERT_EQ(reload_offset_2, build_offset_2);
const void* const loaded_packed_data_2 =
cache->offset_to_addr(cache, reload_offset_2);
ASSERT_NE(loaded_packed_data_2, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_2, size(packed_data_ref_2)),
ElementsAreArray(packed_data_ref_2));
}
{
TfLiteTensor tensors[kBufferCount];
std::unordered_map<size_t, size_t> tensor_buffer_identifiers;
for (int i = 0; i < kBufferCount; ++i) {
tensors[i].data.data = (void*)(fake_buffer_pointer + i);
tensor_buffer_identifiers[i] = i;
}
MMapWeightCacheProvider cache_provider;
ASSERT_TRUE(cache_provider.Load(temp_fd.GetCPath()));
xnn_weights_cache_t cache = &cache_provider.GetCacheProvider();
cache_provider.MapTensorIdentifiers(tensors, size(tensors),
tensor_buffer_identifiers);
const xnn_weights_cache_look_up_key look_up_key_1{
.seed = fake_packing_algo_seed,
.kernel = tensors[0].data.data,
.bias = tensors[1].data.data};
const xnn_weights_cache_look_up_key look_up_key_2{
.seed = fake_packing_algo_seed,
.kernel = tensors[2].data.data,
.bias = tensors[3].data.data};
ASSERT_TRUE(cache->is_finalized(cache));
const size_t offset_1 = cache->look_up(cache, &look_up_key_1);
const void* const loaded_packed_data_1 =
cache->offset_to_addr(cache, offset_1);
ASSERT_NE(loaded_packed_data_1, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_1, size(packed_data_ref_1)),
ElementsAreArray(packed_data_ref_1));
const size_t offset_2 = cache->look_up(cache, &look_up_key_2);
ASSERT_NE(offset_2, SIZE_MAX);
const void* const loaded_packed_data_2 =
cache->offset_to_addr(cache, offset_2);
ASSERT_NE(loaded_packed_data_2, nullptr);
EXPECT_THAT(
LightSpan<const char>(loaded_packed_data_2, size(packed_data_ref_2)),
ElementsAreArray(packed_data_ref_2));
}
}
}
} |
984 | cpp | tensorflow/tensorflow | nnapi_delegate | tensorflow/lite/delegates/nnapi/nnapi_delegate.cc | tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_H_
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/serialization.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
struct NnApiSLDriverImplFL5;
struct NnapiDelegateVendorPlugin;
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
namespace tflite {
namespace delegate {
namespace nnapi {
class NNAPIDelegateKernel;
}
}
using tflite::delegate::nnapi::NNAPIDelegateKernel;
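// NNAPI delegate that owns its state (options, registered memories, cached
// delegate kernels), so a single instance can be configured and reused.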
class StatefulNnApiDelegate : public TfLiteDelegate {
public:
struct Options {
enum ExecutionPreference {
kUndefined = -1,
kLowPower = 0,
kFastSingleAnswer = 1,
kSustainedSpeed = 2,
};
ExecutionPreference execution_preference = kUndefined;
const char* accelerator_name = nullptr;
const char* cache_dir = nullptr;
const char* model_token = nullptr;
bool disallow_nnapi_cpu = true;
int max_number_delegated_partitions = 3;
bool allow_fp16 = false;
int execution_priority = ANEURALNETWORKS_PRIORITY_DEFAULT;
uint64_t max_compilation_timeout_duration_ns = 0;
uint64_t max_execution_timeout_duration_ns = 0;
uint64_t max_execution_loop_timeout_duration_ns = 0;
bool allow_dynamic_dimensions = false;
bool use_burst_computation = false;
uint32_t max_execution_cache_size = 4;
std::map<int, size_t> tensor_max_size_hints;
const char* vendor_compilation_hints = nullptr;
const char* vendor_execution_hints = nullptr;
NnapiDelegateVendorPlugin* vendor_plugin = nullptr;
bool disable_debugging_diagnostics_callbacks = false;
};
StatefulNnApiDelegate();
explicit StatefulNnApiDelegate(const NnApi* nnapi);
explicit StatefulNnApiDelegate(Options options);
StatefulNnApiDelegate(const NnApi* nnapi, Options options);
StatefulNnApiDelegate(
const NnApiSLDriverImplFL5* nnapi_support_library_driver,
Options options);
~StatefulNnApiDelegate() = default;
static const Options GetOptions(TfLiteDelegate* delegate);
typedef TfLiteStatus (*CopyToHostTensorFnPtr)(TfLiteTensor* tensor,
ANeuralNetworksMemory* memory,
size_t memory_offset,
size_t byte_size,
void* callback_context);
struct MemoryRegistration {
ANeuralNetworksMemory* memory;
CopyToHostTensorFnPtr callback;
void* callback_context;
uint64_t timestamp;
};
TfLiteBufferHandle RegisterNnapiMemory(ANeuralNetworksMemory* memory,
CopyToHostTensorFnPtr callback,
void* callback_context);
static const std::vector<MemoryRegistration>& GetTensorMemoryMap(
TfLiteDelegate* delegate);
static delegates::Serialization* GetCache(TfLiteDelegate* delegate);
int GetNnApiErrno() const;
private:
struct Data {
const NnApi* nnapi;
Options::ExecutionPreference execution_preference;
std::string accelerator_name;
std::string cache_dir;
std::string model_token;
bool disallow_nnapi_cpu;
std::vector<MemoryRegistration> tensor_memory_map;
uint64_t next_buffer_handle_timestamp = 1;
int nnapi_errno = ANEURALNETWORKS_NO_ERROR;
std::unordered_map<int, NNAPIDelegateKernel*> delegate_state_cache;
int max_number_delegated_partitions;
bool allow_fp16;
int execution_priority = ANEURALNETWORKS_PRIORITY_DEFAULT;
uint64_t max_compilation_timeout_duration_ns = 0;
uint64_t max_execution_timeout_duration_ns = 0;
uint64_t max_execution_loop_timeout_duration_ns = 0;
bool allow_dynamic_dimensions = false;
bool use_burst_computation = false;
uint32_t max_execution_cache_size = 4;
std::map<int, size_t> tensor_max_size_hints;
const char* vendor_compilation_hints = nullptr;
const char* vendor_execution_hints = nullptr;
NnapiDelegateVendorPlugin* vendor_plugin = nullptr;
std::unique_ptr<const NnApi> owned_nnapi = nullptr;
std::unique_ptr<delegates::Serialization> cache;
bool disable_debugging_diagnostics_callbacks = false;
explicit Data(const NnApi* nnapi);
explicit Data(std::unique_ptr<const NnApi> nnapi);
~Data();
void CacheDelegateKernel(const TfLiteDelegateParams* delegate_params,
NNAPIDelegateKernel* delegate_state);
NNAPIDelegateKernel* MaybeGetCachedDelegateKernel(
const TfLiteDelegateParams* delegate_params);
};
static TfLiteStatus DoPrepare(TfLiteContext* context,
TfLiteDelegate* delegate);
static TfLiteStatus DoCopyFromBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor);
static TfLiteStatus DoCopyToBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor);
static void DoFreeBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* handle);
static TfLiteStatus GetNodesSupportedByAccelerator(
TfLiteContext* context, TfLiteDelegate* delegate, const NnApi* nnapi,
const std::vector<int>& supported_nodes,
std::vector<int>* device_supported_nodes, int* num_partitions,
TfLiteDelegateParams** params_array, int* nnapi_errno);
static TfLiteStatus LimitDelegatedPartitions(
int max_partitions,
std::vector<TfLiteDelegateParams> partition_params_array,
std::vector<int>* nodes_to_delegate);
void StatefulNnApiDelegateConstructorImpl(const Options& options);
Data delegate_data_;
};
TfLiteDelegate* NnApiDelegate();
}
#endif
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include <algorithm>
#include <cinttypes>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/serialization.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
#if defined __ANDROID__ || defined __unix__
#define TFLITE_NNAPI_ALLOW_MMAP_SHARING
#include <sys/mman.h>
#include <unistd.h>
#endif
#include "fp16.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include "tensorflow/lite/nnapi/nnapi_util.h"
#include "tensorflow/lite/util.h"
#ifdef NNAPI_VERBOSE_VALIDATION
#include "tensorflow/lite/schema/schema_generated.h"
#endif
#include <farmhash.h>
namespace tflite {
namespace {
static const char kNnapiId[] = "nnapi_";
constexpr uint64_t kNoMemoryTimestamp = 0;
std::string NnApiBackendId(
const StatefulNnApiDelegate::Options& delegate_options) {
std::string delegate_id = kNnapiId;
if (delegate_options.accelerator_name) {
delegate_id += delegate_options.accelerator_name;
}
return delegate_id;
}
std::string NnApiErrorDescription(int error_code) {
switch (error_code) {
case ANEURALNETWORKS_NO_ERROR:
return "ANEURALNETWORKS_NO_ERROR";
case ANEURALNETWORKS_OUT_OF_MEMORY:
return "ANEURALNETWORKS_OUT_OF_MEMORY";
case ANEURALNETWORKS_INCOMPLETE:
return "ANEURALNETWORKS_INCOMPLETE";
case ANEURALNETWORKS_UNEXPECTED_NULL:
return "ANEURALNETWORKS_UNEXPECTED_NULL";
case ANEURALNETWORKS_BAD_DATA:
return "ANEURALNETWORKS_BAD_DATA";
case ANEURALNETWORKS_OP_FAILED:
return "ANEURALNETWORKS_OP_FAILED";
case ANEURALNETWORKS_BAD_STATE:
return "ANEURALNETWORKS_BAD_STATE";
case ANEURALNETWORKS_UNMAPPABLE:
return "ANEURALNETWORKS_UNMAPPABLE";
case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
return "ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE";
case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
return "ANEURALNETWORKS_UNAVAILABLE_DEVICE";
case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
return "ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT";
case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
return "ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT";
case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
return "ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT";
case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
return "ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT";
case ANEURALNETWORKS_DEAD_OBJECT:
return "ANEURALNETWORKS_DEAD_OBJECT";
default:
return "Unknown NNAPI error code: " + std::to_string(error_code);
}
}
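// The macros below convert a failing NNAPI result code into a TF Lite error:
// they log the NNAPI error description, store the code in *p_errno and return
// kTfLiteError from the enclosing function.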
#define RETURN_TFLITE_ERROR_IF_NN_ERROR(context, code, call_desc, p_errno) \
do { \
const auto _code = (code); \
const auto _call_desc = (call_desc); \
if (_code != ANEURALNETWORKS_NO_ERROR) { \
const auto error_desc = NnApiErrorDescription(_code); \
TF_LITE_KERNEL_LOG(context, \
"NN API returned error %s at line %d while %s.\n", \
error_desc.c_str(), __LINE__, _call_desc); \
*p_errno = _code; \
return kTfLiteError; \
} \
} while (0)
#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(context, code, call_desc, \
p_tensor, p_errno) \
do { \
const auto _code = (code); \
const auto _call_desc = (call_desc); \
if (_code != ANEURALNETWORKS_NO_ERROR) { \
const auto error_desc = NnApiErrorDescription(_code); \
TF_LITE_KERNEL_LOG(context, \
"NN API returned error %s at line %d while %s " \
"for tensor '%s'.\n", \
error_desc.c_str(), __LINE__, _call_desc, \
(p_tensor)->name ? (p_tensor)->name : "no-name"); \
*p_errno = _code; \
return kTfLiteError; \
} \
} while (0)
bool IsFloat(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
return true;
default:
return false;
}
}
bool IsFloatOrUInt8(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
return true;
default:
return false;
}
}
bool IsQuantized(TfLiteType type) {
switch (type) {
case kTfLiteUInt8:
case kTfLiteInt8:
return true;
default:
return false;
}
}
bool IsInt32(TfLiteType type) {
switch (type) {
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsFloatOrQuantized(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
return true;
default:
return false;
}
}
bool IsFloatOrInt32(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsFloatQuantizedOrInt32(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsScalarInputSupported(int builtin_code) {
switch (builtin_code) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinMul:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinDiv:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinPow:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinPrelu:
case kTfLiteBuiltinLeakyRelu:
return true;
default:
return false;
}
}
bool NeedInt8Conversion(const TfLiteContext* context, int builtin_code,
const TfLiteNode* node) {
const int input_id = node->inputs->data[0];
const TfLiteType input_type = context->tensors[input_id].type;
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinDepthwiseConv2d:
case kTfLiteBuiltinFullyConnected: {
if (input_type == kTfLiteInt8) {
const int weights_id = node->inputs->data[1];
const auto& weights_tensor = context->tensors[weights_id];
if ((weights_tensor.type == kTfLiteInt8 ||
weights_tensor.type == kTfLiteUInt8) &&
weights_tensor.quantization.type == kTfLiteAffineQuantization) {
return true;
}
}
return false;
}
case kTfLiteBuiltinTransposeConv: {
const int input_id = 2;
const TfLiteType input_type = context->tensors[input_id].type;
if (in | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include <sys/mman.h>
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_plugin.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
MATCHER(QuantizedNear, "") {
const int diff = abs(std::get<0>(arg) - std::get<1>(arg));
if (diff > 1) {
*result_listener << "Quantized values can be at most off by one: " << diff;
return false;
}
return true;
}
class SingleOpModelWithNNAPI : public SingleOpModel {
public:
SingleOpModelWithNNAPI() { options_.disallow_nnapi_cpu = false; }
~SingleOpModelWithNNAPI() { stateful_delegate_.reset(); }
explicit SingleOpModelWithNNAPI(
const StatefulNnApiDelegate::Options& options) {
options_ = options;
options_.disallow_nnapi_cpu = false;
}
TfLiteStatus ResizeInputTensor(int tensor_index,
const std::vector<int>& dims) {
return interpreter_->ResizeInputTensor(tensor_index, dims);
}
StatefulNnApiDelegate* GetDelegate() { return stateful_delegate_.get(); }
void SetBufferHandle(int index, TfLiteBufferHandle handle) {
interpreter_->SetBufferHandle(index, handle, stateful_delegate_.get());
}
void MarkInputTensorDataStale(int index) {
interpreter_->tensor(index)->data_is_stale = true;
}
TfLiteStatus AllocateTensors() { return interpreter_->AllocateTensors(); }
void SetTensorMaxSize(uint32_t tensor_index, size_t max_size) {
options_.tensor_max_size_hints.emplace(tensor_index, max_size);
}
void ApplyNNAPIDelegate() {
stateful_delegate_ = std::make_unique<StatefulNnApiDelegate>(options_);
SetDelegate(stateful_delegate_.get());
ApplyDelegate();
}
protected:
void SetData(int index, TensorType type, const std::vector<float>& data) {
switch (type) {
case TensorType_FLOAT32:
PopulateTensor(index, data);
break;
case TensorType_INT32:
QuantizeAndPopulate<int32_t>(index, data);
break;
case TensorType_UINT8:
QuantizeAndPopulate<uint8_t>(index, data);
break;
case TensorType_INT8:
QuantizeAndPopulate<int8_t>(index, data);
break;
default:
FAIL() << "Type not supported: " << type;
break;
}
}
void GetData(int index, TensorType type, std::vector<float>* output) {
switch (type) {
case TensorType_FLOAT32:
*output = ExtractVector<float>(index);
break;
case TensorType_UINT8:
*output = Dequantize<uint8_t>(ExtractVector<uint8_t>(index),
GetScale(index), GetZeroPoint(index));
break;
default:
FAIL() << "Type not supported: " << type;
break;
}
}
void BuildInterpreterWithNNAPI(std::vector<std::vector<int>> input_shapes,
bool allow_fp32_relax_to_fp16 = false,
bool apply_delegate = true) {
BuildInterpreter(input_shapes, -1, allow_fp32_relax_to_fp16,
false, true);
if (apply_delegate) {
ApplyNNAPIDelegate();
}
}
private:
StatefulNnApiDelegate::Options options_;
std::unique_ptr<StatefulNnApiDelegate> stateful_delegate_;
};
class FloatAddOpModel : public SingleOpModelWithNNAPI {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false) {
Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
}
FloatAddOpModel(const StatefulNnApiDelegate::Options& options,
const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false)
: SingleOpModelWithNNAPI(options) {
Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
private:
void Init(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)},
allow_fp32_relax_to_fp16);
}
};
TEST(NNAPIDelegate, AddWithNoActivation) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, AddScalarWithNoActivation) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.3, 0.8, 0.8}));
}
TEST(NNAPIDelegate, AddWithNoActivationRelaxed) {
FloatAddOpModel m(
{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE, true);
m.PopulateTensor<float>(m.input1(), {-2.0, -1.0, 1.0, 2.0});
m.PopulateTensor<float>(m.input2(), {1.0, 2.0, 3.0, 4.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.0, 1.0, 4.0, 6.0}));
}
TEST(NNAPIDelegate, AddWithRelu) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_RELU);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, ResizeInputTensorsWorks) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
}
TEST(NNAPIDelegate, ResizeDynamicBatchInputTensorsWorks) {
StatefulNnApiDelegate::Options options;
options.allow_dynamic_dimensions = true;
options.max_execution_cache_size = 1;
FloatAddOpModel m(options,
{TensorType_FLOAT32, {1, 3, 2, 1}, 0.0f,
0.0f, 0.0f,
0, false,
{},
{},
0, {},
{},
{}, {},
{1, -1, 2, 1}},
{TensorType_FLOAT32, {1, 3, 2, 1}, 0.0f,
0.0f, 0.0f,
0, false,
{},
{},
0, {},
{},
{}, {},
{1, -1, 2, 1}},
{TensorType_FLOAT32, {}, 0.0f,
0.0f, 0.0f,
0, false,
{},
{},
0, {},
{},
{}, {},
{1, -1, 2, 1}},
ActivationFunctionType_NONE);
auto RunTestCase1 = [&m]() {
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
};
auto RunTestCase2 = [&m]() {
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
};
RunTestCase1();
RunTestCase1();
RunTestCase2();
RunTestCase1();
}
TEST(NNAPIDelegate, StatefulDelegate) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithAcceleratorName) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.accelerator_name = "nnapi-reference";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithInvalidAcceleratorName) {
if (!NnApiImplementation()->ANeuralNetworksDevice_getName) {
GTEST_SKIP();
}
testing::internal::CaptureStderr();
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.accelerator_name = "foo";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
EXPECT_THAT(testing::internal::GetCapturedStderr(),
testing::HasSubstr(
"Could not find the specified NNAPI accelerator: foo"));
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithCompilationCaching) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.cache_dir = "/data/local/tmp";
options.model_token = "NNAPIDelegate.StatefulDelegateWithCompilationCaching";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithQoS) {
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.execution_priority = ANEURALNETWORKS_PRIORITY_HIGH;
options.max_compilation_timeout_duration_ns = UINT64_MAX;
options.max_execution_timeout_duration_ns = UINT64_MAX;
options.max_execution_loop_timeout_duration_ns = UINT64_MAX;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, DISABLED_StatefulDelegateWithBufferHandles) {
if (!NnApiImplementation()->ASharedMemory_create ||
!NnApiImplementation()->ANeuralNetworksMemory_createFromFd) {
GTEST_SKIP();
}
StatefulNnApiDelegate::Options options;
options.disallow_nnapi_cpu = false;
options.max_execution_cache_size = 2;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
auto* delegate = m.GetDelegate();
constexpr auto kInput1ByteSize = 4 * sizeof(float);
ANeuralNetworksMemory* input1_memory = nullptr;
int fd =
NnApiImplementation()->ASharedMemory_create("input1", kInput1ByteSize);
EXPECT_GE(fd, 0);
void* input1_memory_data =
mmap(nullptr, kInput1ByteSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
EXPECT_TRUE(input1_memory_data != nullptr);
float input1_data[] = {-2.0, 0.2, 0.7, 0.8};
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
int result = NnApiImplementation()->ANeuralNetworksMemory_createFromFd(
kInput1ByteSize, PROT_READ, fd, 0, &input1_memory);
EXPECT_EQ(result, ANEURALNETWORKS_NO_ERROR);
ASSERT_NE(input1_memory, nullptr);
struct DummyMemoryContext {
ANeuralNetworksMemory* memory_handle;
void* memory_data;
size_t byte_size;
};
DummyMemoryContext memory_context = {input1_memory, input1_memory_data,
kInput1ByteSize};
static StatefulNnApiDelegate::CopyToHostTensorFnPtr memory_callback =
[](TfLiteTensor* tensor, ANeuralNetworksMemory* memory,
size_t memory_offset, size_t byte_size,
void* callback_context) -> TfLiteStatus {
auto memory_context =
reinterpret_cast<DummyMemoryContext*>(callback_context);
if (memory != memory_context->memory_handle ||
memory_offset + byte_size > memory_context->byte_size) {
return kTfLiteError;
}
memcpy(
tensor->data.raw,
reinterpret_cast<uint8_t*>(memory_context->memory_data) + memory_offset,
byte_size);
return kTfLiteOk;
};
auto input1_handle = delegate->RegisterNnapiMemory(
input1_memory, memory_callback, &memory_context);
m.SetBufferHandle(m.input1(), input1_handle);
m.MarkInputTensorDataStale(m.input1());
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
for (int i = 0; i < 10; i++) {
input1_data[0] = -2.0 + i;
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
m.MarkInputTensorDataStale(m.input1());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
}
for (int i = 0; i < 10; i++) {
input1_data[0] = -2.0 + i;
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
auto input1_handle = delegate->RegisterNnapiMemory(
input1_memory, memory_callback, &memory_context);
m.SetBufferHandle(m.input1(), input1_handle);
m.MarkInputTensorDataStale(m.input1());
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
}
}
class FloatMulOpModel : public SingleOpModelWithNNAPI {
public:
FloatMulOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MUL, BuiltinOptions_MulOptions,
CreateMulOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, MulWithNoActivation) {
FloatMulOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
}
class FloatPoolingOpModel : public SingleOpModelWithNNAPI {
public:
FloatPoolingOpModel(BuiltinOperator type, const TensorData& input,
int filter_width, int filter_height,
const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
type, BuiltinOptions_Pool2DOptions,
CreatePool2DOptions(builder_, Padding_VALID, 2, 2, filter_width,
filter_height, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
TEST(NNAPIDelegate, AveragePoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_AVERAGE_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2.75, 5.75}));
}
TEST(NNAPIDelegate, MaxPoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_MAX_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10}));
}
TEST(NNAPIDelegate, L2PoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_L2_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3.5, 6.5}));
}
class ConvolutionOpModel : public SingleOpModelWithNNAPI {
public:
ConvolutionOpModel(
const TensorData& input, const TensorData& filter,
const TensorData& output, int stride_width = 2, int stride_height = 2,
enum Padding padding = Padding_VALID,
enum ActivationFunctionType activation = ActivationFunctionType_NONE,
int dilation_width_factor = 1, int dilation_height_factor = 1)
: input_type_(input.type), filter_type_(filter.type) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[0];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(
builder_, padding, stride_width, stride_height, activation,
dilation_width_factor, dilation_height_factor)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetFilter(std::initializer_list<float> data) {
SetData(filter_, filter_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
std::vector<uint8_t> GetQuantizedOutput() {
if (input_type_ == TensorType_FLOAT32) {
return {};
} else {
return ExtractVector<uint8_t>(output_);
}
}
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType filter_type_;
};
TEST(ConvolutionOpTest, SimpleTestQuantized) {
ConvolutionOpModel m({TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
},
1e-5)));
EXPECT_THAT(m.GetQuantizedOutput(), ElementsAreArray({
145, 129, 132,
145, 129, 132,
144, 131, 130,
164, 131, 130,
}));
}
TEST(ConvolutionOpTest, SimpleTestQuantizedGrouped) {
ConvolutionOpModel m({TensorType_UINT8, {2, 2, 2, 2}, -63.5, 64},
{TensorType_UINT8, {2, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 2,
23, 6
},
1e-5)));
EXPECT_THAT(m.GetQuantizedOutput(), ElementsAreArray({
145, 129,
150, 133,
}));
}
TEST(ConvolutionOpTest, FloatInputQuantizedWeights) {
ConvolutionOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_UINT8, {3, 2, 2, 1}, 0, 64},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 1, 1, 2,
2, 2, 2, 1,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
0, 1, 0, 1,
0, 0, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 5, 7,
16, 5, 6,
17, 6, 6,
37, 10, 10,
},
0.2)));
}
TEST(ConvolutionOpTest, NoActivation) {
ConvolutionOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
}));
}
TEST(ConvolutionOpTest, SimpleTestQuantizedOutputMultiplierGreaterThan1) {
ConvolutionOpModel quant_op({TensorType_UINT8, {2, 2, 4, 1}, -128.5, 128},
{TensorType_UINT8, {3, 2, 2, 1}, -128.5, 128},
{TensorType_UINT8, {}, -127, 128});
ConvolutionOpModel float_op({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}});
std: |
985 | cpp | tensorflow/tensorflow | nnapi_delegate_c_api | tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.cc | tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_C_API_H_
#define TENSORFLOW_LITE_DELEGATES_NNAPI_NNAPI_DELEGATE_C_API_H_
#include "tensorflow/lite/core/c/common.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TFL_CAPI_EXPORT TfLiteNnapiDelegateOptions {
enum ExecutionPreference {
kUndefined = -1,
kLowPower = 0,
kFastSingleAnswer = 1,
kSustainedSpeed = 2,
} execution_preference;
const char* accelerator_name;
const char* cache_dir;
const char* model_token;
int disallow_nnapi_cpu;
int allow_fp16;
int max_number_delegated_partitions;
void* nnapi_support_library_handle;
} TfLiteNnapiDelegateOptions;
TfLiteDelegate* TFL_CAPI_EXPORT
TfLiteNnapiDelegateCreate(const TfLiteNnapiDelegateOptions* options);
TFL_CAPI_EXPORT TfLiteNnapiDelegateOptions TfLiteNnapiDelegateOptionsDefault();
void TFL_CAPI_EXPORT TfLiteNnapiDelegateDelete(TfLiteDelegate* delegate);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
TfLiteDelegate* TfLiteNnapiDelegateCreate(
const TfLiteNnapiDelegateOptions* options) {
  tflite::StatefulNnApiDelegate::Options internal_options;
  internal_options.execution_preference =
      static_cast<tflite::StatefulNnApiDelegate::Options::ExecutionPreference>(
          options->execution_preference);
internal_options.accelerator_name = options->accelerator_name;
internal_options.cache_dir = options->cache_dir;
internal_options.model_token = options->model_token;
internal_options.disallow_nnapi_cpu = options->disallow_nnapi_cpu;
internal_options.max_number_delegated_partitions =
options->max_number_delegated_partitions;
internal_options.allow_fp16 = options->allow_fp16;
tflite::StatefulNnApiDelegate* delegate = nullptr;
if (options->nnapi_support_library_handle) {
delegate = new tflite::StatefulNnApiDelegate(
static_cast<NnApiSLDriverImplFL5*>(
options->nnapi_support_library_handle),
internal_options);
} else {
delegate = new tflite::StatefulNnApiDelegate(internal_options);
}
return delegate;
}
TfLiteNnapiDelegateOptions TfLiteNnapiDelegateOptionsDefault() {
TfLiteNnapiDelegateOptions result = {};
tflite::StatefulNnApiDelegate::Options options;
result.execution_preference =
static_cast<TfLiteNnapiDelegateOptions::ExecutionPreference>(
options.execution_preference);
result.accelerator_name = options.accelerator_name;
result.cache_dir = options.cache_dir;
result.model_token = options.model_token;
result.disallow_nnapi_cpu = options.disallow_nnapi_cpu;
result.max_number_delegated_partitions =
options.max_number_delegated_partitions;
result.allow_fp16 = options.allow_fp16;
result.nnapi_support_library_handle = nullptr;
return result;
}
void TfLiteNnapiDelegateDelete(TfLiteDelegate* delegate) {
if (delegate == nullptr) return;
delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
} | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.h"
#include <sys/mman.h>
#include <algorithm>
#include <initializer_list>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class SingleOpModelWithNnapiDelegateCApi : public SingleOpModel {
public:
SingleOpModelWithNnapiDelegateCApi() {
options_ = TfLiteNnapiDelegateOptionsDefault();
options_.disallow_nnapi_cpu = false;
}
explicit SingleOpModelWithNnapiDelegateCApi(
const TfLiteNnapiDelegateOptions& options) {
options_ = options;
options_.disallow_nnapi_cpu = false;
}
~SingleOpModelWithNnapiDelegateCApi() {
if (nnapi_delegate_) {
TfLiteNnapiDelegateDelete(nnapi_delegate_);
}
nnapi_delegate_ = nullptr;
}
protected:
void BuildInterpreterWithNNAPI(std::vector<std::vector<int>> input_shapes) {
if (nnapi_delegate_) {
TfLiteNnapiDelegateDelete(nnapi_delegate_);
}
nnapi_delegate_ = TfLiteNnapiDelegateCreate(&options_);
SetDelegate(nnapi_delegate_);
BuildInterpreter(input_shapes, -1, options_.allow_fp16,
true, true);
}
private:
TfLiteNnapiDelegateOptions options_;
TfLiteDelegate* nnapi_delegate_ = nullptr;
};
class FloatAddOpModel : public SingleOpModelWithNnapiDelegateCApi {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
Init(input1, input2, output, activation_type);
}
FloatAddOpModel(const TfLiteNnapiDelegateOptions& options,
const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type)
: SingleOpModelWithNnapiDelegateCApi(options) {
Init(input1, input2, output, activation_type);
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
private:
void Init(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
};
TEST(NNAPIDelegate, C_API) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, C_API_WithAcceleratorName) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
options.accelerator_name = "nnapi-reference";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, C_API_WithCompilationCaching) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
options.cache_dir = "/data/local/tmp";
options.model_token = "NNAPIDelegate.C_API_WithCompilationCaching";
{
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
{
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-1.0, 0.1, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.2, 0.2, 0.4, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-0.8, 0.3, 1.1, 1.0}));
}
}
}
} |
986 | cpp | tensorflow/tensorflow | quant_lstm_sup | tensorflow/lite/delegates/nnapi/quant_lstm_sup.cc | tensorflow/lite/delegates/nnapi/quant_lstm_sup_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_NNAPI_QUANT_LSTM_SUP_H_
#define TENSORFLOW_LITE_DELEGATES_NNAPI_QUANT_LSTM_SUP_H_
#include <vector>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegate {
namespace nnapi {
void ExtractQuantLstmWeightsSubmatrix(const TfLiteIntArray* submatrix_dims,
const int32_t offset_row,
const int32_t offset_column,
const TfLiteIntArray* weight_dims,
const uint8_t* weights,
std::vector<uint8_t>* submatrix);
void DecomposeQuantLstmWeightsTensor(const uint8_t* concat_weights,
const TfLiteIntArray* weight_dims,
std::vector<uint8_t>* recurrent_to_input,
std::vector<uint8_t>* input_to_input,
std::vector<uint8_t>* recurrent_to_cell,
std::vector<uint8_t>* input_to_cell,
std::vector<uint8_t>* recurrent_to_forget,
std::vector<uint8_t>* input_to_forget,
std::vector<uint8_t>* recurrent_to_output,
std::vector<uint8_t>* input_to_output);
void SetWeightSubmatrixDims(const TfLiteIntArray* weight_dims,
TfLiteIntArray* recurrent_submatrix_dims,
TfLiteIntArray* input_submatrix_dims);
void DecomposeBiasTensor(const int32_t* biases, int bias_size,
std::vector<int32_t>* input_bias,
std::vector<int32_t>* cell_bias,
std::vector<int32_t>* forget_bias,
std::vector<int32_t>* output_bias);
}
}
}
#endif
#include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include <algorithm>
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegate {
namespace nnapi {
// Copies the [submatrix_rows x submatrix_cols] block that starts at
// (offset_row, offset_column) out of the row-major `weights` matrix into
// `submatrix`.
void ExtractQuantLstmWeightsSubmatrix(const TfLiteIntArray* submatrix_dims,
                                      const int32_t offset_row,
                                      const int32_t offset_column,
                                      const TfLiteIntArray* weight_dims,
                                      const uint8_t* weights,
                                      std::vector<uint8_t>* submatrix) {
auto const& submatrix_rows = submatrix_dims->data[0];
auto const& submatrix_cols = submatrix_dims->data[1];
auto const& weight_cols = weight_dims->data[1];
submatrix->resize(NumElements(submatrix_dims));
for (uint32_t i = 0, end = submatrix_rows * submatrix_cols; i < end; ++i) {
const uint32_t row = i / submatrix_cols;
const uint32_t column = i % submatrix_cols;
(*submatrix)[i] =
weights[(row + offset_row) * weight_cols + column + offset_column];
}
}
// The concatenated LSTM weight matrix stacks the four gate submatrices
// (input, cell, forget, output) along the row dimension, so each gate's
// output depth is a quarter of the total row count.
inline int OutputDepth(const TfLiteIntArray* weight_dims) {
  return weight_dims->data[0] / 4;
}
// Each row holds the recurrent weights (output_depth columns) followed by the
// input weights, so the input depth is the remaining column count.
inline int InputDepth(const TfLiteIntArray* weight_dims) {
  return weight_dims->data[1] - OutputDepth(weight_dims);
}
void SetWeightSubmatrixDims(const TfLiteIntArray* weight_dims,
TfLiteIntArray* recurrent_submatrix_dims,
TfLiteIntArray* input_submatrix_dims) {
const auto input_depth = InputDepth(weight_dims);
const auto output_depth = OutputDepth(weight_dims);
recurrent_submatrix_dims->data[0] = output_depth;
recurrent_submatrix_dims->data[1] = output_depth;
input_submatrix_dims->data[0] = output_depth;
input_submatrix_dims->data[1] = input_depth;
}
void DecomposeQuantLstmWeightsTensor(const uint8_t* concat_weights,
const TfLiteIntArray* weight_dims,
std::vector<uint8_t>* recurrent_to_input,
std::vector<uint8_t>* input_to_input,
std::vector<uint8_t>* recurrent_to_cell,
std::vector<uint8_t>* input_to_cell,
std::vector<uint8_t>* recurrent_to_forget,
std::vector<uint8_t>* input_to_forget,
std::vector<uint8_t>* recurrent_to_output,
std::vector<uint8_t>* input_to_output) {
const auto output_depth = OutputDepth(weight_dims);
TfLiteIntArray* recurrent_submatrix_dims = TfLiteIntArrayCreate(2);
TfLiteIntArray* input_submatrix_dims = TfLiteIntArrayCreate(2);
SetWeightSubmatrixDims(weight_dims, recurrent_submatrix_dims,
input_submatrix_dims);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 0 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_input);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 0 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_input);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 1 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_cell);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 1 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_cell);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 2 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_forget);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 2 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_forget);
ExtractQuantLstmWeightsSubmatrix(recurrent_submatrix_dims, 3 * output_depth,
0, weight_dims, concat_weights,
recurrent_to_output);
ExtractQuantLstmWeightsSubmatrix(input_submatrix_dims, 3 * output_depth,
output_depth, weight_dims, concat_weights,
input_to_output);
TfLiteIntArrayFree(recurrent_submatrix_dims);
TfLiteIntArrayFree(input_submatrix_dims);
}
// The concatenated bias tensor packs the four gate biases back to back in the
// order input, cell, forget, output, each of length `bias_size`.
void DecomposeBiasTensor(const int32_t* biases, int bias_size,
                         std::vector<int32_t>* input_bias,
                         std::vector<int32_t>* cell_bias,
                         std::vector<int32_t>* forget_bias,
                         std::vector<int32_t>* output_bias) {
input_bias->resize(bias_size);
std::copy(biases, biases + bias_size, input_bias->begin());
cell_bias->resize(bias_size);
std::copy(biases + bias_size, biases + 2 * bias_size, cell_bias->begin());
forget_bias->resize(bias_size);
std::copy(biases + 2 * bias_size, biases + 3 * bias_size,
forget_bias->begin());
output_bias->resize(bias_size);
std::copy(biases + 3 * bias_size, biases + 4 * bias_size,
output_bias->begin());
}
}
}
} | #include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/testing/util.h"
namespace {
using ::testing::ElementsAreArray;
using ::testing::Test;
class DimsAllocatingTest : public Test {
protected:
DimsAllocatingTest() : allocated_dims_() {}
~DimsAllocatingTest() override {
for (TfLiteIntArray* dim : allocated_dims_) {
TfLiteIntArrayFree(dim);
}
}
TfLiteIntArray* CreateDimArray(int size,
std::initializer_list<int> dimensions) {
TfLiteIntArray* dims = TfLiteIntArrayCreate(size);
allocated_dims_.push_back(dims);
int i = 0;
for (const int dimension : dimensions) {
dims->data[i++] = dimension;
}
return dims;
}
private:
std::vector<TfLiteIntArray*> allocated_dims_;
};
using tflite::delegate::nnapi::ExtractQuantLstmWeightsSubmatrix;
class ExtractQuantLstmWeightsSubmatrixTest : public DimsAllocatingTest {};
TEST_F(ExtractQuantLstmWeightsSubmatrixTest, TopLeftSubmatrixIsExtracted) {
std::vector<uint8_t> weights = {1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235};
const TfLiteIntArray* weight_dims = CreateDimArray(2, {8, 5});
std::vector<uint8_t> submatrix;
const TfLiteIntArray* submatrix_dims = CreateDimArray(2, {2, 3});
ExtractQuantLstmWeightsSubmatrix(submatrix_dims, 0 ,
0 , weight_dims,
weights.data(), &submatrix);
EXPECT_THAT(submatrix, ElementsAreArray({1, 2, 3, 11, 12, 13}));
}
TEST_F(ExtractQuantLstmWeightsSubmatrixTest, TopRightSubmatrixIsExtracted) {
std::vector<uint8_t> weights = {1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235};
const TfLiteIntArray* weight_dims = CreateDimArray(2, {8, 5});
std::vector<uint8_t> submatrix;
const TfLiteIntArray* submatrix_dims = CreateDimArray(2, {2, 2});
ExtractQuantLstmWeightsSubmatrix(submatrix_dims, 0 ,
3 , weight_dims,
weights.data(), &submatrix);
EXPECT_THAT(submatrix, ElementsAreArray({4, 5, 14, 15}));
}
TEST_F(ExtractQuantLstmWeightsSubmatrixTest, RightCentralSubmatrixIsExtracted) {
std::vector<uint8_t> weights = {1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235};
const TfLiteIntArray* weight_dims = CreateDimArray(2, {8, 5});
std::vector<uint8_t> submatrix;
const TfLiteIntArray* submatrix_dims = CreateDimArray(2, {2, 2});
ExtractQuantLstmWeightsSubmatrix(
submatrix_dims, 1 * submatrix_dims->data[0] ,
3 , weight_dims, weights.data(), &submatrix);
EXPECT_THAT(submatrix, ElementsAreArray({104, 105, 114, 115}));
}
using tflite::delegate::nnapi::DecomposeQuantLstmWeightsTensor;
class QuantLstmWeightDecompTest : public DimsAllocatingTest {
protected:
QuantLstmWeightDecompTest()
: weights_({1, 2, 3, 4, 5,
11, 12, 13, 14, 15,
101, 102, 103, 104, 105,
111, 112, 113, 114, 115,
201, 202, 203, 204, 205,
211, 212, 213, 214, 215,
221, 222, 223, 224, 225,
231, 232, 233, 234, 235}),
recurrent_to_input_(),
input_to_input_(),
recurrent_to_cell_(),
input_to_cell_(),
recurrent_to_forget_(),
input_to_forget_(),
recurrent_to_output_(),
input_to_output_() {
weight_dims_ = CreateDimArray(2, {8, 5});
}
const std::vector<uint8_t> weights_;
const TfLiteIntArray* weight_dims_;
std::vector<uint8_t> recurrent_to_input_;
std::vector<uint8_t> input_to_input_;
std::vector<uint8_t> recurrent_to_cell_;
std::vector<uint8_t> input_to_cell_;
std::vector<uint8_t> recurrent_to_forget_;
std::vector<uint8_t> input_to_forget_;
std::vector<uint8_t> recurrent_to_output_;
std::vector<uint8_t> input_to_output_;
};
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToInput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_input_, ElementsAreArray({1, 2,
11, 12}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToInput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_input_, ElementsAreArray({3, 4, 5,
13, 14, 15}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToCell) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_cell_, ElementsAreArray({101, 102,
111, 112}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToCell) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_cell_, ElementsAreArray({103, 104, 105,
113, 114, 115}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToForget) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_forget_, ElementsAreArray({201, 202,
211, 212}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToForget) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_forget_, ElementsAreArray({203, 204, 205,
213, 214, 215}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractRecurrentToOutput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(recurrent_to_output_, ElementsAreArray({221, 222,
231, 232}));
}
TEST_F(QuantLstmWeightDecompTest, ExtractInputToOutput) {
DecomposeQuantLstmWeightsTensor(
weights_.data(), weight_dims_, &recurrent_to_input_, &input_to_input_,
&recurrent_to_cell_, &input_to_cell_, &recurrent_to_forget_,
&input_to_forget_, &recurrent_to_output_, &input_to_output_);
EXPECT_THAT(input_to_output_, ElementsAreArray({223, 224, 225,
233, 234, 235}));
}
using tflite::delegate::nnapi::DecomposeBiasTensor;
TEST(DecomposeBiasTensor, ExtractInputBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(input_bias, ElementsAreArray({-7876, 13488, -726, 32839}));
}
TEST(DecomposeBiasTensor, ExtractCellBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(cell_bias, ElementsAreArray({39481, 48624, 48976, -21419}));
}
TEST(DecomposeBiasTensor, ExtractForgetBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(forget_bias, ElementsAreArray({9206, -46884, -11693, -38724}));
}
TEST(DecomposeBiasTensor, ExtractOutputBias) {
std::vector<int32_t> biases
{-7876, 13488, -726, 32839,
39481, 48624, 48976, -21419,
9206, -46884, -11693, -38724,
-58999, -17050, -41852, -40538};
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
DecomposeBiasTensor(biases.data(), 4, &input_bias, &cell_bias, &forget_bias,
&output_bias);
EXPECT_THAT(output_bias, ElementsAreArray({-58999, -17050, -41852, -40538}));
}
} |
987 | cpp | tensorflow/tensorflow | min_max_builder | tensorflow/lite/delegates/hexagon/builders/min_max_builder.cc | tensorflow/lite/delegates/hexagon/builders/tests/min_max_builder_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_MIN_MAX_BUILDER_H_
#define TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_MIN_MAX_BUILDER_H_
#include "tensorflow/lite/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class MinMaxOpBuilder : public OpBuilder {
public:
explicit MinMaxOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
private:
TensorID node_output_;
};
}
}
}
#endif
#include "tensorflow/lite/delegates/hexagon/builders/min_max_builder.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus MinMaxOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
int a_tensor_id = inputs->data[0];
int b_tensor_id = inputs->data[1];
const auto& a_tensor = context->tensors[a_tensor_id];
const auto& b_tensor = context->tensors[b_tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(a_tensor_id));
AddInput(graph_builder_->GetHexagonTensorId(b_tensor_id));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, a_tensor));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, b_tensor));
const int output_tensor_id = outputs->data[0];
const auto& output_tensor = context->tensors[output_tensor_id];
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, output_tensor));
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
  // The Hexagon op also reports the output's min and max as scalar float
  // outputs.
  AddOutput(sizeof(float), 4, kScalarShape);
  AddOutput(sizeof(float), 4, kScalarShape);
return kTfLiteOk;
}
TfLiteStatus MinMaxOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
OpBuilder* CreateMinMaxBuilder(GraphBuilder* graph_builder, int op_type) {
return new MinMaxOpBuilder(graph_builder, op_type);
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
template <typename data_type>
class MinMaxOpModel : public SingleOpModelWithHexagon {
public:
MinMaxOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
MinMaxOpModel(tflite::BuiltinOperator op, const TensorData& input1,
std::initializer_list<data_type> input1_values,
const TensorData& input2,
std::initializer_list<data_type> input2_values,
const TensorData& output, bool input1_const) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
if (input1_const) {
auto* input1_tensor = interpreter_->tensor(input1_);
input1_tensor->allocation_type = kTfLiteMmapRo;
} else {
auto* input2_tensor = interpreter_->tensor(input2_);
input2_tensor->allocation_type = kTfLiteMmapRo;
}
}
void SetInput1(std::vector<data_type> data) { PopulateTensor(input1_, data); }
void SetInput2(std::vector<data_type> data) { PopulateTensor(input2_, data); }
std::vector<data_type> GetOutput() {
return ExtractVector<data_type>(output_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
template <typename data_type>
void TestModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values) {
std::unique_ptr<MinMaxOpModel<data_type>> m;
m = std::make_unique<MinMaxOpModel<data_type>>(op, input1, input2, output);
m->SetInput1(input1_values);
m->SetInput2(input2_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
const auto reference_output = m->GetOutput();
const auto reference_output_shape = m->GetOutputShape();
m->ApplyDelegateAndInvoke();
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(reference_output_shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(reference_output));
}
template <typename data_type>
void TestModelConstInput(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values,
bool input1_const) {
std::unique_ptr<MinMaxOpModel<data_type>> m;
m = std::make_unique<MinMaxOpModel<data_type>>(
op, input1, input1_values, input2, input2_values, output, input1_const);
m->SetInput1(input1_values);
m->SetInput2(input2_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
const auto reference_output = m->GetOutput();
const auto reference_output_shape = m->GetOutputShape();
m->ApplyDelegateAndInvoke();
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(reference_output_shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(reference_output));
}
TEST(MinMaxOpTest, Maximum_Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MAXIMUM,
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2);
}
TEST(MinMaxOpTest, Maximum_Uint8Test_Const) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModelConstInput<uint8_t>(
BuiltinOperator_MAXIMUM, {TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2, false);
}
TEST(MinMaxOpTest, Minimum_Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MINIMUM,
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2);
}
TEST(MinMaxOpTest, Minimum_Uint8Test_Const) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 20, 1};
TestModelConstInput<uint8_t>(
BuiltinOperator_MINIMUM, {TensorType_UINT8, {1, 3, 1, 2}, -1, 25},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 25},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 25}, data1, data2, false);
}
TEST(MinMaxOpTest, Maximum_Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM,
{TensorType_INT8, {1, 3, 1, 2}, -1, 125},
{TensorType_INT8, {1, 3, 1, 2}, -1, 125},
{TensorType_INT8, {1, 3, 1, 2}, -1, 125}, data1, data2);
}
TEST(MinMaxOpTest, Minimum_Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 12, 1};
TestModel<int8_t>(BuiltinOperator_MINIMUM,
{TensorType_INT8, {1, 3, 1, 2}, -1, 25},
{TensorType_INT8, {1, 3, 1, 2}, -1, 25},
{TensorType_INT8, {1, 3, 1, 2}, -1, 25}, data1, data2);
}
} |
988 | cpp | tensorflow/tensorflow | async_buffers | tensorflow/lite/delegates/gpu/async_buffers.cc | tensorflow/lite/delegates/gpu/async_buffers_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_ASYNC_BUFFERS_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_ASYNC_BUFFERS_H_
#if defined(__ANDROID__)
#include <android/hardware_buffer.h>
#endif
#include <GLES3/gl31.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/api.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
extern "C" typedef struct AHardwareBuffer AHardwareBuffer;
namespace tflite {
namespace gpu {
class AsyncBuffer {
private:
int bytes_;
bool valid_ = false;
GLuint opengl_buffer_ = GL_INVALID_INDEX;
AHardwareBuffer* ahwb_ = nullptr;
absl::Status MapAHardwareBufferToGlBuffer();
absl::Status AllocateOpenGlBuffer();
public:
explicit AsyncBuffer(TensorObjectDef tensor_def, AHardwareBuffer* ahwb) {
bytes_ = NumElements(tensor_def) * SizeOf(tensor_def.object_def.data_type);
ahwb_ = ahwb;
}
absl::Status GetOpenGlBuffer(GLuint& buffer_ref);
};
}
}
#endif
#include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl31.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_errors.h"
namespace {
PFNGLBUFFERSTORAGEEXTERNALEXTPROC glBufferStorageExternalEXT;
PFNEGLGETNATIVECLIENTBUFFERANDROIDPROC eglGetNativeClientBufferANDROID;
bool IsGlSupported() {
static const bool extensions_allowed = [] {
eglGetNativeClientBufferANDROID =
reinterpret_cast<PFNEGLGETNATIVECLIENTBUFFERANDROIDPROC>(
eglGetProcAddress("eglGetNativeClientBufferANDROID"));
glBufferStorageExternalEXT =
reinterpret_cast<PFNGLBUFFERSTORAGEEXTERNALEXTPROC>(
eglGetProcAddress("glBufferStorageExternalEXT"));
return eglGetNativeClientBufferANDROID && glBufferStorageExternalEXT;
}();
return extensions_allowed;
}
}
namespace tflite {
namespace gpu {
absl::Status AsyncBuffer::MapAHardwareBufferToGlBuffer() {
if (!IsGlSupported()) {
return absl::UnknownError(
"No GL extension functions found to bind AHardwareBuffer and "
"OpenGL buffer");
}
EGLClientBuffer native_buffer = eglGetNativeClientBufferANDROID(ahwb_);
if (!native_buffer) {
return absl::UnknownError("Can't get native buffer");
}
glBufferStorageExternalEXT(GL_SHADER_STORAGE_BUFFER, 0, bytes_, native_buffer,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT |
GL_MAP_COHERENT_BIT_EXT |
GL_MAP_PERSISTENT_BIT_EXT);
return gl::GetOpenGlErrors();
}
absl::Status AsyncBuffer::AllocateOpenGlBuffer() {
if (opengl_buffer_ == GL_INVALID_INDEX) {
glGenBuffers(1, &opengl_buffer_);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, opengl_buffer_);
    absl::Status status = MapAHardwareBufferToGlBuffer();
    if (!status.ok()) {
      // Mapping the AHardwareBuffer into the GL buffer failed: release the
      // AHardwareBuffer and fall back to a plain OpenGL buffer allocation of
      // the same size.
      if (ahwb_ != nullptr) {
        if (OptionalAndroidHardwareBuffer::Instance().Supported()) {
          OptionalAndroidHardwareBuffer::Instance().Release(ahwb_);
        }
        ahwb_ = nullptr;
      }
      glBufferData(GL_SHADER_STORAGE_BUFFER, bytes_, nullptr, GL_STREAM_COPY);
    }
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
}
return absl::OkStatus();
}
absl::Status AsyncBuffer::GetOpenGlBuffer(GLuint& buffer_ref) {
if (!valid_) {
absl::Status status = AllocateOpenGlBuffer();
if (!status.ok()) {
return status;
}
}
valid_ = true;
buffer_ref = opengl_buffer_;
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/async_buffers.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include "tensorflow/lite/delegates/gpu/api.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
namespace tflite {
namespace gpu {
namespace {
TEST(AsyncBufferTest, DuplicateTest) {
if (__builtin_available(android 26, *)) {
auto Instance = OptionalAndroidHardwareBuffer::Instance;
TensorObjectDef* tie = new TensorObjectDef();
tie->object_def.data_type = DataType::FLOAT32;
tie->object_def.data_layout = DataLayout::BHWC;
tie->dimensions = Dimensions(2, 2, 2, 2);
AHardwareBuffer_Desc buffDesc = {};
buffDesc.width = 1000;
buffDesc.height = 1;
buffDesc.layers = 1;
buffDesc.format = AHARDWAREBUFFER_FORMAT_BLOB;
buffDesc.usage = AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
AHardwareBuffer* ahwb;
EXPECT_TRUE(Instance().IsSupported(&buffDesc));
EXPECT_EQ(Instance().Allocate(&buffDesc, &ahwb), 0);
std::unique_ptr<gl::EglEnvironment> env;
EXPECT_OK(gl::EglEnvironment::NewEglEnvironment(&env));
AsyncBuffer async_buffer1 = AsyncBuffer(*tie, ahwb);
GLuint buffer1, buffer2;
EXPECT_OK(async_buffer1.GetOpenGlBuffer(buffer1));
EXPECT_GE(buffer1, 0);
EXPECT_OK(async_buffer1.GetOpenGlBuffer(buffer2));
EXPECT_EQ(buffer1, buffer2);
AsyncBuffer async_buffer2 = AsyncBuffer(*tie, ahwb);
EXPECT_OK(async_buffer2.GetOpenGlBuffer(buffer2));
EXPECT_NE(buffer1, buffer2);
} else {
GTEST_SKIP();
}
}
}
}
} |
989 | cpp | tensorflow/tensorflow | api | tensorflow/lite/delegates/gpu/cl/api.cc | tensorflow/core/api_def/api_test.cc | #ifndef XLA_FFI_API_API_H_
#define XLA_FFI_API_API_H_
#include <algorithm>
#include <array>
#include <cassert>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "xla/ffi/api/c_api.h"
#ifdef __has_builtin
#define XLA_FFI_HAS_BUILTIN(x) __has_builtin(x)
#else
#define XLA_FFI_HAS_BUILTIN(x) 0
#endif
#if __has_attribute(always_inline)
#define XLA_FFI_ATTRIBUTE_ALWAYS_INLINE inline __attribute__((always_inline))
#elif defined(_MSC_VER)
#define XLA_FFI_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define XLA_FFI_ATTRIBUTE_ALWAYS_INLINE inline
#endif
#if __has_attribute(noinline)
#define XLA_FFI_ATTRIBUTE_NEVER_INLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define XLA_FFI_ATTRIBUTE_NEVER_INLINE __declspec(noinline)
#else
#define XLA_FFI_ATTRIBUTE_NEVER_INLINE
#endif
#if XLA_FFI_HAS_BUILTIN(__builtin_expect)
#define XLA_FFI_PREDICT_FALSE(x) (__builtin_expect(false || (x), false))
#define XLA_FFI_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
#else
#define XLA_FFI_PREDICT_FALSE(x) (x)
#define XLA_FFI_PREDICT_TRUE(x) (x)
#endif
inline std::ostream& operator<<(std::ostream& os,
const XLA_FFI_DataType dtype) {
switch (dtype) {
case XLA_FFI_DataType_INVALID:
return os << "INVALID";
case XLA_FFI_DataType_PRED:
return os << "PRED";
case XLA_FFI_DataType_S8:
return os << "S8";
case XLA_FFI_DataType_S16:
return os << "S16";
case XLA_FFI_DataType_S32:
return os << "S32";
case XLA_FFI_DataType_S64:
return os << "S64";
case XLA_FFI_DataType_U8:
return os << "U8";
case XLA_FFI_DataType_U16:
return os << "U16";
case XLA_FFI_DataType_U32:
return os << "U32";
case XLA_FFI_DataType_U64:
return os << "U64";
case XLA_FFI_DataType_F16:
return os << "F16";
case XLA_FFI_DataType_F32:
return os << "F32";
case XLA_FFI_DataType_F64:
return os << "F64";
case XLA_FFI_DataType_BF16:
return os << "BF16";
case XLA_FFI_DataType_C64:
return os << "C64";
case XLA_FFI_DataType_C128:
return os << "C128";
case XLA_FFI_DataType_TOKEN:
return os << "TOKEN";
case XLA_FFI_DataType_F8E5M2:
return os << "F8E5M2";
case XLA_FFI_DataType_F8E4M3FN:
return os << "F8E4M3FN";
case XLA_FFI_DataType_F8E4M3B11FNUZ:
return os << "F8E4M3B11FNUZ";
case XLA_FFI_DataType_F8E5M2FNUZ:
return os << "F8E5M2FNUZ";
case XLA_FFI_DataType_F8E4M3FNUZ:
return os << "F8E4M3FNUZ";
}
}
inline std::ostream& operator<<(std::ostream& os, const XLA_FFI_AttrType type) {
switch (type) {
case XLA_FFI_AttrType_ARRAY:
return os << "array";
case XLA_FFI_AttrType_DICTIONARY:
return os << "dictionary";
case XLA_FFI_AttrType_SCALAR:
return os << "scalar";
case XLA_FFI_AttrType_STRING:
return os << "string";
}
}
namespace xla::ffi {
template <typename... Ts>
class Binding;
template <typename Fn, typename... Ts>
class Handler;
class Ffi {
public:
static Binding<> Bind();
template <typename Fn>
static auto BindTo(Fn fn);
virtual ~Ffi() = default;
virtual XLA_FFI_Error* Call(const XLA_FFI_CallFrame* call_frame) const = 0;
static inline XLA_FFI_Error* RegisterStaticHandler(
const XLA_FFI_Api* api, std::string_view name, std::string_view platform,
XLA_FFI_Handler_Bundle bundle, XLA_FFI_Handler_Traits traits = 0);
static inline XLA_FFI_Error* RegisterStaticHandler(
const XLA_FFI_Api* api, std::string_view name, std::string_view platform,
XLA_FFI_Handler* execute, XLA_FFI_Handler_Traits traits = 0) {
return RegisterStaticHandler(
api, name, platform, XLA_FFI_Handler_Bundle{nullptr, nullptr, execute},
traits);
}
protected:
template <typename... Args>
static std::string StrCat(Args... args);
static inline XLA_FFI_Error* MakeError(const XLA_FFI_Api* api,
XLA_FFI_Error_Code errc,
std::string message);
static inline XLA_FFI_Error* InvalidArgument(const XLA_FFI_Api* api,
std::string message);
static inline XLA_FFI_Error* CheckStructSize(const XLA_FFI_Api* api,
std::string_view struct_name,
size_t expected, size_t actual);
};
XLA_FFI_Error* Ffi::RegisterStaticHandler(const XLA_FFI_Api* api,
std::string_view name,
std::string_view platform,
XLA_FFI_Handler_Bundle bundle,
XLA_FFI_Handler_Traits traits) {
XLA_FFI_Handler_Register_Args args;
args.struct_size = XLA_FFI_Handler_Register_Args_STRUCT_SIZE;
args.priv = nullptr;
args.name = XLA_FFI_ByteSpan{name.data(), name.size()};
args.platform = XLA_FFI_ByteSpan{platform.data(), platform.size()};
args.bundle = bundle;
args.traits = traits;
return api->XLA_FFI_Handler_Register(&args);
}
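// Sketch (illustrative only): a statically linked handler is typically
// registered with a call like the one below, where `api` is provided by the
// XLA runtime and `MyHandler` is an XLA_FFI_Handler implementation; both
// names are placeholders.
//
//   XLA_FFI_Error* error =
//       Ffi::RegisterStaticHandler(api, "my_custom_call", "CUDA", MyHandler);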
template <typename... Args>
std::string Ffi::StrCat(Args... args) {
std::stringstream ss;
(ss << ... << args);
return ss.str();
}
XLA_FFI_Error* Ffi::MakeError(const XLA_FFI_Api* api, XLA_FFI_Error_Code errc,
std::string message) {
XLA_FFI_Error_Create_Args args;
args.struct_size = XLA_FFI_Error_Create_Args_STRUCT_SIZE;
args.priv = nullptr;
args.errc = errc;
args.message = message.c_str();
return api->XLA_FFI_Error_Create(&args);
}
XLA_FFI_Error* Ffi::InvalidArgument(const XLA_FFI_Api* api,
std::string message) {
return MakeError(api, XLA_FFI_Error_Code_INVALID_ARGUMENT,
std::move(message));
}
XLA_FFI_Error* Ffi::CheckStructSize(const XLA_FFI_Api* api,
std::string_view struct_name,
size_t expected, size_t actual) {
if (expected != actual) {
return InvalidArgument(
api, StrCat("Unexpected ", struct_name, " size: expected ", expected,
" got ", actual, ". Check installed software versions."));
}
return nullptr;
}
class Dictionary;
namespace internal {
struct RemainingArgsTag {};
struct RemainingRetsTag {};
template <typename T>
struct RetTag {};
template <typename T>
struct AttrTag {};
template <typename T = Dictionary>
struct AttrsTag {};
template <typename T>
struct CtxTag {};
template <template <typename> class Tag, typename... Ts>
struct NumTagged;
template <template <typename> class Tag>
struct NumTagged<Tag> {
static constexpr int64_t value = 0;
};
template <template <typename> class Tag, typename T, typename... Ts>
struct NumTagged<Tag, Tag<T>, Ts...> {
static constexpr int64_t value = 1 + NumTagged<Tag, Ts...>::value;
};
template <template <typename> class Tag, typename T, typename... Ts>
struct NumTagged<Tag, T, Ts...> {
static constexpr int64_t value = 0 + NumTagged<Tag, Ts...>::value;
};
template <typename... Ts>
using HasRemainingArgsTag =
std::disjunction<std::is_same<RemainingArgsTag, Ts>...>;
template <typename... Ts>
using HasRemainingRetsTag =
std::disjunction<std::is_same<RemainingRetsTag, Ts>...>;
template <typename T>
XLA_FFI_DataType NativeTypeToCApiDataType() {
if constexpr (std::is_same_v<T, bool>) {
return XLA_FFI_DataType_PRED;
} else if constexpr (std::is_same_v<T, int8_t>) {
return XLA_FFI_DataType_S8;
} else if constexpr (std::is_same_v<T, int16_t>) {
return XLA_FFI_DataType_S16;
} else if constexpr (std::is_same_v<T, int32_t>) {
return XLA_FFI_DataType_S32;
} else if constexpr (std::is_same_v<T, int64_t>) {
return XLA_FFI_DataType_S64;
} else if constexpr (std::is_same_v<T, uint8_t>) {
return XLA_FFI_DataType_U8;
} else if constexpr (std::is_same_v<T, uint16_t>) {
return XLA_FFI_DataType_U16;
} else if constexpr (std::is_same_v<T, uint32_t>) {
return XLA_FFI_DataType_U32;
} else if constexpr (std::is_same_v<T, uint64_t>) {
return XLA_FFI_DataType_U64;
} else if constexpr (std::is_same_v<T, float>) {
return XLA_FFI_DataType_F32;
} else if constexpr (std::is_same_v<T, double>) {
return XLA_FFI_DataType_F64;
} else if constexpr (std::is_same_v<T, std::complex<float>>) {
return XLA_FFI_DataType_C64;
} else {
static_assert(std::is_same_v<T, std::complex<double>>,
"unsupported FFI data type");
return XLA_FFI_DataType_C128;
}
}
}
template <typename... Ts>
class Binding {
public:
template <typename T>
Binding<Ts..., T> Arg() && {
return {std::move(*this)};
}
template <typename T>
Binding<Ts..., internal::RetTag<T>> Ret() && {
return {std::move(*this)};
}
Binding<Ts..., internal::RemainingArgsTag> RemainingArgs() && {
static_assert(!internal::HasRemainingArgsTag<Ts...>::value,
"remaining arguments can be passed just once");
return {std::move(*this)};
}
Binding<Ts..., internal::RemainingRetsTag> RemainingResults() && {
static_assert(!internal::HasRemainingRetsTag<Ts...>::value,
"remaining results can be passed just once");
return {std::move(*this)};
}
template <typename T>
Binding<Ts..., internal::CtxTag<T>> Ctx() && {
return {std::move(*this)};
}
template <typename T>
Binding<Ts..., internal::AttrTag<T>> Attr(std::string attr) && {
static_assert(internal::NumTagged<internal::AttrsTag, Ts...>::value == 0,
"dictionary attributes can't be mixed with regular ones");
attrs_.push_back(std::move(attr));
return {std::move(*this)};
}
template <typename T = Dictionary>
Binding<Ts..., internal::AttrsTag<T>> Attrs() && {
static_assert(internal::NumTagged<internal::AttrTag, Ts...>::value == 0,
"dictionary attributes can't be mixed with regular ones");
return {std::move(*this)};
}
template <typename Fn>
std::unique_ptr<Handler<Fn, Ts...>> To(Fn fn) {
return std::unique_ptr<Handler<Fn, Ts...>>(
new Handler<Fn, Ts...>(std::move(fn), std::move(attrs_)));
}
private:
template <typename...>
friend class Binding;
friend class Ffi;
explicit Binding() {
static_assert(sizeof...(Ts) == 0, "arguments must be empty");
}
template <typename... TTs>
Binding(Binding<TTs...>&& other)
: attrs_(std::move(other.attrs_)) {}
Binding(Binding&) = delete;
std::vector<std::string> attrs_;
};
inline Binding<> Ffi::Bind() { return xla::ffi::Binding<>(); }
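// Sketch (illustrative only): Binding is a move-only builder, so parameters
// are chained with rvalue-qualified calls in the order they will be decoded,
// and To() finally wraps a callable into a Handler. `my_callable` is a
// placeholder.
//
//   auto handler = Ffi::Bind().Attr<int32_t>("dim").To(my_callable);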
template <typename T>
struct ArgBinding {
using Arg = void;
};
template <typename T>
struct RetBinding {
using Ret = void;
};
template <typename T>
struct AttrBinding {
using Attr = void;
};
template <typename T>
struct AttrsBinding {
using Attrs = void;
};
template <typename T>
struct CtxBinding {
using Ctx = void;
};
namespace internal {
template <typename Param>
inline constexpr bool is_arg_binding_v =
!std::is_void_v<typename ArgBinding<Param>::Arg>;
template <typename Param>
inline constexpr bool is_ret_binding_v =
!std::is_void_v<typename RetBinding<Param>::Ret>;
template <typename Param>
inline constexpr bool is_attr_binding_v =
!std::is_void_v<typename AttrBinding<Param>::Attr>;
template <typename Param>
inline constexpr bool is_attrs_binding_v =
!std::is_void_v<typename AttrsBinding<Param>::Attrs>;
template <typename Param>
inline constexpr bool is_ctx_binding_v =
!std::is_void_v<typename CtxBinding<Param>::Ctx>;
template <typename Fn, typename... Params>
struct BindOne;
template <typename Fn, typename Param, typename... Params>
struct BindOne<Fn, Param, Params...> {
template <typename InFlightBinding>
static auto To(Fn fn, InFlightBinding binding) {
if constexpr (is_arg_binding_v<Param>) {
return BindOne<Fn, Params...>::To(
std::move(fn),
std::move(binding).template Arg<typename ArgBinding<Param>::Arg>());
} else if constexpr (is_ret_binding_v<Param>) {
return BindOne<Fn, Params...>::To(
std::move(fn),
std::move(binding).template Ret<typename RetBinding<Param>::Ret>());
} else if constexpr (is_attr_binding_v<Param>) {
return BindOne<Fn, Params...>::To(
std::move(fn),
std::move(binding).template Attr<typename AttrBinding<Param>::Attr>(
std::string(AttrBinding<Param>::name())));
} else if constexpr (is_attrs_binding_v<Param>) {
return BindOne<Fn, Params...>::To(
std::move(fn),
std::move(binding)
.template Attrs<typename AttrsBinding<Param>::Attrs>());
} else if constexpr (is_ctx_binding_v<Param>) {
return BindOne<Fn, Params...>::To(
std::move(fn),
std::move(binding).template Ctx<typename CtxBinding<Param>::Ctx>());
} else {
static_assert(sizeof(Param) == 0,
"parameter is not supported for binding");
}
}
};
template <typename Fn>
struct BindOne<Fn> {
template <typename InFlightBinding>
static auto To(Fn fn, InFlightBinding binding) {
return binding.To(std::move(fn));
}
};
template <typename Fn>
struct Bind;
template <typename ResultType, typename... Params>
struct Bind<ResultType (*)(Params...)> {
using Fn = ResultType (*)(Params...);
static auto To(Fn fn) {
return BindOne<Fn, Params...>::To(std::move(fn), Ffi::Bind());
}
};
template <typename ResultType, typename Fn, typename... Params>
struct Bind<ResultType (Fn::*)(Params...) const> {
static auto To(Fn fn) {
return BindOne<Fn, Params...>::To(std::move(fn), Ffi::Bind());
}
};
}
template <typename Fn>
auto Ffi::BindTo(Fn fn) {
if constexpr (std::is_pointer_v<Fn>) {
return internal::Bind<Fn>::To(fn);
} else {
return internal::Bind<decltype(&Fn::operator())>::To(std::move(fn));
}
}
template <typename T>
class Result {
public:
Result(T value) : value_(value) {}
T& operator*() { return value_; }
T* operator->() { return &value_; }
private:
T value_;
};
template <typename T, char const* attr_name>
class Attr {
public:
Attr(T value) : value_(value) {}
T& operator*() { return value_; }
T* operator->() { return &value_; }
private:
T value_;
};
template <typename T, const char* attr_name>
struct AttrBinding<Attr<T, attr_name>> {
using Attr = T;
static constexpr std::string_view name() { return attr_name; }
};
template <>
struct AttrsBinding<Dictionary> {
using Attrs = Dictionary;
};
template <typename T>
struct ArgDecoding;
template <typename T>
struct RetDecoding;
template <typename T>
struct AttrDecoding;
template <typename T>
struct CtxDecoding;
template <typename T>
struct ResultEncoding;
class DiagnosticEngine;
class InFlightDiagnostic {
public:
explicit InFlightDiagnostic(DiagnosticEngine* engine, std::string s)
: engine_(engine) {
stream_ << s;
}
InFlightDiagnostic(const InFlightDiagnostic&) = delete;
InFlightDiagnostic& operator=(const InFlightDiagnostic&) = delete;
~InFlightDiagnostic();
template <typename Arg>
InFlightDiagnostic& operator<<(Arg&& arg) {
stream_ << std::forward<Arg>(arg);
return *this;
}
template <typename T>
operator std::optional<T>() const {
return std::nullopt;
}
private:
DiagnosticEngine* engine_;
std::stringstream stream_;
};
class DiagnosticEngine {
public:
DiagnosticEngine() = default;
DiagnosticEngine(const DiagnosticEngine&) = delete;
DiagnosticEngine& operator=(const DiagnosticEngine&) = delete;
InFlightDiagnostic Emit(std::string message) {
return InFlightDiagnostic(this, std::move(message));
}
std::string Result() const { return acc_; }
private:
friend class InFlightDiagnostic;
void append(std::string s) { acc_.append(std::move(s)); }
std::string acc_;
};
inline InFlightDiagnostic::~InFlightDiagnostic() {
engine_->append(stream_.str());
}
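// Sketch (illustrative only): DiagnosticEngine accumulates every message that
// an InFlightDiagnostic streams before it is destroyed, and the diagnostic
// converts to std::nullopt so decoders can bail out in a single return.
//
//   DiagnosticEngine diag;
//   diag.Emit("expected rank ") << 2;
//   // diag.Result() now contains "expected rank 2"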
namespace internal {
struct DecodingOffsets {
int64_t args = 0;
int64_t rets = 0;
int64_t attrs = 0;
};
struct DecodingContext {
const XLA_FFI_CallFrame* call_frame;
const std::string* attrs_names;
const std::size_t* attrs_idx;
};
template <typename T>
struct Decode {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<T> call(DecodingOffsets& offsets, DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
int64_t idx = offsets.args++;
return ArgDecoding<T>::Decode(ctx.call_frame->args.types[idx],
ctx.call_frame->args.args[idx], diagnostic);
}
};
}
template <typename T>
struct internal::Decode<internal::RetTag<T>> {
static std::optional<Result<T>> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
int64_t idx = offsets.rets++;
return RetDecoding<T>::Decode(ctx.call_frame->rets.types[idx],
ctx.call_frame->rets.rets[idx], diagnostic);
}
};
template <typename T>
struct internal::Decode<internal::AttrTag<T>> {
using R = typename AttrDecoding<T>::Type;
static std::optional<R> call(DecodingOffsets& offsets, DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
int64_t i = offsets.attrs++;
size_t idx = ctx.attrs_idx[i];
XLA_FFI_AttrType attr_type = ctx.call_frame->attrs.types[idx];
XLA_FFI_ByteSpan* attr_name = ctx.call_frame->attrs.names[idx];
void* attr = ctx.call_frame->attrs.attrs[idx];
std::string_view attr_name_view = {attr_name->ptr, attr_name->len};
if (attr_name_view != ctx.attrs_names[i]) {
return diagnostic.Emit("Attribute name mismatch: ")
<< attr_name_view << " vs " << ctx.attrs_names[i];
}
return AttrDecoding<T>::Decode(attr_type, attr, diagnostic);
}
};
template <typename T>
struct internal::Decode<internal::CtxTag<T>> {
using R = typename CtxDecoding<T>::Type;
static std::optional<R> call(DecodingOffsets& offsets, DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return CtxDecoding<T>::Decode(ctx.call_frame->api, ctx.call_frame->ctx,
diagnostic);
}
};
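// Note (derived from the specializations above): each Decode variant consumes
// the next slot of its kind from DecodingOffsets, so arguments, results and
// attributes are decoded strictly in the order they were declared on Binding,
// while Ctx decoding does not advance any offset.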
template <typename E>
class Unexpected;
template <typename T, typename E>
class Expected {
public:
constexpr Expected(T value) : data_(std::move(value)) {}
constexpr Expected(Unexpected<E> u);
constexpr operator bool() const {
return has_value();
}
constexpr T& operator*() & { return value(); }
constexpr const T& operator*() const& { return value(); }
constexpr T&& operator*() && { return std::move(value()); }
constexpr const T& operator*() const&& { return std::move(value()); }
constexpr T* operator->() { return &value(); }
constexpr const T* operator->() const { return &value(); }
constexpr bool has_value() const { return std::holds_alternative<T>(data_); }
constexpr T& value() & { return std::get<T>(data_); }
constexpr const T& value() const& { return std::get<T>(data_); }
constexpr T&& value() && { return std::get<T>(std::move(data_)); }
constexpr const T& value() const&& { return std::get<T>(std::move(data_)); }
constexpr E& error() & { return std::get<E>(data_); }
constexpr const E& error() const& { return std::get<E>(data_); }
constexpr E&& error() && { return std::get<E>(std::move(data_)); }
constexpr const E&& error() const&& { return std::get<E>(std::move(data_)); }
private:
std::variant<T, E> data_;
};
template <typename E>
class Unexpected {
public:
explicit constexpr Unexpected(E error) : error_(std::move(error)) {}
private:
template <typename, typename>
friend class Expected;
E error_;
};
Unexpected(const char*) -> Unexpected<std::string>;
template <typename T, typename E>
constexpr Expected<T, E>::Expected(Unexpected<E> u)
: data_(std::move(u.error_)) {}
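// Sketch (illustrative only): Expected<T, E> is a minimal expected/unexpected
// pair; constructing from Unexpected selects the error alternative, and the
// deduction guide above turns a string literal into Unexpected<std::string>.
//
//   Expected<int, std::string> ok = 42;
//   Expected<int, std::string> bad = Unexpected("parse failure");
//   if (!bad) { /* bad.error() == "parse failure" */ }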
class RemainingArgs {
public:
RemainingArgs(const XLA_FFI_Args* args, size_t offset)
: args_(args), offset_(offset) {
assert(offset <= args_->size && "illegal remaining args offset");
}
size_t size() const { return args_->size | #include <ctype.h>
#include <algorithm>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFilePattern[] = "api_def_*.pbtxt";
string DefaultApiDefDir() {
return GetDataDependencyFilepath(
io::JoinPath("tensorflow", "core", "api_def", "base_api"));
}
string PythonApiDefDir() {
return GetDataDependencyFilepath(
io::JoinPath("tensorflow", "core", "api_def", "python_api"));
}
void GetGoldenApiDefs(Env* env, const string& api_files_dir,
std::unordered_map<string, ApiDef>* name_to_api_def) {
std::vector<string> matching_paths;
TF_CHECK_OK(env->GetMatchingPaths(
io::JoinPath(api_files_dir, kApiDefFilePattern), &matching_paths));
for (auto& file_path : matching_paths) {
string file_contents;
TF_CHECK_OK(ReadFileToString(env, file_path, &file_contents));
file_contents = PBTxtFromMultiline(file_contents);
ApiDefs api_defs;
QCHECK(tensorflow::protobuf::TextFormat::ParseFromString(file_contents,
&api_defs))
<< "Failed to load " << file_path;
CHECK_EQ(api_defs.op_size(), 1);
(*name_to_api_def)[api_defs.op(0).graph_op_name()] = api_defs.op(0);
}
}
void TestAllApiDefsHaveCorrespondingOp(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
std::unordered_set<string> op_names;
for (const auto& op : ops.op()) {
op_names.insert(op.name());
}
for (const auto& name_and_api_def : api_defs_map) {
ASSERT_TRUE(op_names.find(name_and_api_def.first) != op_names.end())
<< name_and_api_def.first << " op has ApiDef but missing from ops. "
<< "Does api_def_" << name_and_api_def.first << " need to be deleted?";
}
}
void TestAllApiDefInputArgsAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_arg : api_def.in_arg()) {
bool found_arg = false;
for (const auto& op_arg : op.input_arg()) {
if (api_def_arg.name() == op_arg.name()) {
found_arg = true;
break;
}
}
ASSERT_TRUE(found_arg)
<< "Input argument " << api_def_arg.name()
<< " (overwritten in api_def_" << op.name()
<< ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestAllApiDefOutputArgsAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_arg : api_def.out_arg()) {
bool found_arg = false;
for (const auto& op_arg : op.output_arg()) {
if (api_def_arg.name() == op_arg.name()) {
found_arg = true;
break;
}
}
ASSERT_TRUE(found_arg)
<< "Output argument " << api_def_arg.name()
<< " (overwritten in api_def_" << op.name()
<< ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestAllApiDefAttributeNamesAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_attr : api_def.attr()) {
bool found_attr = false;
for (const auto& op_attr : op.attr()) {
if (api_def_attr.name() == op_attr.name()) {
found_attr = true;
}
}
ASSERT_TRUE(found_attr)
<< "Attribute " << api_def_attr.name() << " (overwritten in api_def_"
<< op.name() << ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestDeprecatedAttributesSetCorrectly(
const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& name_and_api_def : api_defs_map) {
int num_deprecated_endpoints = 0;
const auto& api_def = name_and_api_def.second;
for (const auto& endpoint : api_def.endpoint()) {
if (endpoint.deprecated()) {
++num_deprecated_endpoints;
}
}
const auto& name = name_and_api_def.first;
ASSERT_TRUE(api_def.deprecation_message().empty() ||
num_deprecated_endpoints == 0)
<< "Endpoints are set to 'deprecated' for deprecated op " << name
<< ". If an op is deprecated (i.e. deprecation_message is set), "
<< "all the endpoints are deprecated implicitly and 'deprecated' "
<< "field should not be set.";
if (num_deprecated_endpoints > 0) {
ASSERT_NE(num_deprecated_endpoints, api_def.endpoint_size())
<< "All " << name << " endpoints are deprecated. Please, set "
<< "deprecation_message in api_def_" << name << ".pbtxt instead. "
<< "to indicate that the op is deprecated.";
}
}
}
void TestDeprecationVersionSetCorrectly(
const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& name_and_api_def : api_defs_map) {
const auto& name = name_and_api_def.first;
const auto& api_def = name_and_api_def.second;
if (api_def.deprecation_version() != 0) {
ASSERT_TRUE(api_def.deprecation_version() > 0)
<< "Found ApiDef with negative deprecation_version";
ASSERT_FALSE(api_def.deprecation_message().empty())
<< "ApiDef that includes deprecation_version > 0 must also specify "
<< "a deprecation_message. Op " << name
<< " has deprecation_version > 0 but deprecation_message is not set.";
}
}
}
class BaseApiTest : public ::testing::Test {
protected:
BaseApiTest() {
OpRegistry::Global()->Export(false, &ops_);
const std::vector<string> multi_line_fields = {"description"};
Env* env = Env::Default();
GetGoldenApiDefs(env, DefaultApiDefDir(), &api_defs_map_);
}
OpList ops_;
std::unordered_map<string, ApiDef> api_defs_map_;
};
TEST_F(BaseApiTest, AllOpsAreInApiDef) {
auto* excluded_ops = GetExcludedOps();
for (const auto& op : ops_.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
EXPECT_TRUE(api_defs_map_.find(op.name()) != api_defs_map_.end())
<< op.name() << " op does not have api_def_*.pbtxt file. "
<< "Please add api_def_" << op.name() << ".pbtxt file "
<< "under tensorflow/core/api_def/base_api/ directory.";
}
}
TEST_F(BaseApiTest, AllApiDefsHaveCorrespondingOp) {
TestAllApiDefsHaveCorrespondingOp(ops_, api_defs_map_);
}
string GetOpDefHasDocStringError(const string& op_name) {
return strings::Printf(
"OpDef for %s has a doc string. "
"Doc strings must be defined in ApiDef instead of OpDef. "
"Please, add summary and descriptions in api_def_%s"
".pbtxt file instead",
op_name.c_str(), op_name.c_str());
}
TEST_F(BaseApiTest, OpDefsShouldNotHaveDocs) {
auto* excluded_ops = GetExcludedOps();
for (const auto& op : ops_.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
ASSERT_TRUE(op.summary().empty()) << GetOpDefHasDocStringError(op.name());
ASSERT_TRUE(op.description().empty())
<< GetOpDefHasDocStringError(op.name());
for (const auto& arg : op.input_arg()) {
ASSERT_TRUE(arg.description().empty())
<< GetOpDefHasDocStringError(op.name());
}
for (const auto& arg : op.output_arg()) {
ASSERT_TRUE(arg.description().empty())
<< GetOpDefHasDocStringError(op.name());
}
for (const auto& attr : op.attr()) {
ASSERT_TRUE(attr.description().empty())
<< GetOpDefHasDocStringError(op.name());
}
}
}
TEST_F(BaseApiTest, AllApiDefInputArgsAreValid) {
TestAllApiDefInputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, AllApiDefOutputArgsAreValid) {
TestAllApiDefOutputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, DeprecationSetCorrectly) {
TestDeprecatedAttributesSetCorrectly(api_defs_map_);
}
TEST_F(BaseApiTest, DeprecationVersionSetCorrectly) {
TestDeprecationVersionSetCorrectly(api_defs_map_);
}
class PythonApiTest : public ::testing::Test {
protected:
PythonApiTest() {
OpRegistry::Global()->Export(false, &ops_);
const std::vector<string> multi_line_fields = {"description"};
Env* env = Env::Default();
GetGoldenApiDefs(env, PythonApiDefDir(), &api_defs_map_);
}
OpList ops_;
std::unordered_map<string, ApiDef> api_defs_map_;
};
TEST_F(PythonApiTest, AllApiDefsHaveCorrespondingOp) {
TestAllApiDefsHaveCorrespondingOp(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefInputArgsAreValid) {
TestAllApiDefInputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefOutputArgsAreValid) {
TestAllApiDefOutputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, DeprecationSetCorrectly) {
TestDeprecatedAttributesSetCorrectly(api_defs_map_);
}
TEST_F(PythonApiTest, DeprecationVersionSetCorrectly) {
TestDeprecationVersionSetCorrectly(api_defs_map_);
}
}
} |
990 | cpp | tensorflow/tensorflow | android_hardware_buffer | tensorflow/lite/delegates/gpu/android_hardware_buffer.cc | tensorflow/lite/delegates/gpu/android_hardware_buffer_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_ANDROID_HARDWARE_BUFFER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_ANDROID_HARDWARE_BUFFER_H_
#include <stdint.h>
#ifdef __ANDROID__
#include <android/hardware_buffer.h>
#else
extern "C" {
typedef struct AHardwareBuffer AHardwareBuffer;
typedef struct AHardwareBuffer_Desc AHardwareBuffer_Desc;
struct AHardwareBuffer_Desc {
uint32_t width;
uint32_t height;
uint32_t layers;
uint32_t format;
uint64_t usage;
uint32_t stride;
uint32_t rfu0;
uint64_t rfu1;
};
}
#endif
namespace tflite::gpu {
class OptionalAndroidHardwareBuffer {
public:
static OptionalAndroidHardwareBuffer& Instance() {
static OptionalAndroidHardwareBuffer instance;
return instance;
}
bool Supported() { return supported_; }
int IsSupported(const AHardwareBuffer_Desc* description) {
return is_supported_(description);
}
int Allocate(const AHardwareBuffer_Desc* description,
AHardwareBuffer** buffer) {
return allocate_(description, buffer);
}
void Acquire(AHardwareBuffer* buffer) { return acquire_(buffer); }
void Release(AHardwareBuffer* buffer) { return release_(buffer); }
void Describe(AHardwareBuffer* buffer, AHardwareBuffer_Desc* desc) {
return describe_(buffer, desc);
}
private:
void* dlopen_handle_;
int (*is_supported_)(const AHardwareBuffer_Desc* desc);
int (*allocate_)(const AHardwareBuffer_Desc* desc, AHardwareBuffer** buffer);
void (*acquire_)(AHardwareBuffer* buffer);
void (*release_)(AHardwareBuffer* buffer);
void (*describe_)(AHardwareBuffer* buffer, AHardwareBuffer_Desc* desc);
bool supported_;
OptionalAndroidHardwareBuffer();
OptionalAndroidHardwareBuffer(const OptionalAndroidHardwareBuffer&) = delete;
~OptionalAndroidHardwareBuffer() = default;
};
}
#endif
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include <dlfcn.h>
namespace tflite::gpu {
OptionalAndroidHardwareBuffer::OptionalAndroidHardwareBuffer() {
#ifdef __ANDROID__
dlopen_handle_ = dlopen("libnativewindow.so", RTLD_NOW);
if (dlopen_handle_ == nullptr) {
supported_ = false;
return;
}
allocate_ = reinterpret_cast<decltype(allocate_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_allocate"));
acquire_ = reinterpret_cast<decltype(acquire_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_acquire"));
release_ = reinterpret_cast<decltype(release_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_release"));
describe_ = reinterpret_cast<decltype(describe_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_describe"));
is_supported_ = reinterpret_cast<decltype(is_supported_)>(
dlsym(dlopen_handle_, "AHardwareBuffer_isSupported"));
supported_ =
(allocate_ != nullptr && acquire_ != nullptr && release_ != nullptr &&
describe_ != nullptr && is_supported_ != nullptr);
#else
dlopen_handle_ = nullptr;
allocate_ = nullptr;
acquire_ = nullptr;
release_ = nullptr;
describe_ = nullptr;
is_supported_ = nullptr;
supported_ = false;
#endif
}
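// Sketch of typical use (illustrative only): the singleton resolves the
// AHardwareBuffer entry points via dlopen on first use, so callers should
// gate every call on Supported(); `desc` below is assumed to be a filled-in
// AHardwareBuffer_Desc.
//
//   auto& ahwb_api = OptionalAndroidHardwareBuffer::Instance();
//   if (ahwb_api.Supported() && ahwb_api.IsSupported(&desc)) {
//     AHardwareBuffer* buffer = nullptr;
//     if (ahwb_api.Allocate(&desc, &buffer) == 0) {
//       ahwb_api.Release(buffer);
//     }
//   }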
} | #include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include <gtest/gtest.h>
using tflite::gpu::OptionalAndroidHardwareBuffer;
auto Instance = OptionalAndroidHardwareBuffer::Instance;
namespace {
#ifndef __ANDROID__
TEST(OptionalAndroidHardwareBufferTest, NotSupportedOnNonAndroid) {
EXPECT_EQ(Instance().Supported(), false);
}
#else
TEST(OptionalAndroidHardwareBufferTest, SupportedOnAndroid) {
EXPECT_EQ(Instance().Supported(), true);
}
TEST(OptionalAndroidHardwareBufferTest, CanAllocateAndReleaseOnAndroid) {
EXPECT_EQ(Instance().Supported(), true);
AHardwareBuffer* buffer;
AHardwareBuffer_Desc description{};
description.width = 1600;
description.height = 1;
description.layers = 1;
description.rfu0 = 0;
description.rfu1 = 0;
description.stride = 1;
description.format = AHARDWAREBUFFER_FORMAT_BLOB;
description.usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
EXPECT_TRUE(Instance().IsSupported(&description));
EXPECT_EQ(Instance().Allocate(&description, &buffer), 0);
Instance().Release(buffer);
}
TEST(OptionalAndroidHardwareBufferTest, CanAcquireAndReleaseOnAndroid) {
EXPECT_EQ(Instance().Supported(), true);
AHardwareBuffer* buffer;
AHardwareBuffer_Desc description{};
description.width = 1600;
description.height = 1;
description.layers = 1;
description.rfu0 = 0;
description.rfu1 = 0;
description.stride = 1;
description.format = AHARDWAREBUFFER_FORMAT_BLOB;
description.usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
EXPECT_TRUE(Instance().IsSupported(&description));
EXPECT_EQ(Instance().Allocate(&description, &buffer), 0);
Instance().Acquire(buffer);
Instance().Release(buffer);
Instance().Release(buffer);
}
#endif
} |
991 | cpp | tensorflow/tensorflow | buffer | tensorflow/lite/delegates/gpu/cl/buffer.cc | tensorflow/lite/delegates/gpu/cl/buffer_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_METAL_BUFFER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_METAL_BUFFER_H_
#include <cstring>
#include <string>
#include <vector>
#import <Metal/Metal.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h"
#include "tensorflow/lite/delegates/gpu/metal/gpu_object.h"
namespace tflite {
namespace gpu {
namespace metal {
class Buffer : public GPUObject {
public:
Buffer() {}
Buffer(id<MTLBuffer> buffer, size_t size_in_bytes);
Buffer(Buffer&& buffer);
Buffer& operator=(Buffer&& buffer);
Buffer(const Buffer&) = delete;
Buffer& operator=(const Buffer&) = delete;
~Buffer();
uint64_t GetMemorySizeInBytes() const { return size_; }
id<MTLBuffer> GetMemoryPtr() const { return buffer_; }
template <typename T>
absl::Status WriteData(const absl::Span<T> data);
template <typename T>
absl::Status ReadData(std::vector<T>* result) const;
absl::Status GetGPUResources(const GPUObjectDescriptor* obj_ptr,
GPUResourcesWithValue* resources) const override;
absl::Status CreateFromBufferDescriptor(const BufferDescriptor& desc, id<MTLDevice> device);
private:
void Release();
id<MTLBuffer> buffer_ = nullptr;
size_t size_;
};
absl::Status CreateBuffer(size_t size_in_bytes, const void* data, id<MTLDevice> device,
Buffer* result);
template <typename T>
absl::Status Buffer::WriteData(const absl::Span<T> data) {
if (size_ != sizeof(T) * data.size()) {
return absl::InvalidArgumentError(
"absl::Span<T> data size is different from buffer allocated size.");
}
std::memcpy([buffer_ contents], data.data(), size_);
return absl::OkStatus();
}
template <typename T>
absl::Status Buffer::ReadData(std::vector<T>* result) const {
if (size_ % sizeof(T) != 0) {
return absl::UnknownError("Wrong element size(typename T is not correct?");
}
const int elements_count = size_ / sizeof(T);
result->resize(elements_count);
std::memcpy(result->data(), [buffer_ contents], size_);
return absl::OkStatus();
}
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/metal/buffer.h"
#include <utility>
namespace tflite {
namespace gpu {
namespace metal {
Buffer::Buffer(id<MTLBuffer> buffer, size_t size_in_bytes)
: buffer_(buffer), size_(size_in_bytes) {}
Buffer::Buffer(Buffer&& buffer) : buffer_(buffer.buffer_), size_(buffer.size_) {
buffer.buffer_ = nullptr;
buffer.size_ = 0;
}
Buffer& Buffer::operator=(Buffer&& buffer) {
if (this != &buffer) {
Release();
std::swap(size_, buffer.size_);
std::swap(buffer_, buffer.buffer_);
}
return *this;
}
Buffer::~Buffer() { Release(); }
void Buffer::Release() {
if (buffer_) {
buffer_ = nullptr;
size_ = 0;
}
}
absl::Status Buffer::GetGPUResources(const GPUObjectDescriptor* obj_ptr,
GPUResourcesWithValue* resources) const {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(obj_ptr);
if (!buffer_desc) {
return absl::InvalidArgumentError("Expected BufferDescriptor on input.");
}
resources->buffers.push_back({"buffer", {buffer_, 0}});
return absl::OkStatus();
}
absl::Status Buffer::CreateFromBufferDescriptor(const BufferDescriptor& desc,
id<MTLDevice> device) {
size_ = desc.size;
if (desc.data.empty()) {
buffer_ =
[device newBufferWithLength:size_ options:MTLResourceStorageModeShared];
} else {
buffer_ = [device newBufferWithBytes:desc.data.data()
length:size_
options:MTLResourceStorageModeShared];
}
return absl::OkStatus();
}
absl::Status CreateBuffer(size_t size_in_bytes, const void* data,
id<MTLDevice> device, Buffer* result) {
id<MTLBuffer> buffer;
if (data) {
buffer = [device newBufferWithBytes:data
length:size_in_bytes
options:MTLResourceStorageModeShared];
} else {
buffer = [device newBufferWithLength:size_in_bytes
options:MTLResourceStorageModeShared];
}
*result = Buffer(buffer, size_in_bytes);
return absl::OkStatus();
}
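// Sketch of typical use (illustrative only): CreateBuffer allocates a shared
// storage MTLBuffer (optionally copying `data`), and ReadData/WriteData then
// copy through the MTLBuffer contents pointer, so the byte size must match
// the allocation exactly. `device` is assumed to be an existing id<MTLDevice>.
//
//   Buffer buf;
//   std::vector<float> src = {1.f, 2.f, 3.f, 4.f};
//   if (CreateBuffer(src.size() * sizeof(float), src.data(), device, &buf).ok()) {
//     std::vector<float> dst;
//     buf.ReadData(&dst).IgnoreError();  // dst now mirrors src
//   }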
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLTest, BufferTestFloat) {
const std::vector<float> data = {1.0, 2.0, 3.0, -4.0, 5.1};
Buffer buffer;
ASSERT_OK(CreateReadWriteBuffer(sizeof(float) * 5, &env_.context(), &buffer));
ASSERT_OK(buffer.WriteData(env_.queue(),
absl::MakeConstSpan(data.data(), data.size())));
std::vector<float> gpu_data;
ASSERT_OK(buffer.ReadData<float>(env_.queue(), &gpu_data));
EXPECT_THAT(gpu_data, Pointwise(FloatNear(0.0f), data));
}
TEST_F(OpenCLTest, BufferTestHalf) {
const std::vector<half> data = {half(1.4), half(2.1), half(2.2)};
Buffer buffer;
ASSERT_OK(CreateReadWriteBuffer(sizeof(half) * 3, &env_.context(), &buffer));
ASSERT_OK(buffer.WriteData(env_.queue(),
absl::MakeConstSpan(data.data(), data.size())));
std::vector<half> gpu_data;
ASSERT_OK(buffer.ReadData<half>(env_.queue(), &gpu_data));
EXPECT_THAT(gpu_data, Pointwise(FloatNear(0.0f), data));
}
}
}
}
} |
992 | cpp | tensorflow/tensorflow | cl_arguments | tensorflow/lite/delegates/gpu/cl/cl_arguments.cc | tensorflow/lite/delegates/gpu/cl/cl_arguments_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_CL_ARGUMENTS_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_CL_CL_ARGUMENTS_H_
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/delegates/gpu/cl/cl_context.h"
#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/arguments.h"
namespace tflite {
namespace gpu {
namespace cl {
class CLArguments : public ArgumentsBinder {
public:
CLArguments() = default;
absl::Status Init(const GpuInfo& gpu_info,
CLContext* context, Arguments* args, std::string* code);
absl::Status Init(const GpuInfo& gpu_info, Arguments* args,
CLContext* context);
CLArguments(CLArguments&& args) = default;
CLArguments& operator=(CLArguments&& args) = default;
CLArguments(const CLArguments&) = delete;
CLArguments& operator=(const CLArguments&) = delete;
absl::Status SetInt(const std::string& name, int value) override;
absl::Status SetFloat(const std::string& name, float value) override;
absl::Status SetHalf(const std::string& name, half value) override;
absl::Status SetObjectRef(const std::string& name, const GPUObject* object);
absl::Status Bind(cl_kernel kernel, int offset = 0);
bool HasEqualScalarArguments(const CLArguments& other) const;
private:
absl::Status AllocateObjects(const Arguments& args, CLContext* context);
absl::Status AddObjectArgs(const GpuInfo& gpu_info, const Arguments& args);
void CopyArguments(const Arguments& args, bool use_f32_for_halfs);
void RenameArgumentsInCode(std::string* code);
std::string GetListOfArgs();
void AddBuffer(const std::string& name, const GPUBufferDescriptor& desc);
void AddImage2D(const std::string& name, const GPUImage2DDescriptor& desc);
void AddImage2DArray(const std::string& name,
const GPUImage2DArrayDescriptor& desc);
void AddImage3D(const std::string& name, const GPUImage3DDescriptor& desc);
void AddImageBuffer(const std::string& name,
const GPUImageBufferDescriptor& desc);
void AddCustomMemory(const std::string& name,
const GPUCustomMemoryDescriptor& desc);
void AddGPUResources(const std::string& name, const GPUResources& resources);
absl::Status SetObjectsResources(const Arguments& args);
absl::Status SetGPUResources(const std::string& name,
const GPUResourcesWithValue& resources);
absl::Status SetImage2D(const std::string& name, cl_mem memory);
absl::Status SetBuffer(const std::string& name, cl_mem memory);
absl::Status SetImage2DArray(const std::string& name, cl_mem memory);
absl::Status SetImage3D(const std::string& name, cl_mem memory);
absl::Status SetImageBuffer(const std::string& name, cl_mem memory);
absl::Status SetCustomMemory(const std::string& name, cl_mem memory);
static constexpr char kArgsPrefix[] = "args.";
struct IntValue {
int value;
bool active = false;
uint32_t offset = -1;
bool operator==(const IntValue& other) const {
return value == other.value && offset == other.offset &&
active == other.active;
}
};
std::map<std::string, IntValue> int_values_;
std::vector<int32_t> shared_int4s_data_;
struct FloatValue {
float value;
bool active = false;
uint32_t offset = -1;
bool operator==(const FloatValue& other) const {
return value == other.value && offset == other.offset &&
active == other.active;
}
};
std::map<std::string, FloatValue> float_values_;
std::vector<float> shared_float4s_data_;
struct HalfValue {
half value;
bool active = false;
bool store_as_f32 = false;
uint32_t offset = -1;
bool operator==(const HalfValue& other) const {
return value == other.value && offset == other.offset &&
active == other.active;
}
};
std::map<std::string, HalfValue> half_values_;
std::vector<half> shared_half4s_data_;
struct CLBufferDescriptor {
GPUBufferDescriptor desc;
cl_mem memory;
};
struct CLImage2DDescriptor {
GPUImage2DDescriptor desc;
cl_mem memory;
};
struct CLImage2DArrayDescriptor {
GPUImage2DArrayDescriptor desc;
cl_mem memory;
};
struct CLImage3DDescriptor {
GPUImage3DDescriptor desc;
cl_mem memory;
};
struct CLImageBufferDescriptor {
GPUImageBufferDescriptor desc;
cl_mem memory;
};
struct CLCustomMemoryDescriptor {
GPUCustomMemoryDescriptor desc;
cl_mem memory;
};
std::map<std::string, CLBufferDescriptor> buffers_;
std::map<std::string, CLImage2DDescriptor> images2d_;
std::map<std::string, CLImage2DArrayDescriptor> image2d_arrays_;
std::map<std::string, CLImage3DDescriptor> images3d_;
std::map<std::string, CLImageBufferDescriptor> image_buffers_;
std::map<std::string, CLCustomMemoryDescriptor> custom_memories_;
std::map<std::string, GPUObjectDescriptorPtr> object_refs_;
std::vector<GPUObjectPtr> objects_;
};
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/cl/cl_arguments.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/cl/qcom_thin_filter.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
bool IsWordSymbol(char symbol) {
return absl::ascii_isalnum(symbol) || symbol == '_';
}
void ReplaceAllWords(const std::string& old_word, const std::string& new_word,
std::string* str) {
size_t position = str->find(old_word);
while (position != std::string::npos) {
char prev = position == 0 ? '.' : (*str)[position - 1];
char next = position + old_word.size() < str->size()
? (*str)[position + old_word.size()]
: '.';
if (IsWordSymbol(prev) || IsWordSymbol(next)) {
position = str->find(old_word, position + 1);
continue;
}
str->replace(position, old_word.size(), new_word);
position = str->find(old_word, position + new_word.size());
}
}
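// Note (derived from the code above): ReplaceAllWords only substitutes whole
// identifiers; a match is skipped when the character just before or after it
// is alphanumeric or '_', so renaming "args.alpha" leaves "args.alpha2"
// untouched.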
void AppendArgument(const std::string& arg, std::string* args) {
if (!args->empty()) {
absl::StrAppend(args, ",\n ");
}
absl::StrAppend(args, arg);
}
std::string GetImageModifier(AccessType access) {
switch (access) {
case AccessType::READ:
return "__read_only";
case AccessType::WRITE:
return "__write_only";
case AccessType::READ_WRITE:
return "__read_write";
}
}
std::string GetDefaultSamplers(const GpuInfo& gpu_info) {
std::string result;
result +=
"__constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | "
"CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;\n";
if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
result +=
"__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | "
"CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;\n";
} else {
result +=
"__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | "
"CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;\n";
}
return result;
}
absl::Status CreateCLObject(GPUObjectDescriptor* desc, CLContext* context,
GPUObjectPtr* result) {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(desc);
if (buffer_desc) {
Buffer gpu_buffer;
RETURN_IF_ERROR(
gpu_buffer.CreateFromBufferDescriptor(*buffer_desc, context));
*result = std::make_unique<Buffer>(std::move(gpu_buffer));
return absl::OkStatus();
}
const auto* tensor_desc = dynamic_cast<const TensorDescriptor*>(desc);
if (tensor_desc) {
Tensor gpu_tensor;
RETURN_IF_ERROR(gpu_tensor.CreateFromDescriptor(*tensor_desc, context));
*result = std::make_unique<Tensor>(std::move(gpu_tensor));
return absl::OkStatus();
}
const auto* qcom_thin_filter_desc =
dynamic_cast<const QcomThinFilterDescriptor*>(desc);
if (qcom_thin_filter_desc) {
QcomThinFilter thin_filter;
RETURN_IF_ERROR(
thin_filter.CreateFromDescriptor(*qcom_thin_filter_desc, context));
*result = std::make_unique<QcomThinFilter>(std::move(thin_filter));
return absl::OkStatus();
}
return absl::InvalidArgumentError("Unknown GPU descriptor.");
}
}
constexpr char CLArguments::kArgsPrefix[];
absl::Status CLArguments::Init(const GpuInfo& gpu_info, CLContext* context,
Arguments* args, std::string* code) {
RETURN_IF_ERROR(AllocateObjects(*args, context));
RETURN_IF_ERROR(AddObjectArgs(gpu_info, *args));
args->MoveObjectRefs(&object_refs_);
const bool use_f32_for_halfs = gpu_info.IsPowerVR();
CopyArguments(*args, use_f32_for_halfs);
RETURN_IF_ERROR(SetObjectsResources(*args));
RenameArgumentsInCode(code);
args->ResolveArgsPass(code);
*code = absl::Substitute(*code, GetListOfArgs());
if (gpu_info.SupportsImages()) {
*code = GetDefaultSamplers(gpu_info) + *code;
}
return absl::OkStatus();
}
absl::Status CLArguments::Init(const GpuInfo& gpu_info, Arguments* args,
CLContext* context) {
RETURN_IF_ERROR(AllocateObjects(*args, context));
RETURN_IF_ERROR(AddObjectArgs(gpu_info, *args));
args->MoveObjectRefs(&object_refs_);
const bool use_f32_for_halfs = gpu_info.IsPowerVR();
CopyArguments(*args, use_f32_for_halfs);
RETURN_IF_ERROR(SetObjectsResources(*args));
return absl::OkStatus();
}
absl::Status CLArguments::AllocateObjects(const Arguments& args,
CLContext* context) {
objects_.resize(args.GetObjects().size());
int i = 0;
for (auto& t : args.GetObjects()) {
RETURN_IF_ERROR(CreateCLObject(t.second.get(), context, &objects_[i]));
i++;
}
return absl::OkStatus();
}
absl::Status CLArguments::AddObjectArgs(const GpuInfo& gpu_info,
const Arguments& args) {
for (const auto& t : args.GetObjects()) {
AddGPUResources(t.first, t.second->GetGPUResources(gpu_info));
}
for (const auto& t : args.GetObjectRefs()) {
AddGPUResources(t.first, t.second->GetGPUResources(gpu_info));
}
return absl::OkStatus();
}
absl::Status CLArguments::SetObjectsResources(const Arguments& args) {
int i = 0;
for (const auto& t : args.GetObjects()) {
GPUResourcesWithValue resources;
RETURN_IF_ERROR(objects_[i]->GetGPUResources(t.second.get(), &resources));
RETURN_IF_ERROR(SetGPUResources(t.first, resources));
i++;
}
return absl::OkStatus();
}
void CLArguments::CopyArguments(const Arguments& args, bool use_f32_for_halfs) {
for (const auto& fvalue : args.GetFloatValues()) {
auto& new_val = float_values_[fvalue.first];
new_val.value = fvalue.second.value;
new_val.active = fvalue.second.active;
if (fvalue.second.active) {
new_val.offset = shared_float4s_data_.size();
shared_float4s_data_.push_back(new_val.value);
}
}
for (const auto& ivalue : args.GetIntValues()) {
auto& new_val = int_values_[ivalue.first];
new_val.value = ivalue.second.value;
new_val.active = ivalue.second.active;
if (ivalue.second.active) {
new_val.offset = shared_int4s_data_.size();
shared_int4s_data_.push_back(new_val.value);
}
}
for (const auto& hfvalue : args.GetHalfValues()) {
auto& new_val = half_values_[hfvalue.first];
new_val.value = hfvalue.second.value;
new_val.active = hfvalue.second.active;
if (hfvalue.second.active) {
if (use_f32_for_halfs) {
new_val.store_as_f32 = true;
new_val.offset = shared_float4s_data_.size();
shared_float4s_data_.push_back(new_val.value);
} else {
new_val.store_as_f32 = false;
new_val.offset = shared_half4s_data_.size();
shared_half4s_data_.push_back(new_val.value);
}
}
}
int shared_int4s_aligned_size = AlignByN(shared_int4s_data_.size(), 4);
shared_int4s_data_.resize(shared_int4s_aligned_size);
int shared_float4s_aligned_size = AlignByN(shared_float4s_data_.size(), 4);
shared_float4s_data_.resize(shared_float4s_aligned_size);
int shared_half4s_aligned_size = AlignByN(shared_half4s_data_.size(), 4);
shared_half4s_data_.resize(shared_half4s_aligned_size);
}
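// Note (derived from the code above): active scalar arguments are packed into
// flat shared int/float/half arrays whose sizes are padded to a multiple of 4,
// so every group of four values maps onto a single int4/float4/half4 kernel
// argument and each scalar remembers its offset into that array.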
void CLArguments::RenameArgumentsInCode(std::string* code) {
const std::string postfixes[4] = {"x", "y", "z", "w"};
for (const auto& fvalue : float_values_) {
if (fvalue.second.active) {
std::string index = std::to_string(fvalue.second.offset / 4);
std::string new_name =
"shared_float4_" + index + "." + postfixes[fvalue.second.offset % 4];
ReplaceAllWords(kArgsPrefix + fvalue.first, new_name, code);
}
}
for (const auto& ivalue : int_values_) {
if (ivalue.second.active) {
std::string index = std::to_string(ivalue.second.offset / 4);
std::string new_name =
"shared_int4_" + index + "." + postfixes[ivalue.second.offset % 4];
ReplaceAllWords(kArgsPrefix + ivalue.first, new_name, code);
}
}
for (const auto& hfvalue : half_values_) {
if (hfvalue.second.active) {
std::string index = std::to_string(hfvalue.second.offset / 4);
std::string new_name;
if (hfvalue.second.store_as_f32) {
new_name = "(half)(shared_float4_" + index + "." +
postfixes[hfvalue.second.offset % 4] + ")";
} else {
new_name = "shared_half4_" + index + "." +
postfixes[hfvalue.second.offset % 4];
}
ReplaceAllWords(kArgsPrefix + hfvalue.first, new_name, code);
}
}
}
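// Sketch (illustrative only): with an active float argument "alpha" stored at
// offset 5, RenameArgumentsInCode rewrites every whole-word occurrence of
// "args.alpha" in the kernel source into "shared_float4_1.y".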
void CLArguments::AddBuffer(const std::string& name,
const GPUBufferDescriptor& desc) {
buffers_[name].desc = desc;
}
void CLArguments::AddImage2D(const std::string& name,
const GPUImage2DDescriptor& desc) {
images2d_[name].desc = desc;
}
void CLArguments::AddImage2DArray(const std::string& name,
const GPUImage2DArrayDescriptor& desc) {
image2d_arrays_[name].desc = desc;
}
void CLArguments::AddImage3D(const std::string& name,
const GPUImage3DDescriptor& desc) {
images3d_[name].desc = desc;
}
void CLArguments::AddImageBuffer(const std::string& name,
const GPUImageBufferDescriptor& desc) {
image_buffers_[name].desc = desc;
}
void CLArguments::AddCustomMemory(const std::string& name,
const GPUCustomMemoryDescriptor& desc) {
custom_memories_[name].desc = desc;
}
void CLArguments::AddGPUResources(const std::string& name,
const GPUResources& resources) {
for (const auto& r : resources.buffers) {
AddBuffer(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.images2d) {
AddImage2D(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.image2d_arrays) {
AddImage2DArray(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.images3d) {
AddImage3D(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.image_buffers) {
AddImageBuffer(absl::StrCat(name, "_", r.first), r.second);
}
for (const auto& r : resources.custom_memories) {
AddCustomMemory(absl::StrCat(name, "_", r.first), r.second);
}
}
absl::Status CLArguments::SetInt(const std::string& name, int value) {
auto it = int_values_.find(name);
if (it == int_values_.end()) {
return absl::NotFoundError(
absl::StrCat("No int argument with name - ", name));
}
it->second.value = value;
if (it->second.active) {
shared_int4s_data_[it->second.offset] = value;
}
return absl::OkStatus();
}
absl::Status CLArguments::SetFloat(const std::string& name, float value) {
auto it = float_values_.find(name);
if (it == float_values_.end()) {
return absl::NotFoundError(
absl::StrCat("No float argument with name - ", name));
}
it->second.value = value;
if (it->second.active) {
shared_float4s_data_[it->second.offset] = value;
}
return absl::OkStatus();
}
absl::Status CLArguments::SetHalf(const std::string& name, half value) {
auto it = half_values_.find(name);
if (it == half_values_.end()) {
return absl::NotFoundError(
absl::StrCat("No half argument with name - ", name));
}
it->second.value = value;
if (it->second.active) {
if (it->second.store_as_f32) {
shared_float4s_data_[it->second.offset] = value;
} else {
shared_half4s_data_[it->second.offset] = value;
}
}
return absl::OkStatus();
}
absl::Status CLArguments::SetImage2D(const std::string& name, cl_mem memory) {
auto it = images2d_.find(name);
if (it == images2d_.end()) {
return absl::NotFoundError(
absl::StrCat("No image2D argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetBuffer(const std::string& name, cl_mem memory) {
auto it = buffers_.find(name);
if (it == buffers_.end()) {
return absl::NotFoundError(
absl::StrCat("No buffer argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetImage2DArray(const std::string& name,
cl_mem memory) {
auto it = image2d_arrays_.find(name);
if (it == image2d_arrays_.end()) {
return absl::NotFoundError(
absl::StrCat("No image2D array argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetImage3D(const std::string& name, cl_mem memory) {
auto it = images3d_.find(name);
if (it == images3d_.end()) {
return absl::NotFoundError(
absl::StrCat("No image3D argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetImageBuffer(const std::string& name,
cl_mem memory) {
auto it = image_buffers_.find(name);
if (it == image_buffers_.end()) {
return absl::NotFoundError(
absl::StrCat("No image buffer argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetCustomMemory(const std::string& name,
cl_mem memory) {
auto it = custom_memories_.find(name);
if (it == custom_memories_.end()) {
return absl::NotFoundError(
absl::StrCat("No custom memory argument with name - ", name));
}
it->second.memory = memory;
return absl::OkStatus();
}
absl::Status CLArguments::SetObjectRef(const std::string& name,
const GPUObject* object) {
auto it = object_refs_.find(name);
if (it == object_refs_.end()) {
return absl::NotFoundError(
absl::StrCat("No object ref with name - ", name));
}
GPUResourcesWithValue resources;
RETURN_IF_ERROR(object->GetGPUResources(it->second.get(), &resources));
return SetGPUResources(name, resources);
}
absl::Status CLArguments::SetGPUResources(
const std::string& name, const GPUResourcesWithValue& resources) {
for (const auto& r : resources.generic.ints) {
RETURN_IF_ERROR(SetInt(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.generic.floats) {
RETURN_IF_ERROR(SetFloat(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.buffers) {
RETURN_IF_ERROR(SetBuffer(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.images2d) {
RETURN_IF_ERROR(SetImage2D(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.image2d_arrays) {
RETURN_IF_ERROR(
SetImage2DArray(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.images3d) {
RETURN_IF_ERROR(SetImage3D(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.image_buffers) {
RETURN_IF_ERROR(SetImageBuffer(absl::StrCat(name, "_", r.first), r.second));
}
for (const auto& r : resources.custom_memories) {
RETURN_IF_ERROR(
SetCustomMemory(absl::StrCat(name, "_", r.first), r.second));
}
return absl::OkStatus();
}
std::string CLArguments::GetListOfArgs() {
std::string result;
for (auto& t : buffers_) {
const std::string type_name =
t.second.desc.data_type == DataType::FLOAT32 ? "float" : "half";
std::string attributes;
for (const auto& attr : t.second.desc.attributes) {
attributes += absl::StrCat(" __attribute__((", attr, "))");
}
std::string cl_type;
if (t.second.desc.data_type == DataType::BOOL) {
cl_type = ToCLDataType(DataType::UINT8, t.second.desc.element_size);
} else {
cl_type =
ToCLDataType(t.second.desc.data_type, t.second.desc.element_size);
}
AppendArgument(absl::StrCat(MemoryTypeToCLType(t.second.desc.memory_type),
" ", cl_type, "* ", t.first, attributes),
&result);
}
for (auto& t : image_buffers_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image1d_buffer_t ", t.first),
&result);
}
for (auto& t : images2d_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image2d_t ", t.first),
&result);
}
for (auto& t : image2d_arrays_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image2d_array_t ", t.first),
&result);
}
for (auto& t : images3d_) {
AppendArgument(absl::StrCat(GetImageModifier(t.second.desc.access_type),
" image3d_t ", t.first),
&result);
}
for (auto& t : custom_memories_) {
AppendArgument(absl::StrCat(t.second.desc.type_name, " ", t.first),
&result);
}
for (int i = 0; i < shared_int4s_data_.size() / 4; ++i) {
AppendArgument(absl::StrCat("int4 shared_int4_", i), &result);
}
for (int i = 0; i < shared_float4s_data_.size() / 4; ++i) {
AppendArgument(absl::StrCat("float4 shared_float4_", i), &result);
}
for (int i = 0; i < shared_half4s_data_.size() / 4; ++i) {
AppendArgument(absl::StrCat("half4 shared_half4_", i), &result);
}
return result;
}
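// Binds every stored object and scalar pack to consecutive kernel argument
// indices starting at `offset`; the traversal order must match
// GetListOfArgs() so each value lands on the argument declared for it.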
absl::Status CLArguments::Bind(cl_kernel kernel, int offset) {
for (auto& t : buffers_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : image_buffers_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : images2d_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : image2d_arrays_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : images3d_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (auto& t : custom_memories_) {
const int error_code =
clSetKernelArg(kernel, offset, sizeof(cl_mem), &t.second.memory);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (int i = 0; i < shared_int4s_data_.size() / 4; ++i) {
const int error_code = clSetKernelArg(kernel, offset, sizeof(int32_t) * 4,
&shared_int4s_data_[i * 4]);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (int i = 0; i < shared_float4s_data_.size() / 4; ++i) {
    const int error_code = clSetKernelArg(kernel, offset, sizeof(float) * 4,
&shared_float4s_data_[i * 4]);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
for (int i = 0; i < shared_half4s_data_.size() / 4; ++i) {
const int error_code = clSetKernelArg(kernel, offset, sizeof(int16_t) * 4,
&shared_half4s_data_[i * 4]);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(absl::StrCat(
"Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
"(at index - ", offset, ")"));
}
offset++;
}
return absl::OkStatus();
}
bool CLArguments::HasEqualScalarArguments(const CLArguments& other) const {
return (other.int_values_ == int_values_ &&
other.float_values_ == float_values_ &&
other.half_values_ == half_values_);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/cl_arguments.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_test.h"
#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST(CLArgumentsTest, TestSelectorResolve) {
BufferDescriptor desc;
desc.element_type = DataType::FLOAT32;
desc.element_size = 4;
desc.memory_type = MemoryType::GLOBAL;
Arguments args;
args.AddObjectRef("weights", AccessType::READ,
std::make_unique<BufferDescriptor>(std::move(desc)));
std::string sample_code = R"(
__kernel void main_function($0) {
if (a < 3) {
value = args.weights.Read(id);
}
})";
CLArguments cl_args;
GpuInfo gpu_info;
ASSERT_OK(cl_args.Init(gpu_info, nullptr, &args, &sample_code));
EXPECT_TRUE(absl::StrContains(sample_code, "value = weights_buffer[id];"));
EXPECT_TRUE(
absl::StrContains(sample_code, "__global float4* weights_buffer"));
}
TEST(CLArgumentsTest, TestNoSelector) {
BufferDescriptor desc;
desc.element_type = DataType::FLOAT32;
desc.element_size = 4;
desc.memory_type = MemoryType::GLOBAL;
Arguments args;
args.AddObjectRef("weights", AccessType::READ,
std::make_unique<BufferDescriptor>(std::move(desc)));
std::string sample_code = R"(
if (a < 3) {
value = args.weights.UnknownSelector(id);
}
)";
CLArguments cl_args;
GpuInfo gpu_info;
EXPECT_FALSE(cl_args.Init(gpu_info, nullptr, &args, &sample_code).ok());
}
}
}
} |
993 | cpp | tensorflow/tensorflow | cl_device | tensorflow/lite/delegates/gpu/cl/cl_device.cc | tensorflow/lite/delegates/gpu/cl/cl_device_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_CL_CL_DEVICE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_CL_CL_DEVICE_H_
#include <string>
#include <vector>
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/util.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace cl {
class CLDevice {
public:
CLDevice() = default;
CLDevice(cl_device_id id, cl_platform_id platform_id);
CLDevice(CLDevice&& device);
CLDevice& operator=(CLDevice&& device);
CLDevice(const CLDevice&);
CLDevice& operator=(const CLDevice&);
~CLDevice() {}
cl_device_id id() const { return id_; }
cl_platform_id platform() const { return platform_id_; }
std::string GetPlatformVersion() const;
void DisableOneLayerTextureArray();
const GpuInfo& GetInfo() const { return info_; }
mutable GpuInfo info_;
private:
cl_device_id id_ = nullptr;
cl_platform_id platform_id_ = nullptr;
};
absl::Status CreateDefaultGPUDevice(CLDevice* result);
template <typename T>
T GetDeviceInfo(cl_device_id id, cl_device_info info) {
T result;
cl_int error = clGetDeviceInfo(id, info, sizeof(T), &result, nullptr);
if (error != CL_SUCCESS) {
return -1;
}
return result;
}
template <typename T>
absl::Status GetDeviceInfo(cl_device_id id, cl_device_info info, T* result) {
cl_int error = clGetDeviceInfo(id, info, sizeof(T), result, nullptr);
if (error != CL_SUCCESS) {
return absl::InvalidArgumentError(CLErrorCodeToString(error));
}
return absl::OkStatus();
}
void ParseQualcommOpenClCompilerVersion(
const std::string& cl_driver_version,
AdrenoInfo::OpenClCompilerVersion* result);
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
namespace tflite {
namespace gpu {
namespace cl {
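// Extracts the Adreno OpenCL compiler version from a driver string containing
// a fragment of the form "Compiler E031.MM.mm.pp" (two digits per component);
// `result` is left unchanged when the pattern is absent or malformed.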
void ParseQualcommOpenClCompilerVersion(
const std::string& cl_driver_version,
AdrenoInfo::OpenClCompilerVersion* result) {
const std::string start = "Compiler E031.";
size_t position = cl_driver_version.find(start);
if (position == std::string::npos) {
return;
}
const size_t main_part_length = 8;
if (position + start.length() + main_part_length >
cl_driver_version.length()) {
return;
}
const std::string main_part =
cl_driver_version.substr(position + start.length(), main_part_length);
if (!absl::ascii_isdigit(main_part[0]) ||
!absl::ascii_isdigit(main_part[1]) || main_part[2] != '.' ||
!absl::ascii_isdigit(main_part[3]) ||
!absl::ascii_isdigit(main_part[4]) || main_part[5] != '.' ||
!absl::ascii_isdigit(main_part[6]) ||
!absl::ascii_isdigit(main_part[7])) {
return;
}
result->major = (main_part[0] - '0') * 10 + (main_part[1] - '0');
result->minor = (main_part[3] - '0') * 10 + (main_part[4] - '0');
result->patch = (main_part[6] - '0') * 10 + (main_part[7] - '0');
}
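// Parses PowerVR driver versions of the form
// "<branch_main>.<branch_minor>@<id>" into the result's branch_main,
// branch_minor and id fields.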
static void ParsePowerVRDriverVersion(const std::string& cl_driver_version,
PowerVRInfo::DriverVersion& result) {
size_t position = cl_driver_version.find('@');
if (position == std::string::npos) {
return;
}
int main = 0;
size_t curpos = 0;
while (curpos < position && absl::ascii_isdigit(cl_driver_version[curpos])) {
main = main * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
++curpos;
int minor = 0;
while (curpos < position) {
minor = minor * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
curpos = position + 1;
int id = 0;
while (curpos < cl_driver_version.length()) {
id = id * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
result.branch_main = main;
result.branch_minor = minor;
result.id = id;
}
template <>
std::string GetDeviceInfo<std::string>(cl_device_id id, cl_device_info info) {
size_t size;
cl_int error = clGetDeviceInfo(id, info, 0, nullptr, &size);
if (error != CL_SUCCESS) {
return "";
}
std::string result(size - 1, 0);
error = clGetDeviceInfo(id, info, size, &result[0], nullptr);
if (error != CL_SUCCESS) {
return "";
}
return result;
}
namespace {
template <typename T>
T GetPlatformInfo(cl_platform_id id, cl_platform_info info) {
T result;
cl_int error = clGetPlatformInfo(id, info, sizeof(T), &result, nullptr);
if (error != CL_SUCCESS) {
return -1;
}
return result;
}
std::string GetPlatformInfo(cl_platform_id id, cl_platform_info info) {
size_t size;
cl_int error = clGetPlatformInfo(id, info, 0, nullptr, &size);
if (error != CL_SUCCESS) {
return "";
}
std::string result(size - 1, 0);
error = clGetPlatformInfo(id, info, size, &result[0], nullptr);
if (error != CL_SUCCESS) {
return "";
}
return result;
}
void GetDeviceWorkDimsSizes(cl_device_id id, int3* result) {
int dims_count =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS);
if (dims_count < 3) {
return;
}
std::vector<size_t> limits(dims_count);
cl_int error =
clGetDeviceInfo(id, CL_DEVICE_MAX_WORK_ITEM_SIZES,
sizeof(size_t) * dims_count, limits.data(), nullptr);
if (error != CL_SUCCESS) {
return;
}
result->x = limits[0];
result->y = limits[1];
result->z = limits[2];
}
OpenClVersion ParseCLVersion(const std::string& version) {
const auto first_dot_pos = version.find_first_of('.');
if (first_dot_pos == std::string::npos) {
return OpenClVersion::kCl1_0;
}
const int major = version[first_dot_pos - 1] - '0';
const int minor = version[first_dot_pos + 1] - '0';
if (major == 1) {
if (minor == 2) {
return OpenClVersion::kCl1_2;
} else if (minor == 1) {
return OpenClVersion::kCl1_1;
} else {
return OpenClVersion::kCl1_0;
}
} else if (major == 2) {
if (minor == 2) {
return OpenClVersion::kCl2_2;
} else if (minor == 1) {
return OpenClVersion::kCl2_1;
} else {
return OpenClVersion::kCl2_0;
}
} else if (major == 3) {
return OpenClVersion::kCl3_0;
} else {
return OpenClVersion::kCl1_0;
}
}
bool IsGPUVersionInRange(int gpu_version, int min_version, int max_version) {
return gpu_version >= min_version && gpu_version < max_version;
}
GpuInfo GpuInfoFromDeviceID(cl_device_id id, cl_platform_id platform_id) {
GpuInfo info;
info.opencl_info.platform_version =
GetPlatformInfo(platform_id, CL_PLATFORM_VERSION);
info.opencl_info.device_name = GetDeviceInfo<std::string>(id, CL_DEVICE_NAME);
info.opencl_info.vendor_name =
GetDeviceInfo<std::string>(id, CL_DEVICE_VENDOR);
info.opencl_info.opencl_c_version =
GetDeviceInfo<std::string>(id, CL_DEVICE_OPENCL_C_VERSION);
info.opencl_info.driver_version =
GetDeviceInfo<std::string>(id, CL_DRIVER_VERSION);
const std::string gpu_description = absl::StrCat(
info.opencl_info.device_name, " ", info.opencl_info.vendor_name, " ",
info.opencl_info.opencl_c_version);
GetGpuInfoFromDeviceDescription(gpu_description, GpuApi::kOpenCl, &info);
info.opencl_info.cl_version =
ParseCLVersion(info.opencl_info.opencl_c_version);
info.opencl_info.extensions =
absl::StrSplit(GetDeviceInfo<std::string>(id, CL_DEVICE_EXTENSIONS), ' ');
const std::vector<std::string> unsupported_extensions =
GetUnsupportedExtensions();
for (const auto& unsupported_extension : unsupported_extensions) {
for (auto it = info.opencl_info.extensions.begin();
it != info.opencl_info.extensions.end();) {
if (*it == unsupported_extension) {
it = info.opencl_info.extensions.erase(it);
} else {
++it;
}
}
}
info.opencl_info.supports_fp16 = false;
info.opencl_info.supports_image3d_writes = false;
for (const auto& ext : info.opencl_info.extensions) {
if (ext == "cl_khr_fp16") {
info.opencl_info.supports_fp16 = true;
}
if (ext == "cl_khr_3d_image_writes") {
info.opencl_info.supports_image3d_writes = true;
}
}
info.opencl_info.supports_images =
GetDeviceInfo<cl_bool>(id, CL_DEVICE_IMAGE_SUPPORT);
cl_device_fp_config f32_config =
GetDeviceInfo<cl_device_fp_config>(id, CL_DEVICE_SINGLE_FP_CONFIG);
info.opencl_info.supports_fp32_rtn = f32_config & CL_FP_ROUND_TO_NEAREST;
if (info.opencl_info.supports_fp16) {
cl_device_fp_config f16_config;
auto status = GetDeviceInfo<cl_device_fp_config>(
id, CL_DEVICE_HALF_FP_CONFIG, &f16_config);
if (status.ok() && !info.IsAMD()) {
info.opencl_info.supports_fp16_rtn = f16_config & CL_FP_ROUND_TO_NEAREST;
} else {
f16_config = f32_config;
info.opencl_info.supports_fp16_rtn = info.opencl_info.supports_fp32_rtn;
}
} else {
info.opencl_info.supports_fp16_rtn = false;
}
if (info.IsPowerVR()) {
if (!info.powervr_info.IsBetterThan(PowerVRGpu::kRogueGm9xxx)) {
info.opencl_info.supports_fp16 = false;
} else if (!info.opencl_info.supports_fp16) {
info.opencl_info.supports_fp16 = true;
info.opencl_info.supports_fp16_rtn = info.opencl_info.supports_fp32_rtn;
}
}
if (!info.opencl_info.supports_image3d_writes &&
((info.IsAdreno() && info.adreno_info.IsAdreno4xx()) ||
info.IsNvidia())) {
info.opencl_info.supports_image3d_writes = true;
}
info.opencl_info.compute_units_count =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MAX_COMPUTE_UNITS);
info.opencl_info.image2d_max_width =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_WIDTH);
info.opencl_info.image2d_max_height =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_HEIGHT);
info.opencl_info.buffer_max_size =
GetDeviceInfo<cl_ulong>(id, CL_DEVICE_MAX_MEM_ALLOC_SIZE);
info.opencl_info.max_allocation_size =
GetDeviceInfo<cl_ulong>(id, CL_DEVICE_MAX_MEM_ALLOC_SIZE);
if (info.opencl_info.cl_version >= OpenClVersion::kCl1_2) {
info.opencl_info.image_buffer_max_size =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE_MAX_BUFFER_SIZE);
info.opencl_info.image_array_max_layers =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE_MAX_ARRAY_SIZE);
}
info.opencl_info.image3d_max_width =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_WIDTH);
info.opencl_info.image3d_max_height =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_HEIGHT);
info.opencl_info.image3d_max_depth =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_DEPTH);
int3 max_work_group_sizes;
GetDeviceWorkDimsSizes(id, &max_work_group_sizes);
info.opencl_info.max_work_group_size_x = max_work_group_sizes.x;
info.opencl_info.max_work_group_size_y = max_work_group_sizes.y;
info.opencl_info.max_work_group_size_z = max_work_group_sizes.z;
info.opencl_info.max_work_group_total_size =
GetDeviceInfo<size_t>(id, CL_DEVICE_MAX_WORK_GROUP_SIZE);
info.opencl_info.dedicated_local_memory =
(GetDeviceInfo<cl_device_local_mem_type>(id, CL_DEVICE_LOCAL_MEM_TYPE) ==
CL_LOCAL);
if (info.IsCL30OrHigher()) {
info.opencl_info.preferred_work_group_size_multiple =
GetDeviceInfo<size_t>(id, CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_MULTIPLE);
} else {
info.opencl_info.preferred_work_group_size_multiple = 0;
}
info.opencl_info.base_addr_align_in_bits =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MEM_BASE_ADDR_ALIGN);
info.opencl_info.image_pitch_alignment = 0;
if (info.opencl_info.cl_version == OpenClVersion::kCl2_0 ||
info.opencl_info.cl_version == OpenClVersion::kCl2_1 ||
info.opencl_info.cl_version == OpenClVersion::kCl2_2) {
info.opencl_info.image_pitch_alignment =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT);
info.opencl_info.image_base_address_alignment =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT);
} else if (info.SupportsExtension("cl_khr_image2d_from_buffer")) {
cl_uint result = 0;
auto status =
GetDeviceInfo(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT_KHR, &result);
if (status.ok()) {
info.opencl_info.image_pitch_alignment = result;
}
result = 0;
status =
GetDeviceInfo(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT_KHR, &result);
if (status.ok()) {
info.opencl_info.image_base_address_alignment = result;
}
}
if (info.SupportsExtension("cl_intel_required_subgroup_size")) {
size_t sub_groups_ret_size;
cl_int status =
        clGetDeviceInfo(id, 0x4108 /* CL_DEVICE_SUB_GROUP_SIZES_INTEL */, 0,
nullptr, &sub_groups_ret_size);
if (status == CL_SUCCESS) {
size_t sub_groups_count = sub_groups_ret_size / sizeof(size_t);
std::vector<size_t> sub_group_sizes(sub_groups_count);
status =
          clGetDeviceInfo(id, 0x4108 /* CL_DEVICE_SUB_GROUP_SIZES_INTEL */,
sub_groups_ret_size, sub_group_sizes.data(), nullptr);
if (status == CL_SUCCESS) {
for (int i = 0; i < sub_groups_count; ++i) {
info.supported_subgroup_sizes.push_back(sub_group_sizes[i]);
}
}
}
}
if (info.IsAdreno()) {
ParseQualcommOpenClCompilerVersion(info.opencl_info.driver_version,
&info.adreno_info.cl_compiler_version);
} else if (info.IsPowerVR()) {
ParsePowerVRDriverVersion(info.opencl_info.driver_version,
info.powervr_info.driver_version);
}
return info;
}
}
CLDevice::CLDevice(cl_device_id id, cl_platform_id platform_id)
: info_(GpuInfoFromDeviceID(id, platform_id)),
id_(id),
platform_id_(platform_id) {
if (info_.IsAdreno() &&
info_.adreno_info.adreno_gpu == AdrenoGpu::kAdreno630) {
acceleration::AndroidInfo android_info;
if (acceleration::RequestAndroidInfo(&android_info).ok()) {
info_.adreno_info.compiler_bugs_in_a6xx =
android_info.android_sdk_version == "26";
}
}
}
CLDevice::CLDevice(const CLDevice& device)
: info_(device.info_), id_(device.id_), platform_id_(device.platform_id_) {}
CLDevice& CLDevice::operator=(const CLDevice& device) {
if (this != &device) {
info_ = device.info_;
id_ = device.id_;
platform_id_ = device.platform_id_;
}
return *this;
}
CLDevice::CLDevice(CLDevice&& device)
: info_(std::move(device.info_)),
id_(device.id_),
platform_id_(device.platform_id_) {
device.id_ = nullptr;
device.platform_id_ = nullptr;
}
CLDevice& CLDevice::operator=(CLDevice&& device) {
if (this != &device) {
id_ = nullptr;
platform_id_ = nullptr;
info_ = std::move(device.info_);
std::swap(id_, device.id_);
std::swap(platform_id_, device.platform_id_);
}
return *this;
}
std::string CLDevice::GetPlatformVersion() const {
return GetPlatformInfo(platform_id_, CL_PLATFORM_VERSION);
}
void CLDevice::DisableOneLayerTextureArray() {
info_.adreno_info.support_one_layer_texture_array = false;
}
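// Selects the first OpenCL platform and the first GPU device on that
// platform; multi-GPU systems therefore always get device 0.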
absl::Status CreateDefaultGPUDevice(CLDevice* result) {
cl_uint num_platforms;
cl_int status = clGetPlatformIDs(0, nullptr, &num_platforms);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetPlatformIDs returned %d", status));
}
if (num_platforms == 0) {
return absl::UnknownError("No supported OpenCL platform.");
}
std::vector<cl_platform_id> platforms(num_platforms);
status = clGetPlatformIDs(num_platforms, platforms.data(), nullptr);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetPlatformIDs returned %d", status));
}
cl_platform_id platform_id = platforms[0];
cl_uint num_devices;
status =
clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 0, nullptr, &num_devices);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetDeviceIDs returned %d", status));
}
if (num_devices == 0) {
return absl::UnknownError("No GPU on current platform.");
}
std::vector<cl_device_id> devices(num_devices);
status = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, num_devices,
devices.data(), nullptr);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetDeviceIDs returned %d", status));
}
*result = CLDevice(devices[0], platform_id);
LoadOpenCLFunctionExtensions(platform_id);
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace cl {
TEST(QualcommOpenClCompilerVersionParsing, Base) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.53.41",
&result);
EXPECT_EQ(result.major, 79);
EXPECT_EQ(result.minor, 53);
EXPECT_EQ(result.patch, 41);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat0) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Assembler A337.79.53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat1) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.53.4",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat2) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031:79:53:41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat3) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.x53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat4) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.a9.53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
}
}
} |
994 | cpp | tensorflow/tensorflow | converter | tensorflow/lite/delegates/gpu/cl/kernels/converter.cc | tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONVERTER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONVERTER_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/gl/command_queue.h"
#include "tensorflow/lite/delegates/gpu/spi.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<TensorObjectConverterBuilder> NewConverterBuilder(
    CommandQueue* command_queue /* optional */);
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/converter.h"
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
absl::Status WrapSSBO(OpenGlBuffer ssbo, GlBuffer* buffer) {
int64_t size_bytes;
RETURN_IF_ERROR(GetSSBOSize(ssbo.id, &size_bytes));
*buffer = GlBuffer(GL_SHADER_STORAGE_BUFFER, ssbo.id, size_bytes, 0, false);
return absl::OkStatus();
}
std::string GetShaderHeader(const uint3& localsize) {
return absl::StrCat("#version 310 es\nlayout(local_size_x = ", localsize.x,
", local_size_y = ", localsize.y,
", local_size_z = ", localsize.z, ") in;\n");
}
class OpenGlConverterImpl : public TensorObjectConverter {
public:
explicit OpenGlConverterImpl(CommandQueue* command_queue)
: command_queue_(command_queue) {}
virtual absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def) = 0;
protected:
absl::Status InitializeProgram(const uint3& workgroup_size,
const std::string& shader_source) {
workgroup_size_ = workgroup_size;
GlShader shader;
RETURN_IF_ERROR(GlShader::CompileShader(
GL_COMPUTE_SHADER, GetShaderHeader(workgroup_size) + shader_source,
&shader));
return GlProgram::CreateWithShader(shader, &program_);
}
absl::Status Dispatch(const uint3& workload) {
uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_);
if (command_queue_) {
return command_queue_->Dispatch(program_, num_workgroups);
}
return program_.Dispatch(num_workgroups);
}
GlProgram program_;
uint3 workgroup_size_;
CommandQueue* command_queue_;
};
bool IsSupportedDataType(DataType type) { return type == DataType::FLOAT32; }
uint32_t SizeInBytesDHWC4(const BHWC& shape) {
return shape.b * shape.h * shape.w * AlignByN(shape.c, 4) * sizeof(float);
}
uint32_t SizeInBytesBHWC(const BHWC& shape) {
return shape.DimensionsProduct() * sizeof(float);
}
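// Converts a DHWC4 (channel-sliced, padded to multiples of 4) tensor stored
// in an SSBO into a dense BHWC float buffer. Only batch size 1 is supported.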
class FromTensorConverter : public OpenGlConverterImpl {
public:
explicit FromTensorConverter(CommandQueue* command_queue)
: OpenGlConverterImpl(command_queue) {}
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsSupportedDataType(input.data_type) &&
IsSupportedDataType(output.data_type) &&
output.object_type == ObjectType::OPENGL_SSBO &&
output.data_layout == DataLayout::BHWC &&
input.object_type == ObjectType::OPENGL_SSBO &&
input.data_layout == DataLayout::DHWC4;
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def) final {
shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,
output_def.dimensions.w, output_def.dimensions.c);
if (shape_.b != 1) {
return absl::UnimplementedError(
"FromTensorConverter: Batch size != 1 is not supported.");
}
return InitializeProgram(uint3(8, 4, 2), R"(
layout(std430) buffer;
precision highp float;
layout(binding = 0) readonly buffer B0 {
vec4 elements[];
} input_data;
layout(binding = 1) writeonly buffer B1 {
float elements[];
} output_data;
uniform ivec4 sizes;
void main() {
ivec3 gid = ivec3(gl_GlobalInvocationID.xyz);
if (gid.x >= sizes.x || gid.y >= sizes.y || gid.z >= sizes.z) {
return;
}
output_data.elements[(gid.y * sizes.x + gid.x) * sizes.z + gid.z] = input_data.elements[(gid.z / 4 * sizes.y + gid.y) * sizes.x + gid.x][gid.z % 4];
})");
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto output = std::get_if<OpenGlBuffer>(&output_obj);
if (!output || !output->id) {
return absl::InvalidArgumentError("Missing output in converter");
}
auto input = std::get_if<OpenGlBuffer>(&input_obj);
if (!input || !input->id) {
return absl::InvalidArgumentError("Missing input in converter");
}
if (input->id == output->id) {
return absl::InvalidArgumentError("Can not execute inplace conversion");
}
GlBuffer input_ssbo;
RETURN_IF_ERROR(WrapSSBO(*input, &input_ssbo));
GlBuffer output_ssbo;
RETURN_IF_ERROR(WrapSSBO(*output, &output_ssbo));
if (input_ssbo.bytes_size() != SizeInBytesDHWC4(shape_)) {
return absl::InvalidArgumentError(
"FromTensorConverter: input data size does not match expected size.");
}
if (output_ssbo.bytes_size() != SizeInBytesBHWC(shape_)) {
return absl::InvalidArgumentError(
"FromTensorConverter: output data size does not match expected "
"size.");
}
RETURN_IF_ERROR(program_.SetParameter(
{"sizes",
int4(static_cast<int32_t>(shape_.w), static_cast<int32_t>(shape_.h),
static_cast<int32_t>(shape_.c), 0)}));
RETURN_IF_ERROR(input_ssbo.BindToIndex(0));
RETURN_IF_ERROR(output_ssbo.BindToIndex(1));
return Dispatch(uint3(shape_.w, shape_.h, shape_.c));
}
BHWC shape_;
};
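// Inverse of FromTensorConverter: packs a dense BHWC float buffer into the
// DHWC4 layout, zero-filling the unused channels of the last slice.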
class ToTensorConverter : public OpenGlConverterImpl {
public:
explicit ToTensorConverter(CommandQueue* command_queue)
: OpenGlConverterImpl(command_queue) {}
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return IsSupportedDataType(input.data_type) &&
IsSupportedDataType(output.data_type) &&
input.object_type == ObjectType::OPENGL_SSBO &&
input.data_layout == DataLayout::BHWC &&
output.object_type == ObjectType::OPENGL_SSBO &&
output.data_layout == DataLayout::DHWC4;
}
absl::Status Init(const TensorObjectDef& input_def,
const TensorObjectDef& output_def) final {
shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,
output_def.dimensions.w, output_def.dimensions.c);
if (shape_.b != 1) {
return absl::UnimplementedError(
"ToTensorConverter: Batch size != 1 is not supported.");
}
return InitializeProgram(uint3(8, 4, 2), R"(
layout(std430) buffer;
precision highp float;
layout(binding = 0) readonly buffer B0 {
float elements[];
} input_data;
layout(binding = 1) writeonly buffer B1 {
vec4 elements[];
} output_data;
uniform ivec4 sizes;
void main() {
ivec3 gid = ivec3(gl_GlobalInvocationID.xyz);
if (gid.x >= sizes.x || gid.y >= sizes.y || gid.z >= sizes.w) {
return;
}
vec4 v = vec4(0);
int dst_channel = gid.z * 4;
int index = (gid.y * sizes.x + gid.x) * sizes.z + dst_channel;
for (int i = 0; i < 4; ++i, ++index, ++dst_channel) {
if (dst_channel >= sizes.z) break;
v[i] = input_data.elements[index];
}
output_data.elements[(gid.z * sizes.y + gid.y) * sizes.x + gid.x] = v;
})");
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto output = std::get_if<OpenGlBuffer>(&output_obj);
if (!output || !output->id) {
return absl::InvalidArgumentError("Missing output in converter");
}
auto input = std::get_if<OpenGlBuffer>(&input_obj);
if (!input || !input->id) {
return absl::InvalidArgumentError("Missing input in converter");
}
if (input->id == output->id) {
return absl::InvalidArgumentError("Can not execute inplace conversion");
}
GlBuffer input_ssbo;
RETURN_IF_ERROR(WrapSSBO(*input, &input_ssbo));
GlBuffer output_ssbo;
RETURN_IF_ERROR(WrapSSBO(*output, &output_ssbo));
if (input_ssbo.bytes_size() != SizeInBytesBHWC(shape_)) {
return absl::InvalidArgumentError(
"ToTensorConverter: input data size does not match expected size.");
}
if (output_ssbo.bytes_size() != SizeInBytesDHWC4(shape_)) {
return absl::InvalidArgumentError(
"ToTensorConverter: output data size does not match expected size.");
}
auto d = DivideRoundUp(shape_.c, 4);
RETURN_IF_ERROR(program_.SetParameter(
{"sizes",
int4(static_cast<int32_t>(shape_.w), static_cast<int32_t>(shape_.h),
static_cast<int32_t>(shape_.c), static_cast<int32_t>(d))}));
RETURN_IF_ERROR(input_ssbo.BindToIndex(0));
RETURN_IF_ERROR(output_ssbo.BindToIndex(1));
return Dispatch(uint3(shape_.w, shape_.h, d));
}
BHWC shape_;
};
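// Used when input and output object definitions already match: a plain
// SSBO-to-SSBO copy, or a no-op when both refer to the same buffer id.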
class TrivialCopier : public TensorObjectConverter {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return input.object_type == ObjectType::OPENGL_SSBO &&
input.data_type == output.data_type &&
input.object_type == output.object_type &&
input.data_layout == output.data_layout;
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto ssbo_input = std::get_if<OpenGlBuffer>(&input_obj);
auto ssbo_output = std::get_if<OpenGlBuffer>(&output_obj);
if (ssbo_input && ssbo_output) {
return Copy(*ssbo_input, *ssbo_output);
}
return absl::InternalError("Unexpected object");
}
absl::Status Copy(OpenGlBuffer input, OpenGlBuffer output) {
if (input.id == output.id) {
return absl::OkStatus();
}
GlBuffer input_obj;
RETURN_IF_ERROR(WrapSSBO(input, &input_obj));
GlBuffer output_obj;
RETURN_IF_ERROR(WrapSSBO(output, &output_obj));
return CopyBuffer(input_obj, output_obj);
}
};
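// Handles CPU<->SSBO transfers in either direction by writing to or reading
// from the wrapped GlBuffer.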
class CpuCopier : public TensorObjectConverter {
public:
static bool IsSupported(const ObjectDef& input, const ObjectDef& output) {
return input.data_type == output.data_type &&
input.data_layout == output.data_layout &&
((input.object_type == ObjectType::CPU_MEMORY &&
output.object_type == ObjectType::OPENGL_SSBO) ||
(output.object_type == ObjectType::CPU_MEMORY &&
input.object_type == ObjectType::OPENGL_SSBO));
}
absl::Status Convert(const TensorObject& input_obj,
const TensorObject& output_obj) override {
auto cpu_input = std::get_if<CpuMemory>(&input_obj);
auto cpu_output = std::get_if<CpuMemory>(&output_obj);
if (cpu_input) {
auto ssbo_output = std::get_if<OpenGlBuffer>(&output_obj);
if (ssbo_output) {
GlBuffer gl_buffer;
RETURN_IF_ERROR(WrapSSBO(*ssbo_output, &gl_buffer));
return gl_buffer.Write(
absl::MakeConstSpan(static_cast<const uint8_t*>(cpu_input->data),
cpu_input->size_bytes));
}
} else if (cpu_output) {
auto ssbo_input = std::get_if<OpenGlBuffer>(&input_obj);
if (ssbo_input) {
GlBuffer gl_buffer;
RETURN_IF_ERROR(WrapSSBO(*ssbo_input, &gl_buffer));
return gl_buffer.Read(absl::MakeSpan(
static_cast<uint8_t*>(cpu_output->data), cpu_output->size_bytes));
}
}
return absl::InternalError("Unexpected object");
}
};
class TensorConverterBuilderImpl : public TensorObjectConverterBuilder {
public:
explicit TensorConverterBuilderImpl(CommandQueue* command_queue)
: command_queue_(command_queue) {}
bool IsSupported(const TensorObjectDef& input,
const TensorObjectDef& output) const final {
const auto& input_def = input.object_def;
const auto& output_def = output.object_def;
return input.dimensions == output.dimensions &&
(TrivialCopier::IsSupported(input_def, output_def) ||
CpuCopier::IsSupported(input_def, output_def) ||
FromTensorConverter::IsSupported(input_def, output_def) ||
ToTensorConverter::IsSupported(input_def, output_def));
}
absl::Status MakeConverter(
const TensorObjectDef& input, const TensorObjectDef& output,
std::unique_ptr<TensorObjectConverter>* converter) final {
std::unique_ptr<OpenGlConverterImpl> impl;
const auto& input_def = input.object_def;
const auto& output_def = output.object_def;
if (TrivialCopier::IsSupported(input_def, output_def)) {
*converter = std::make_unique<TrivialCopier>();
return absl::OkStatus();
}
if (CpuCopier::IsSupported(input_def, output_def)) {
*converter = std::make_unique<CpuCopier>();
return absl::OkStatus();
}
if (FromTensorConverter::IsSupported(input_def, output_def)) {
impl = std::make_unique<FromTensorConverter>(command_queue_);
} else if (ToTensorConverter::IsSupported(input_def, output_def)) {
impl = std::make_unique<ToTensorConverter>(command_queue_);
} else {
return absl::UnimplementedError("Unsupported conversion");
}
RETURN_IF_ERROR(impl->Init(input, output));
*converter = std::move(impl);
return absl::OkStatus();
}
private:
CommandQueue* command_queue_;
};
}
std::unique_ptr<TensorObjectConverterBuilder> NewConverterBuilder(
CommandQueue* command_queue) {
return std::make_unique<TensorConverterBuilderImpl>(command_queue);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/converter.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
inline std::vector<float> GenerateFloats(float multiplier, int size) {
std::vector<float> v(size);
for (int i = 0; i < size; ++i) {
v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1);
}
return v;
}
Dimensions ToDimensions(const BHWC& shape) {
return Dimensions(shape.b, shape.h, shape.w, shape.c);
}
absl::Status RunFromTensorTest(const BHWC& shape) {
std::vector<float> input =
GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
std::vector<float> output(shape.DimensionsProduct(), 0);
RETURN_IF_ERROR(
ConvertFromPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
shape.DimensionsProduct(), &output_buffer));
auto builder = NewConverterBuilder(nullptr);
TensorObjectDef input_def;
input_def.object_def.data_type = DataType::FLOAT32;
input_def.object_def.data_layout = DataLayout::DHWC4;
input_def.object_def.object_type = ObjectType::OPENGL_SSBO;
input_def.dimensions = ToDimensions(shape);
TensorObjectDef output_def = input_def;
output_def.object_def.data_layout = DataLayout::BHWC;
std::unique_ptr<TensorObjectConverter> converter;
RETURN_IF_ERROR(builder->MakeConverter(input_def, output_def, &converter));
RETURN_IF_ERROR(converter->Convert(OpenGlBuffer{input_buffer.id()},
OpenGlBuffer{output_buffer.id()}));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(FromTensor, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
auto status = RunFromTensorTest(shape);
EXPECT_TRUE(status.ok()) << status << ", shape = " << shape.h << " "
<< shape.w << " " << shape.c;
}
}
}
}
absl::Status RunToTensorTest(const BHWC& shape) {
std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
RETURN_IF_ERROR(
ConvertToPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
GetElementsSizeForPHWC4(shape), &output_buffer));
auto builder = NewConverterBuilder(nullptr);
TensorObjectDef input_def;
input_def.object_def.data_type = DataType::FLOAT32;
input_def.object_def.data_layout = DataLayout::BHWC;
input_def.object_def.object_type = ObjectType::OPENGL_SSBO;
input_def.dimensions = ToDimensions(shape);
TensorObjectDef output_def = input_def;
output_def.object_def.data_layout = DataLayout::DHWC4;
std::unique_ptr<TensorObjectConverter> converter;
RETURN_IF_ERROR(builder->MakeConverter(input_def, output_def, &converter));
RETURN_IF_ERROR(converter->Convert(OpenGlBuffer{input_buffer.id()},
OpenGlBuffer{output_buffer.id()}));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(ToTensor, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
auto status = RunToTensorTest(shape);
EXPECT_TRUE(status.ok()) << status << ", shape = " << shape.h << " "
<< shape.w << " " << shape.c;
}
}
}
}
}
}
}
} |
995 | cpp | tensorflow/tensorflow | model | tensorflow/lite/delegates/gpu/common/model.cc | tensorflow/lite/core/model_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_MODEL_H_
#define TENSORFLOW_CORE_FRAMEWORK_MODEL_H_
#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <limits>
#include <list>
#include <memory>
#include <string>
#include <optional>
#include <thread>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/optional.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/histogram/histogram.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
namespace model {
constexpr int64_t kAutotune = -1;
constexpr char kParallelism[] = "parallelism";
constexpr char kBufferSize[] = "buffer_size";
constexpr char kCycleLength[] = "cycle_length";
constexpr char kDeterministic[] = "deterministic";
constexpr char kMaxBufferedElements[] = "max_buffered_elements";
constexpr char kModelInputTimeKey[] = "model_input_time";
constexpr double kRamBudgetShare = 0.5;
constexpr double kProcessingTimeEmaWeight = 0.1;
enum class TraversalOrder {
BFS = 0,
REVERSE_BFS = 1,
};
struct SharedState {
public:
SharedState(int64_t value, std::shared_ptr<mutex> mu,
std::shared_ptr<condition_variable> cond_var)
: value(value),
mu(std::move(mu)),
cond_var(std::move(cond_var)),
tunable(value == kAutotune) {}
double value;
const std::shared_ptr<mutex> mu;
const std::shared_ptr<condition_variable> cond_var;
const bool tunable;
};
struct Parameter {
Parameter(const string& name, std::shared_ptr<SharedState> state, double min,
double max)
: name(name),
value(state == nullptr || state->value == kAutotune ? min
: state->value),
min(min),
max(max),
state(std::move(state)) {}
explicit Parameter(const std::shared_ptr<Parameter> parameter)
: name(parameter->name),
value(parameter->value),
min(parameter->min),
max(parameter->max),
state(parameter->state) {}
const string name;
double value;
const double min;
const double max;
std::shared_ptr<SharedState> state;
};
std::shared_ptr<Parameter> MakeParameter(const string& name,
std::shared_ptr<SharedState> state,
double min, double max);
std::shared_ptr<Parameter> MakeParameter(const string& name,
std::shared_ptr<SharedState> state,
double min, double max, double value);
std::shared_ptr<Parameter> MakeNonTunableParameter(const string& name,
double value);
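// Arbitrates a single RAM budget between the legacy prefetch autotuner and
// the model-based autotuner; the Request* calls fail or clamp their
// allocation rather than exceed the remaining budget.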
class RamBudgetManager {
public:
explicit RamBudgetManager(int64_t budget) : budget_(budget) {
if (budget <= 0) {
LOG(WARNING) << "RAM budget is " << budget
<< " which could prevent autotuner from properly adjusting "
"buffer sizes.";
}
}
bool RequestModelAllocation(int64_t total_bytes) {
mutex_lock l(mu_);
if (total_bytes > budget_ - legacy_prefetch_allocated_) {
return false;
}
model_allocated_ = total_bytes;
return true;
}
int64_t RequestModelBytes(int64_t delta_elements, double element_size) {
if (delta_elements == 0) {
return 0;
}
int64_t allocated_delta_elements = delta_elements;
mutex_lock l(mu_);
if (delta_elements > 0) {
int64_t max_delta_elements = static_cast<int64_t>(
(budget_ - legacy_prefetch_allocated_ - model_allocated_) /
element_size);
if (max_delta_elements < 0) {
return 0;
}
allocated_delta_elements = std::min(max_delta_elements, delta_elements);
}
model_allocated_ +=
static_cast<int64_t>(allocated_delta_elements * element_size);
return allocated_delta_elements;
}
bool RequestLegacyPrefetchBytes(int64_t delta_bytes) {
mutex_lock l(mu_);
if (delta_bytes > budget_ - legacy_prefetch_allocated_ - model_allocated_) {
return false;
}
legacy_prefetch_allocated_ += delta_bytes;
return true;
}
int64_t AvailableModelRam() const {
tf_shared_lock l(mu_);
return budget_ - legacy_prefetch_allocated_;
}
void UpdateBudget(int64_t budget) {
mutex_lock l(mu_);
budget_ = budget;
VLOG(2) << "Updated ram budget to " << budget;
}
std::string DebugString() {
mutex_lock l(mu_);
return absl::StrCat("RamBudgetManager: budget_: ", budget_,
" prefetch allocated: ", legacy_prefetch_allocated_,
" model allocated: ", model_allocated_);
}
private:
mutable mutex mu_;
int64_t budget_ TF_GUARDED_BY(mu_) = 0;
int64_t legacy_prefetch_allocated_ TF_GUARDED_BY(mu_) = 0;
int64_t model_allocated_ TF_GUARDED_BY(mu_) = 0;
};
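// A node of the performance model, one per modeled dataset iterator. Tracks
// processing time, buffer occupancy and tunable parameters, and provides the
// output-time and processing-time estimates the autotuner optimizes.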
class Node {
public:
struct Args {
int64_t id;
string name;
std::shared_ptr<Node> output;
};
using Factory = std::function<std::shared_ptr<Node>(Args)>;
using NodeVector = std::vector<std::shared_ptr<Node>>;
using NodePairList =
std::list<std::pair<std::shared_ptr<Node>, std::shared_ptr<Node>>>;
using ModelParameters =
std::vector<std::pair<string, std::shared_ptr<Parameter>>>;
using NodeValues = absl::flat_hash_map<string, double>;
using ParameterGradients =
absl::flat_hash_map<std::pair<string, string>, double>;
explicit Node(Args args)
: id_(args.id),
name_(std::move(args.name)),
autotune_(true),
buffered_bytes_(0),
peak_buffered_bytes_(0),
buffered_elements_(0),
buffered_elements_low_(std::numeric_limits<int64_t>::max()),
buffered_elements_high_(std::numeric_limits<int64_t>::min()),
bytes_consumed_(0),
bytes_produced_(0),
num_elements_(0),
processing_time_(0),
record_metrics_(true),
metrics_(name_),
output_(args.output.get()),
output_weak_ptr_(args.output) {}
virtual ~Node() {
std::deque<std::shared_ptr<Node>> queue;
{
mutex_lock l(mu_);
while (!inputs_.empty()) {
queue.push_back(inputs_.front());
inputs_.pop_front();
}
}
while (!queue.empty()) {
auto node = queue.back();
queue.pop_back();
{
mutex_lock l(node->mu_);
while (!node->inputs_.empty()) {
queue.push_back(node->inputs_.front());
node->inputs_.pop_front();
}
}
}
FlushMetrics();
}
void add_input(std::shared_ptr<Node> node) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
inputs_.push_back(node);
}
void add_processing_time(int64_t delta) TF_LOCKS_EXCLUDED(mu_) {
processing_time_ += delta;
}
bool autotune() const TF_LOCKS_EXCLUDED(mu_) { return autotune_; }
int64_t buffered_bytes() const TF_LOCKS_EXCLUDED(mu_) {
return buffered_bytes_;
}
int64_t peak_buffered_bytes() const TF_LOCKS_EXCLUDED(mu_) {
return peak_buffered_bytes_;
}
int64_t buffered_elements() const TF_LOCKS_EXCLUDED(mu_) {
return buffered_elements_;
}
int64_t buffered_elements_low() const TF_LOCKS_EXCLUDED(mu_) {
return buffered_elements_low_;
}
int64_t buffered_elements_high() const TF_LOCKS_EXCLUDED(mu_) {
return buffered_elements_high_;
}
int64_t bytes_consumed() const TF_LOCKS_EXCLUDED(mu_) {
return bytes_consumed_;
}
int64_t bytes_produced() const TF_LOCKS_EXCLUDED(mu_) {
return bytes_produced_;
}
bool has_tunable_parameters() const TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock l(mu_);
for (const auto& pair : parameters_) {
if (pair.second->state->tunable) return true;
}
return false;
}
int64_t id() const TF_LOCKS_EXCLUDED(mu_) { return id_; }
std::list<std::shared_ptr<Node>> inputs() const TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock l(mu_);
return inputs_;
}
string long_name() const { return strings::StrCat(name_, "(id:", id_, ")"); }
const string& name() const { return name_; }
int64_t num_elements() const TF_LOCKS_EXCLUDED(mu_) { return num_elements_; }
Node* output() const { return output_; }
std::shared_ptr<Node> output_shared() { return output_weak_ptr_.lock(); }
double parameter_value(const string& name) const TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock l(mu_);
return parameters_.at(name)->state->value;
}
int64_t processing_time() const TF_LOCKS_EXCLUDED(mu_) {
return processing_time_;
}
void record_bytes_consumed(int64_t num_bytes) {
bytes_consumed_ += num_bytes;
}
void record_bytes_produced(int64_t num_bytes) {
bytes_produced_ += num_bytes;
}
void record_buffer_event(int64_t bytes_delta, int64_t elements_delta) {
buffered_bytes_ += bytes_delta;
peak_buffered_bytes_.store(std::max(peak_buffered_bytes_, buffered_bytes_));
buffered_elements_ += elements_delta;
if (IsAsync()) {
int64_t low_watermark =
std::min(buffered_elements_low_, buffered_elements_);
buffered_elements_low_ = low_watermark;
int64_t high_watermark =
std::max(buffered_elements_high_, buffered_elements_);
buffered_elements_high_ = high_watermark;
}
}
void record_element() TF_LOCKS_EXCLUDED(mu_) {
num_elements_++;
{
mutex_lock l(mu_);
UpdateProcessingTimeEma();
}
}
void record_start(int64_t time_nanos) TF_LOCKS_EXCLUDED(mu_) {
DCHECK_EQ(work_start_, 0);
work_start_ = time_nanos;
}
void record_stop(int64_t time_nanos) TF_LOCKS_EXCLUDED(mu_) {
if (work_start_ != 0) {
processing_time_ += time_nanos - work_start_;
work_start_ = 0;
} else {
VLOG(1) << "Encountered a stop event without a matching start event.";
}
}
bool is_recording() TF_LOCKS_EXCLUDED(mu_) { return work_start_ > 0; }
void remove_input(std::shared_ptr<Node> input) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
inputs_.remove(input);
}
void set_autotune(bool autotune) TF_LOCKS_EXCLUDED(mu_) {
autotune_.store(autotune);
}
void ResetBufferWatermarks() {
if (!IsAsync()) {
return;
}
int64_t current_buffer_size = buffered_elements_;
buffered_elements_low_ = current_buffer_size;
buffered_elements_high_ = current_buffer_size;
}
virtual bool IsAsync() const { return false; }
virtual double Ratio() const { return 1.0; }
virtual double ComputeSelfTime() const;
absl::StatusOr<double> ParameterValue(const std::string& parameter_name) const
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock l(mu_);
if (parameters_.contains(parameter_name)) {
return parameters_.at(parameter_name)->value;
}
return errors::NotFound("Parameter ", parameter_name,
" was not found in model node ", long_name());
}
static double ComputeWaitTime(double producer_time, double consumer_time,
double buffer_size,
double* producer_time_derivative,
double* consumer_time_derivative,
double* buffer_size_derivative);
ModelParameters CollectTunableParameters() const TF_LOCKS_EXCLUDED(mu_);
ModelParameters CollectNodeTunableParameters() const TF_LOCKS_EXCLUDED(mu_);
string DebugString() const TF_LOCKS_EXCLUDED(mu_);
void FlushMetrics() TF_LOCKS_EXCLUDED(mu_);
double OutputTime(NodeValues* input_times,
ParameterGradients* gradients) const TF_LOCKS_EXCLUDED(mu_);
std::shared_ptr<Node> Snapshot() const TF_LOCKS_EXCLUDED(mu_);
double SelfProcessingTime() const TF_LOCKS_EXCLUDED(mu_);
double TotalBufferedBytes() const TF_LOCKS_EXCLUDED(mu_);
double TotalMaximumBufferedBytes() const TF_LOCKS_EXCLUDED(mu_);
double TotalProcessingTime(NodeValues* processing_times)
TF_LOCKS_EXCLUDED(mu_);
virtual Status ToProto(ModelProto::Node* node_proto) const;
static Status FromProto(ModelProto::Node node_proto,
std::shared_ptr<Node> output,
std::shared_ptr<Node>* node);
NodeVector CollectNodes(TraversalOrder order,
bool collect_node(const std::shared_ptr<Node>)) const
TF_LOCKS_EXCLUDED(mu_);
bool TryDownsizeBuffer();
void CollectBufferParametersToUpsize(
absl::flat_hash_map<Node*, Parameter*>& node_parameters);
double AverageBufferedElementSize() const {
tf_shared_lock l(mu_);
return AverageBufferedElementSizeLocked();
}
void SyncStateValuesToParameterValues(const std::string& parameter_name);
void SetEstimatedElementSize(std::optional<int64_t> estimated_element_size) {
mutex_lock l(mu_);
estimated_element_size_ = estimated_element_size;
}
protected:
class Metrics {
public:
explicit Metrics(const string& name)
: bytes_consumed_counter_(metrics::GetTFDataBytesConsumedCounter(name)),
bytes_produced_counter_(metrics::GetTFDataBytesProducedCounter(name)),
num_elements_counter_(metrics::GetTFDataElementsCounter(name)),
recorded_bytes_consumed_(0),
recorded_bytes_produced_(0),
recorded_num_elements_(0) {}
void record_bytes_consumed(int64_t total_bytes) {
int64_t delta =
total_bytes - recorded_bytes_consumed_.exchange(total_bytes);
bytes_consumed_counter_->IncrementBy(delta);
}
void record_bytes_produced(int64_t total_bytes) {
int64_t delta =
total_bytes - recorded_bytes_produced_.exchange(total_bytes);
bytes_produced_counter_->IncrementBy(delta);
}
void record_num_elements(int64_t total_elements) {
int64_t delta =
total_elements - recorded_num_elements_.exchange(total_elements);
num_elements_counter_->IncrementBy(delta);
}
private:
monitoring::CounterCell* const bytes_consumed_counter_;
monitoring::CounterCell* const bytes_produced_counter_;
monitoring::CounterCell* const num_elements_counter_;
std::atomic<int64_t> recorded_bytes_consumed_;
std::atomic<int64_t> recorded_bytes_produced_;
std::atomic<int64_t> recorded_num_elements_;
};
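  // Maintains an exponential moving average of per-element processing time;
  // the first observation seeds the average, later deltas are blended in with
  // weight kProcessingTimeEmaWeight.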
void UpdateProcessingTimeEma() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (previous_processing_time_ == 0) {
if (num_elements_ > 0) {
processing_time_ema_ =
static_cast<double>(processing_time_) /
static_cast<double>(num_elements_ + buffered_elements_);
} else {
processing_time_ema_ = static_cast<double>(processing_time_);
}
} else {
processing_time_ema_ =
(1.0 - kProcessingTimeEmaWeight) * processing_time_ema_ +
kProcessingTimeEmaWeight *
static_cast<double>(processing_time_ - previous_processing_time_);
}
previous_processing_time_ = processing_time_;
}
int64_t num_inputs() const TF_SHARED_LOCKS_REQUIRED(mu_) {
int64_t num_inputs = 0;
for (auto& input : inputs_) {
if (input->autotune()) {
++num_inputs;
}
}
return num_inputs;
}
virtual std::shared_ptr<Node> Clone(std::shared_ptr<Node> output) const
TF_SHARED_LOCKS_REQUIRED(mu_) = 0;
double AverageBufferedElementSizeLocked() const TF_SHARED_LOCKS_REQUIRED(mu_);
double OutputTimeForInputs(const NodeValues& output_times) const
TF_SHARED_LOCKS_REQUIRED(mu_);
double OutputTimeGradientsForInputs(const NodeValues& output_time_gradients)
const TF_SHARED_LOCKS_REQUIRED(mu_);
virtual void InputTimeLocked(NodeValues* input_times) const
TF_SHARED_LOCKS_REQUIRED(mu_) = 0;
virtual void OutputTimeLocked(const NodeValues& input_times,
ParameterGradients* gradients,
NodeValues* output_times,
NodeValues* output_time_gradients) const
TF_SHARED_LOCKS_REQUIRED(mu_) = 0;
double TotalProcessingTimeForInputs(const NodeValues& total_processing_times)
TF_SHARED_LOCKS_REQUIRED(mu_);
double SelfProcessingTimeLocked() const TF_SHARED_LOCKS_REQUIRED(mu_);
virtual void TotalProcessingTimeLocked(NodeValues* processing_times,
NodeValues* total_processing_times)
TF_SHARED_LOCKS_REQUIRED(mu_) = 0;
NodeVector CollectNodesLocked(TraversalOrder order,
bool collect_node(const std::shared_ptr<Node>))
const TF_SHARED_LOCKS_REQUIRED(mu_);
ModelParameters CollectTunableParametersLocked() const
TF_SHARED_LOCKS_REQUIRED(mu_);
void CollectTunableParametersHelper(ModelParameters* parameters) const
TF_SHARED_LOCKS_REQUIRED(mu_);
void DebugStringHelper(absl::flat_hash_map<string, string>* debug_strings)
const TF_SHARED_LOCKS_REQUIRED(mu_);
std::shared_ptr<Node> SnapshotHelper(std::shared_ptr<Node> cloned_output,
NodePairList* node_pairs) const;
void TotalBufferedBytesHelper(NodeValues* total_bytes) const
TF_SHARED_LOCKS_REQUIRED(mu_);
void TotalMaximumBufferedBytesHelper(NodeValues* total_bytes) const
TF_SHARED_LOCKS_REQUIRED(mu_);
virtual double MaximumBufferedBytes() const TF_SHARED_LOCKS_REQUIRED(mu_);
static Status FromProtoHelper(ModelProto::Node node_proto,
std::shared_ptr<Node> node);
static thread_local int64_t work_start_;
mutable mutex mu_;
const int64_t id_;
const string name | #include "tensorflow/core/framework/model.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/model.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace model {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::AllOf;
using ::testing::HasSubstr;
std::function<int64_t()> CpuBudgetFunc(int64_t budget) {
return [budget]() { return budget; };
}
std::function<int64_t(int64_t)> RamBudgetFunc(int64_t budget) {
return [budget](int64_t) { return budget; };
}
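// Returns how many of the collected tunable parameters belong to `node_name`.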
int64_t CountParametersOnNode(const string& node_name,
const Model::ModelParameters& parameters) {
int64_t cnt = 0;
for (const auto& pair : parameters) {
if (pair.first == node_name) {
cnt++;
}
}
return cnt;
}
class AsyncInterleaveManyTest
: public ::testing::TestWithParam<std::tuple<int64_t, double>> {};
TEST_P(AsyncInterleaveManyTest, Model) {
const int64_t parallelism = std::get<0>(GetParam());
const double input_time = std::get<1>(GetParam());
std::shared_ptr<Node> async_interleave_many =
model::MakeAsyncInterleaveManyNode(
{0, "async_interleave_many", nullptr},
{model::MakeParameter("parallelism",
std::make_shared<SharedState>(
parallelism, nullptr, nullptr),
1,
8),
model::MakeParameter(kCycleLength, nullptr,
1,
1)});
std::shared_ptr<Node> meta_source =
model::MakeSourceNode({1, "meta_source", async_interleave_many});
async_interleave_many->add_input(meta_source);
auto cleanup_meta = gtl::MakeCleanup([async_interleave_many, meta_source]() {
async_interleave_many->remove_input(meta_source);
});
std::shared_ptr<Node> source1 =
model::MakeSourceNode({2, "source1", async_interleave_many});
async_interleave_many->add_input(source1);
auto cleanup1 = gtl::MakeCleanup([async_interleave_many, source1]() {
async_interleave_many->remove_input(source1);
});
std::shared_ptr<Node> source2 =
model::MakeSourceNode({3, "source2", async_interleave_many});
async_interleave_many->add_input(source2);
auto cleanup2 = gtl::MakeCleanup([async_interleave_many, source2]() {
async_interleave_many->remove_input(source2);
});
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = input_time;
EXPECT_EQ(async_interleave_many->buffered_bytes(), 0);
EXPECT_EQ(async_interleave_many->peak_buffered_bytes(), 0);
EXPECT_EQ(async_interleave_many->TotalBufferedBytes(), 0);
EXPECT_EQ(async_interleave_many->TotalMaximumBufferedBytes(), 0);
async_interleave_many->record_buffer_event(110, 10);
EXPECT_EQ(async_interleave_many->buffered_bytes(), 110);
EXPECT_EQ(async_interleave_many->peak_buffered_bytes(), 110);
EXPECT_EQ(async_interleave_many->TotalBufferedBytes(), 110);
EXPECT_EQ(async_interleave_many->TotalMaximumBufferedBytes(),
110 * parallelism / 10);
async_interleave_many->add_processing_time(100);
EXPECT_EQ(async_interleave_many->processing_time(), 100);
EXPECT_EQ(
async_interleave_many->TotalProcessingTime(nullptr),
0);
EXPECT_EQ(async_interleave_many->OutputTime(&input_times, nullptr), 0);
async_interleave_many->record_element();
EXPECT_EQ(async_interleave_many->num_elements(), 1);
EXPECT_EQ(
async_interleave_many->TotalProcessingTime(nullptr),
100);
EXPECT_LE(async_interleave_many->OutputTime(&input_times, nullptr), 100);
EXPECT_GE(async_interleave_many->OutputTime(&input_times, nullptr), 0);
source1->add_processing_time(200);
source2->add_processing_time(300);
EXPECT_EQ(
async_interleave_many->TotalProcessingTime(nullptr),
100);
EXPECT_LE(async_interleave_many->OutputTime(&input_times, nullptr), 100);
EXPECT_GE(async_interleave_many->OutputTime(&input_times, nullptr), 0);
source1->record_element();
source2->record_element();
EXPECT_EQ(
async_interleave_many->TotalProcessingTime(nullptr),
100 + 250);
EXPECT_LE(async_interleave_many->OutputTime(&input_times, nullptr),
100 + 250 / parallelism);
EXPECT_GE(async_interleave_many->OutputTime(&input_times, nullptr), 0);
async_interleave_many->record_element();
EXPECT_EQ(
async_interleave_many->TotalProcessingTime(nullptr),
50 + 250);
EXPECT_LE(async_interleave_many->OutputTime(&input_times, nullptr),
50 + 250 / parallelism);
EXPECT_GE(async_interleave_many->OutputTime(&input_times, nullptr), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, AsyncInterleaveManyTest,
::testing::Combine(::testing::Values(1, 2),
::testing::Values(0, 50, 100,
200)));
class AsyncKnownRatioTest
: public ::testing::TestWithParam<std::tuple<int64_t, double, int64_t>> {};
TEST_P(AsyncKnownRatioTest, Model) {
const int64_t parallelism = std::get<0>(GetParam());
const double input_time = std::get<1>(GetParam());
const int64_t num_inputs_per_output = std::get<2>(GetParam());
std::shared_ptr<Node> async_known_many = model::MakeAsyncKnownRatioNode(
{0, "async_known_many", nullptr}, num_inputs_per_output,
{model::MakeParameter("parallelism",
std::make_shared<SharedState>(parallelism,
nullptr, nullptr),
1,
16)});
std::shared_ptr<Node> source1 =
model::MakeSourceNode({1, "source1", async_known_many});
async_known_many->add_input(source1);
std::shared_ptr<Node> source2 =
model::MakeSourceNode({2, "source2", async_known_many});
async_known_many->add_input(source2);
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = input_time;
EXPECT_EQ(async_known_many->buffered_bytes(), 0);
EXPECT_EQ(async_known_many->peak_buffered_bytes(), 0);
EXPECT_EQ(async_known_many->TotalBufferedBytes(), 0);
EXPECT_EQ(async_known_many->TotalMaximumBufferedBytes(), 0);
async_known_many->record_buffer_event(110, 10);
EXPECT_EQ(async_known_many->buffered_bytes(), 110);
EXPECT_EQ(async_known_many->peak_buffered_bytes(), 110);
EXPECT_EQ(async_known_many->TotalBufferedBytes(), 110);
EXPECT_EQ(async_known_many->TotalMaximumBufferedBytes(),
num_inputs_per_output == 0
? 110.0 * parallelism / 10
: 110.0 * parallelism / 10 / num_inputs_per_output);
source1->add_processing_time(100);
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
0);
EXPECT_EQ(async_known_many->OutputTime(&input_times, nullptr), 0);
source2->add_processing_time(200);
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
0);
EXPECT_EQ(async_known_many->OutputTime(&input_times, nullptr), 0);
source1->record_element();
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * 100);
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * 100);
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
source2->record_element();
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (100 + 200));
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (100 + 200));
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
source1->record_element();
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 200));
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 200));
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
source2->record_element();
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100));
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100));
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
async_known_many->add_processing_time(128);
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100));
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100));
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
async_known_many->record_element();
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100) + 128);
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100) + 128 / parallelism);
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
async_known_many->record_element();
EXPECT_EQ(async_known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100) + 64);
EXPECT_LE(async_known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100) + 64 / parallelism);
EXPECT_GE(async_known_many->OutputTime(&input_times, nullptr), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, AsyncKnownRatioTest,
::testing::Combine(::testing::Values(1, 2, 4, 8),
::testing::Values(0, 50, 100, 200),
::testing::Values(0, 1, 2, 4)));
TEST(AsyncKnownRatioTest, LegacyPrefetchAutotuneShouldBeExportedAsTunable) {
static constexpr int IRRELEVANT_MIN = 1;
constexpr int IRRELEVANT_MAX = 16;
constexpr int IRRELEVANT_VALUE = 1;
const Node::Args irrelevant_args = {0, "async_known_many", nullptr};
std::shared_ptr<Node> async_known_many = model::MakeAsyncKnownRatioNode(
irrelevant_args, 100,
{model::MakeParameter(kBufferSize,
std::make_shared<SharedState>(IRRELEVANT_VALUE,
nullptr,
nullptr),
IRRELEVANT_MIN, IRRELEVANT_MAX)},
true);
std::shared_ptr<Node> cloned = async_known_many->Snapshot();
ModelProto::Node node;
ASSERT_EQ(cloned->ToProto(&node), absl::OkStatus());
ASSERT_EQ(node.parameters().size(), 1);
EXPECT_TRUE(node.parameters(0).tunable());
}
TEST(InterleaveManyTest, Model) {
auto parameter =
model::MakeParameter("cycle_length", nullptr, 1, 1);
std::shared_ptr<Node> interleave_many = model::MakeInterleaveManyNode(
{0, "interleave_many", nullptr},
{model::MakeParameter("cycle_length", nullptr, 1, 1)});
std::shared_ptr<Node> meta_source =
model::MakeSourceNode({1, "meta_source", interleave_many});
interleave_many->add_input(meta_source);
std::shared_ptr<Node> source1 =
model::MakeSourceNode({2, "source1", interleave_many});
interleave_many->add_input(source1);
std::shared_ptr<Node> source2 =
model::MakeSourceNode({3, "source2", interleave_many});
interleave_many->add_input(source2);
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = 0.0;
interleave_many->add_processing_time(100);
EXPECT_EQ(interleave_many->processing_time(), 100);
EXPECT_EQ(interleave_many->TotalProcessingTime(nullptr),
0);
EXPECT_EQ(interleave_many->OutputTime(&input_times, nullptr), 0);
interleave_many->record_element();
EXPECT_EQ(interleave_many->num_elements(), 1);
EXPECT_EQ(interleave_many->TotalProcessingTime(nullptr),
100);
EXPECT_EQ(interleave_many->OutputTime(&input_times, nullptr), 100);
source1->add_processing_time(200);
source2->add_processing_time(300);
EXPECT_EQ(interleave_many->TotalProcessingTime(nullptr),
100);
EXPECT_EQ(interleave_many->OutputTime(&input_times, nullptr), 100);
source1->record_element();
source2->record_element();
EXPECT_EQ(interleave_many->TotalProcessingTime(nullptr),
350);
EXPECT_EQ(interleave_many->OutputTime(&input_times, nullptr), 350);
interleave_many->record_element();
EXPECT_EQ(interleave_many->TotalProcessingTime(nullptr),
300);
EXPECT_EQ(interleave_many->OutputTime(&input_times, nullptr), 300);
}
class KnownRatioTest : public ::testing::TestWithParam<int64_t> {};
TEST_P(KnownRatioTest, Model) {
const int64_t num_inputs_per_output = GetParam();
std::shared_ptr<Node> known_many = model::MakeKnownRatioNode(
{0, "known_many", nullptr}, num_inputs_per_output);
std::shared_ptr<Node> source1 =
model::MakeSourceNode({1, "source1", known_many});
known_many->add_input(source1);
std::shared_ptr<Node> source2 =
model::MakeSourceNode({2, "source2", known_many});
known_many->add_input(source2);
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = 0.0;
source1->add_processing_time(100);
EXPECT_EQ(known_many->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr), 0);
source2->add_processing_time(200);
EXPECT_EQ(known_many->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr), 0);
source1->record_element();
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * 100);
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * 100);
source2->record_element();
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (100 + 200));
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (100 + 200));
source1->record_element();
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 200));
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 200));
source2->record_element();
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100));
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100));
known_many->add_processing_time(128);
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100));
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100));
known_many->record_element();
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100) + 128);
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100) + 128);
known_many->record_element();
EXPECT_EQ(known_many->TotalProcessingTime(nullptr),
num_inputs_per_output * (50 + 100) + 64);
EXPECT_EQ(known_many->OutputTime(&input_times, nullptr),
num_inputs_per_output * (50 + 100) + 64);
}
INSTANTIATE_TEST_SUITE_P(Test, KnownRatioTest, ::testing::Values(0, 1, 2, 4));
TEST(SourceTest, Model) {
std::shared_ptr<Node> source = model::MakeSourceNode({0, "source", nullptr});
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = 0.0;
source->add_processing_time(100);
EXPECT_EQ(source->processing_time(), 100);
EXPECT_EQ(source->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(source->OutputTime(&input_times, nullptr), 0);
source->record_element();
EXPECT_EQ(source->num_elements(), 1);
EXPECT_EQ(source->TotalProcessingTime(nullptr), 100);
EXPECT_EQ(source->OutputTime(&input_times, nullptr), 100);
source->record_element();
EXPECT_EQ(source->num_elements(), 2);
EXPECT_EQ(source->TotalProcessingTime(nullptr), 50);
EXPECT_EQ(source->OutputTime(&input_times, nullptr), 50);
}
TEST(UnknownRatioTest, Model) {
std::shared_ptr<Node> unknown_many =
model::MakeUnknownRatioNode({0, "unknown_many", nullptr});
std::shared_ptr<Node> source1 =
model::MakeSourceNode({1, "source1", unknown_many});
unknown_many->add_input(source1);
std::shared_ptr<Node> source2 =
model::MakeSourceNode({2, "source2", unknown_many});
unknown_many->add_input(source2);
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = 0.0;
unknown_many->add_processing_time(100);
EXPECT_EQ(unknown_many->processing_time(), 100);
EXPECT_EQ(unknown_many->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(unknown_many->OutputTime(&input_times, nullptr), 0);
unknown_many->record_element();
EXPECT_EQ(unknown_many->num_elements(), 1);
EXPECT_EQ(unknown_many->TotalProcessingTime(nullptr),
100);
EXPECT_EQ(unknown_many->OutputTime(&input_times, nullptr), 100);
source1->add_processing_time(100);
source2->add_processing_time(200);
EXPECT_EQ(unknown_many->TotalProcessingTime(nullptr),
100);
EXPECT_EQ(unknown_many->OutputTime(&input_times, nullptr), 100);
source1->record_element();
source2->record_element();
EXPECT_EQ(unknown_many->TotalProcessingTime(nullptr),
400);
EXPECT_EQ(unknown_many->OutputTime(&input_times, nullptr), 400);
unknown_many->record_element();
EXPECT_EQ(unknown_many->TotalProcessingTime(nullptr),
200);
EXPECT_EQ(unknown_many->OutputTime(&input_times, nullptr), 200);
}
class AsyncUnknownRatioTest
: public ::testing::TestWithParam<std::tuple<int64_t, double>> {};
TEST_P(AsyncUnknownRatioTest, Model) {
const int64_t parallelism = std::get<0>(GetParam());
const double input_time = std::get<1>(GetParam());
std::shared_ptr<Node> async_unknown_many = model::MakeAsyncUnknownRatioNode(
{0, "async_unknown_many", nullptr},
{model::MakeParameter("parallelism",
std::make_shared<SharedState>(parallelism,
nullptr, nullptr),
1,
16)});
std::shared_ptr<Node> source1 =
model::MakeSourceNode({1, "source1", async_unknown_many});
async_unknown_many->add_input(source1);
std::shared_ptr<Node> source2 =
model::MakeSourceNode({2, "source2", async_unknown_many});
async_unknown_many->add_input(source2);
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = input_time;
EXPECT_EQ(async_unknown_many->buffered_bytes(), 0);
EXPECT_EQ(async_unknown_many->peak_buffered_bytes(), 0);
EXPECT_EQ(async_unknown_many->TotalBufferedBytes(), 0);
EXPECT_EQ(async_unknown_many->TotalMaximumBufferedBytes(), 0);
async_unknown_many->record_buffer_event(110, 10);
EXPECT_EQ(async_unknown_many->buffered_bytes(), 110);
EXPECT_EQ(async_unknown_many->peak_buffered_bytes(), 110);
EXPECT_EQ(async_unknown_many->TotalBufferedBytes(), 110);
EXPECT_EQ(async_unknown_many->TotalMaximumBufferedBytes(),
110.0 * parallelism / 10);
source1->add_processing_time(100);
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(async_unknown_many->OutputTime(&input_times, nullptr), 0);
source2->add_processing_time(200);
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(async_unknown_many->OutputTime(&input_times, nullptr), 0);
source1->record_element();
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(async_unknown_many->OutputTime(&input_times, nullptr), 0);
async_unknown_many->record_element();
double ratio = 1.0;
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * 100);
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr), 100);
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr), 0);
source2->record_element();
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (100 + 200));
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (100 + 200));
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr), 0);
source2->record_element();
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (100 + 100));
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (100 + 100));
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr), 0);
source1->record_element();
ratio = 2.0;
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (50 + 100));
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (50 + 100));
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr), 0);
source2->record_element();
source2->record_element();
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (50 + 50));
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (50 + 50));
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr), 0);
async_unknown_many->add_processing_time(128);
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (50 + 50) + 128);
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (50 + 50) + 128 / parallelism);
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr),
128 / parallelism);
async_unknown_many->record_element();
ratio = 1.0;
EXPECT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (50 + 50) + 128 / 2);
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (50 + 50) + 128 / 2 / parallelism);
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr),
128 / 2 / parallelism);
async_unknown_many->record_element();
ratio = 2.0 / 3.0;
EXPECT_FLOAT_EQ(
async_unknown_many->TotalProcessingTime(nullptr),
ratio * (50 + 50) + 128 / 3.0);
EXPECT_LE(async_unknown_many->OutputTime(&input_times, nullptr),
ratio * (50 + 50) + 128 / 3.0 / parallelism);
EXPECT_GE(async_unknown_many->OutputTime(&input_times, nullptr),
128 / 3.0 / parallelism);
}
INSTANTIATE_TEST_SUITE_P(Test, AsyncUnknownRatioTest,
::testing::Combine(::testing::Values(1, 2, 4, 8),
::testing::Values(0, 50, 100,
200)));
TEST(UnknownTest, Model) {
std::shared_ptr<Node> unknown =
model::MakeUnknownNode({0, "unknown", nullptr});
std::shared_ptr<Node> source1 =
model::MakeSourceNode({1, "source1", unknown});
unknown->add_input(source1);
std::shared_ptr<Node> source2 =
model::MakeSourceNode({2, "source2", unknown});
unknown->add_input(source2);
Model::NodeValues input_times;
input_times[kModelInputTimeKey] = 0.0;
source1->add_processing_time(100);
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 0);
source2->add_processing_time(100);
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 0);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 0);
source1->record_element();
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 100);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 100);
source2->record_element();
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 200);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 200);
source1->record_element();
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 150);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 150);
source2->record_element();
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 100);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 100);
unknown->add_processing_time(100);
EXPECT_EQ(unknown->processing_time(), 100);
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 100);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 100);
unknown->record_element();
EXPECT_EQ(unknown->num_elements(), 1);
EXPECT_EQ(unknown->TotalProcessingTime(nullptr), 100);
EXPECT_EQ(unknown->OutputTime(&input_times, nullptr), 100);
}
TEST(BufferedBytesTest, Node) {
std::shared_ptr<Node> node = model::MakeAsyncInterleaveManyNode(
{-1, "TestNode", nullptr},
{model::MakeParameter(
"parallelism",
std::make_shared<SharedState>(3, nullptr, nullptr),
1, 7),
model::MakeParameter(kCycleLength, nullptr,
1,
1)});
EXPECT_EQ(node->id(), -1);
EXPECT_EQ(node->name(), "TestNode");
EXPECT_EQ(node->output(), nullptr);
EXPECT_EQ(node->buffered_bytes(), 0);
EXPECT_EQ(node->buffered_elements(), 0);
EXPECT_EQ(node->peak_buffered_bytes(), 0);
EXPECT_EQ(node->TotalBufferedBytes(), 0);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 0);
node->record_buffer_event(20, 1);
EXPECT_EQ(node->buffered_bytes(), 20);
EXPECT_EQ(node->peak_buffered_bytes(), 20);
EXPECT_EQ(node->buffered_elements(), 1);
EXPECT_EQ(node->TotalBufferedBytes(), 20);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 60);
node->record_buffer_event(10, 1);
EXPECT_EQ(node->buffered_bytes(), 30);
EXPECT_EQ(node->peak_buffered_bytes(), 30);
EXPECT_EQ(node->buffered_elements(), 2);
EXPECT_EQ(node->TotalBufferedBytes(), 30);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 45);
node->record_buffer_event(18, 1);
EXPECT_EQ(node->buffered_bytes(), 48);
EXPECT_EQ(node->peak_buffered_bytes(), 48);
EXPECT_EQ(node->buffered_elements(), 3);
EXPECT_EQ(node->bytes_produced(), 0);
EXPECT_EQ(node->num_elements(), 0);
EXPECT_EQ(node->TotalBufferedBytes(), 48);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 48);
node->record_buffer_event(-20, -1);
node->record_element();
node->record_bytes_produced(20);
EXPECT_EQ(node->buffered_bytes(), 28);
EXPECT_EQ(node->peak_buffered_bytes(), 48);
EXPECT_EQ(node->buffered_elements(), 2);
EXPECT_EQ(node->bytes_produced(), 20);
EXPECT_EQ(node->num_elements(), 1);
EXPECT_EQ(node->TotalBufferedBytes(), 28);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 51);
node->record_buffer_event(-10, -1);
node->record_element();
node->record_bytes_produced(10);
EXPECT_EQ(node->buffered_bytes(), 18);
EXPECT_EQ(node->peak_buffered_bytes(), 48);
EXPECT_EQ(node->buffered_elements(), 1);
EXPECT_EQ(node->bytes_produced(), 30);
EXPECT_EQ(node->num_elements(), 2);
EXPECT_EQ(node->TotalBufferedBytes(), 18);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 49.5);
EXPECT_EQ(node->processing_time(), 0);
node->record_start(1);
EXPECT_EQ(node->processing_time(), 0);
node->record_stop(41);
EXPECT_EQ(node->processing_time(), 40);
node->add_processing_time(2);
EXPECT_EQ(node->processing_time(), 42);
std::shared_ptr<Node> input = model::MakeAsyncKnownRatioNode(
{0, "TestInput", node}, 2,
{model::MakeParameter("parallelism",
std::make_shared<SharedState>(5, nullptr, nullptr),
0, 6)});
EXPECT_EQ(input->output(), node.get());
EXPECT_EQ(node->inputs().size(), 0);
node->add_input(input);
EXPECT_EQ(node->inputs().size(), 1);
EXPECT_EQ(node->inputs().front(), input);
input->record_buffer_event(28, 1);
EXPECT_EQ(node->bytes_consumed(), 0);
EXPECT_EQ(node->buffered_bytes(), 18);
EXPECT_EQ(node->peak_buffered_bytes(), 48);
EXPECT_EQ(node->TotalBufferedBytes(), 46);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 119.5);
input->record_buffer_event(-28, -1);
input->record_element();
input->record_bytes_produced(28);
node->record_bytes_consumed(28);
EXPECT_EQ(node->bytes_consumed(), 28);
EXPECT_EQ(node->buffered_bytes(), 18);
EXPECT_EQ(node->peak_buffered_bytes(), 48);
EXPECT_EQ(node->TotalBufferedBytes(), 18);
EXPECT_EQ(node->TotalMaximumBufferedBytes(), 119.5);
node->remove_input(input);
EXPECT_EQ(node->inputs().size(), 0);
}
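// Mirrors the model's prior-weighted estimate of per-element processing time:
// below 30 recorded elements the measurement is blended with `prior` using a
// weight of 1 / 2^(num_elements + 1); afterwards the raw measurement is used.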
double weighted_processing_time(int64_t num_elements, double processing_time,
double prior) {
if (num_elements < 30) {
double prior_weight = 1.0L / static_cast<double>(2 << num_elements);
return prior_weight * prior + (1.0L - prior_weight) * processing_time;
} else {
return processing_time;
}
}
TEST(TestManyElements, Model) {
std::shared_ptr<Node> interleave_many = model::MakeInterleaveManyNode(
{0, "interleave_many", nullptr},
{model::MakeParameter("cycle_length", nullptr, 1, 1)});
std::shared_ptr<Node> source1 =
model::MakeSourceNode({1, "source1", interleave_many});
interleave_many->add_input(source1);
interleave_many->add_processing_time(100);
interleave_many->record_element();
source1->add_processing_time(200);
for (int i = 0; i < 100; i++) {
source1->record_element();
}
EXPECT_LE(interleave_many->TotalProcessingTime(nullptr),
(weighted_processing_time(100, 2, 0)) + 100);
EXPECT_GE(interleave_many->TotalProcessingTime(nullptr),
0);
}
TEST(CollectAutotuneParametersWithElementsTest, Model) {
std::shared_ptr<Node> unknown =
model::MakeUnknownNode({0, "unknown", nullptr});
std::shared_ptr<Node> async_known_ratio = model::MakeAsyncKnownRatioNode(
{1, "source", unknown}, 2,
{model::MakeParameter("parallelism",
std::make_shared<SharedState>(
model::kAutotune, nullptr, nullptr),
1,
5)});
async_known_ratio->record_element();
unknown->add_input(async_known_ratio);
Model::ModelParameters parameters = unknown->CollectTunableParameters();
EXPECT_EQ(CountParametersOnNod |
996 | cpp | tensorflow/tensorflow | gpu_model | tensorflow/lite/delegates/gpu/common/gpu_model.cc | tensorflow/lite/delegates/gpu/cl/testing/gpu_model_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_GPU_MODEL_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_GPU_MODEL_H_
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_model_generated.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_hints.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
namespace tflite {
namespace gpu {
struct GpuNode {
std::unique_ptr<GPUOperation> gpu_operation;
std::vector<ValueId> inputs;
std::vector<ValueId> outputs;
std::string name;
GpuNode() = default;
GpuNode(GpuNode&& node) = default;
GpuNode& operator=(GpuNode&& node) = default;
GpuNode(const GpuNode&) = delete;
GpuNode& operator=(const GpuNode&) = delete;
};
struct CreateGpuModelInfo {
CalculationsPrecision precision;
TensorStorageType storage_type;
ModelHints hints;
absl::flat_hash_map<ValueId, TensorDescriptor> predefined;
absl::flat_hash_map<ValueId, GpuSpatialTensor*> external_immutable_tensors;
absl::flat_hash_map<ValueId, TensorDescriptor> external_mutable_tensors;
};
struct GpuModel {
std::vector<std::pair<ValueId, ValueId>> input_ids_and_refs;
std::vector<std::pair<ValueId, ValueId>> variable_ids_and_refs;
std::vector<std::pair<ValueId, ValueId>> output_ids_and_refs;
std::vector<GpuNode> nodes;
absl::flat_hash_map<ValueId, TensorDescriptor> tensors;
absl::flat_hash_map<ValueId, TensorDescriptor> const_tensors;
};
absl::Status GraphToGpuModel(const GraphFloat32& graph,
const CreateGpuModelInfo& create_info,
const GpuInfo& gpu_info, GpuModel* gpu_model);
flatbuffers::Offset<data::GpuModel> Encode(
const GpuModel& gpu_model, flatbuffers::FlatBufferBuilder* builder);
absl::Status Decode(const data::GpuModel* fb_gpu_model, GpuModel* gpu_model);
absl::Status RunGraphTransformsForGpuModel(GraphFloat32* graph);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/gpu_model.h"
#include <algorithm>
#include <any>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/operation_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/special_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/task/serialization_base.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/add_bias.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
namespace tflite {
namespace gpu {
namespace {
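// Returns true if all of the node's input tensors have already been produced.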
bool IsReady(const absl::flat_hash_set<ValueId>& ready_tensors,
const GpuNode& node) {
for (const ValueId in_id : node.inputs) {
if (ready_tensors.find(in_id) == ready_tensors.end()) {
return false;
}
}
return true;
}
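// Fuses `src` into `dst`: `dst` inherits src's extra inputs and its output,
// and the two GPU operations are chained via AddOperation.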
absl::Status MergeGpuNodes(const GpuInfo& gpu_info, GpuNode* src,
GpuNode* dst) {
for (int j = 1; j < src->inputs.size(); ++j) {
dst->inputs.push_back(src->inputs[j]);
}
dst->outputs[0] = src->outputs[0];
dst->name += " -> " + src->name;
return dst->gpu_operation->AddOperation(gpu_info, src->gpu_operation.get());
}
flatbuffers::Offset<data::TensorDescWithId> Encode(
const TensorDescriptor& desc, const ValueId& id,
flatbuffers::FlatBufferBuilder* builder) {
auto desc_fb = Encode(desc, builder);
data::TensorDescWithIdBuilder desc_builder(*builder);
desc_builder.add_desc(desc_fb);
desc_builder.add_id(id);
return desc_builder.Finish();
}
flatbuffers::Offset<data::GpuNode> Encode(
const GpuNode& node, flatbuffers::FlatBufferBuilder* builder) {
auto op_fb = Encode(*node.gpu_operation, builder);
std::vector<int32_t> in_ids(node.inputs.size());
for (int i = 0; i < in_ids.size(); ++i) {
in_ids[i] = node.inputs[i];
}
std::vector<int32_t> out_ids(node.outputs.size());
for (int i = 0; i < out_ids.size(); ++i) {
out_ids[i] = node.outputs[i];
}
auto in_ids_fb = builder->CreateVector(in_ids);
auto out_ids_fb = builder->CreateVector(out_ids);
auto name_fb = builder->CreateString(node.name);
data::GpuNodeBuilder node_builder(*builder);
node_builder.add_gpu_op(op_fb);
node_builder.add_input_ids(in_ids_fb);
node_builder.add_output_ids(out_ids_fb);
node_builder.add_name(name_fb);
return node_builder.Finish();
}
absl::Status Decode(const data::GpuNode* fb_node, GpuNode* node) {
GPUOperation op;
RETURN_IF_ERROR(Decode(fb_node->gpu_op(), &op));
node->gpu_operation = std::make_unique<GPUOperation>(std::move(op));
for (auto in_fb : *fb_node->input_ids()) {
node->inputs.push_back(in_fb);
}
for (auto out_fb : *fb_node->output_ids()) {
node->outputs.push_back(out_fb);
}
node->name = std::string(fb_node->name()->c_str(), fb_node->name()->size());
return absl::OkStatus();
}
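// ADD/MUL nodes with more than one input may have their inputs reordered for
// linking, provided none of the inputs is broadcast along any axis relative
// to the output shape.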
bool IsAssociativeLinkableOp(const Node& node,
const std::vector<Value*>& inputs,
const std::vector<Value*>& outputs) {
if (inputs.size() == 1) {
return false;
}
const OperationType op_type = OperationTypeFromString(node.operation.type);
if (op_type != OperationType::ADD && op_type != OperationType::MUL) {
return false;
}
const auto dst_shape = outputs[0]->tensor.shape;
for (int i = 0; i < inputs.size(); ++i) {
const auto src_shape = inputs[i]->tensor.shape;
if (dst_shape.b != src_shape.b && src_shape.b == 1) {
return false;
}
if (dst_shape.h != src_shape.h && src_shape.h == 1) {
return false;
}
if (dst_shape.w != src_shape.w && src_shape.w == 1) {
return false;
}
if (dst_shape.c != src_shape.c && src_shape.c == 1) {
return false;
}
}
return true;
}
absl::Status CheckExternalTensorDescription(const GpuInfo& gpu_info,
const TensorDescriptor& tensor_desc,
const BHWC& shape,
DataType data_type) {
if (tensor_desc.GetDataType() != data_type) {
return absl::InvalidArgumentError(
"Global precision and precision of predefined/external tensors must be "
"synchronized.");
}
if (tensor_desc.HasAxis(Axis::DEPTH)) {
return absl::InvalidArgumentError(
"Currently no support of Depth dimension in predefined/external "
"tensors.");
}
if (tensor_desc.HasAxis(Axis::BATCH) && shape.b == 1) {
return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
}
if (!tensor_desc.HasAxis(Axis::BATCH) && shape.b != 1) {
return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
}
if (!tensor_desc.CanCreateTensorWithShape(gpu_info, shape).ok()) {
return absl::UnavailableError(
"Current device can not allocate tensor with this shape for "
"predefined/external descriptor.");
}
return absl::OkStatus();
}
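// Hands out ValueIds for tensor descriptors and keeps the id -> descriptor
// mapping used while building the GpuModel.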
class TensorReserver {
public:
TensorReserver() : next_(0) {}
ValueId Add(const TensorDescriptor& dummy) {
reservations_[next_] = dummy;
return next_++;
}
void Add(ValueId id, const TensorDescriptor& dummy) {
reservations_[id] = dummy;
}
ValueId GetNewId() { return next_++; }
void SetNext(ValueId id) { next_ = id; }
TensorDescriptor Get(ValueId id) { return reservations_[id]; }
public:
absl::flat_hash_map<ValueId, TensorDescriptor> reservations_;
ValueId next_;
};
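// Chooses a storage type and layout for every graph tensor, honoring
// predefined/external descriptors, and registers the result with the
// reserver.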
absl::Status ReserveGraphTensors(const CreateGpuModelInfo& create_info,
const GpuInfo& gpu_info,
const GraphFloat32& graph,
TensorReserver* tensor_reserver) {
ValueId max_id = 0;
auto tensors = graph.values();
for (auto& t : tensors) {
auto data_type = DeduceDataTypeFromPrecision(create_info.precision);
if (t->tensor.type != DataType::FLOAT32 &&
t->tensor.type != DataType::FLOAT16) {
data_type = t->tensor.type;
}
const auto shape = graph.GetValue(t->id)->tensor.shape;
auto it_predefined = create_info.predefined.find(t->id);
auto it_immutable_external =
create_info.external_immutable_tensors.find(t->id);
auto it_mutable_external = create_info.external_mutable_tensors.find(t->id);
int external_categories_count = 0;
TensorDescriptor tensor_desc;
if (it_predefined != create_info.predefined.end()) {
external_categories_count++;
tensor_desc = it_predefined->second;
}
if (it_immutable_external != create_info.external_immutable_tensors.end()) {
external_categories_count++;
tensor_desc = it_immutable_external->second->GetDescriptor();
}
if (it_mutable_external != create_info.external_mutable_tensors.end()) {
external_categories_count++;
tensor_desc = it_mutable_external->second;
}
if (external_categories_count > 1) {
return absl::InvalidArgumentError(
"Tensors ids from predefined / external_immutable_tensors / "
"external_mutable_tensors should not intersect.");
}
if (external_categories_count == 1) {
if (!(graph.IsGraphInput(t->id) || graph.IsGraphOutput(t->id))) {
return absl::InvalidArgumentError(
"Currently external can be used only for graph inputs/outputs");
}
RETURN_IF_ERROR(CheckExternalTensorDescription(gpu_info, tensor_desc,
shape, data_type));
} else {
TensorStorageType storage_type = create_info.storage_type;
Layout layout = shape.b == 1 ? Layout::HWC : Layout::BHWC;
const bool can_use_single_texture =
storage_type == TensorStorageType::TEXTURE_2D ||
storage_type == TensorStorageType::TEXTURE_3D ||
storage_type == TensorStorageType::TEXTURE_ARRAY;
if (shape.c < 4 && can_use_single_texture &&
TensorDescriptor{data_type, TensorStorageType::SINGLE_TEXTURE_2D,
layout}
.CanCreateTensorWithShape(gpu_info, shape)
.ok()) {
storage_type = TensorStorageType::SINGLE_TEXTURE_2D;
}
tensor_desc = TensorDescriptor{data_type, storage_type, layout};
RETURN_IF_ERROR(
tensor_desc.UpdateToSupportedStorageType(gpu_info, shape));
if (gpu_info.IsApiMetal() &&
storage_type == TensorStorageType::TEXTURE_2D) {
if (!(gpu_info.IsApple() && gpu_info.apple_info.IsFamilyApple1())) {
tensor_desc.SetUseBufferForWriteOnlyTexture2d(true);
}
}
}
tensor_desc.SetBHWCShape(shape);
tensor_reserver->Add(t->id, tensor_desc);
max_id = std::max(max_id, t->id);
}
tensor_reserver->SetNext(max_id + 1);
return absl::OkStatus();
}
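// Lowers every graph node to one or more GPU operations, materializing
// constant tensors and remapping subgraph-local tensor ids to global ids.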
absl::Status ConvertOperations(const GpuInfo& gpu_info,
const GraphFloat32& graph,
const CreateGpuModelInfo& create_info,
TensorReserver* tensor_reserver,
GpuModel* gpu_model) {
std::map<ValueId, TensorDescriptor> tensor_descriptors;
const auto values = graph.values();
for (auto value : values) {
tensor_descriptors[value->id] = tensor_reserver->Get(value->id);
}
std::set<NodeId> consumed_nodes;
std::vector<Node*> graph_nodes = graph.nodes();
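  // For every tensor id, the index of the last node that wrote it
  // (-1 for graph inputs).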
  std::map<ValueId, int> tensor_usages;
for (const auto& input : gpu_model->input_ids_and_refs) {
tensor_usages[input.first] = -1;
}
std::vector<SharedWeightsConvDesc> shared_conv_weights;
std::vector<SharedWeightsConvDesc>* shared_conv_weights_ptr =
create_info.hints.Check(ModelHints::kReuseConvWeights)
? &shared_conv_weights
: nullptr;
for (int i = 0; i < graph_nodes.size(); ++i) {
const Node& node = *graph_nodes[i];
if (consumed_nodes.find(node.id) != consumed_nodes.end()) {
continue;
}
auto op_type = OperationTypeFromString(node.operation.type);
if (op_type == OperationType::CONSTANT) {
auto attr =
std::any_cast<ConstTensorAttributes>(node.operation.attributes);
auto outputs = graph.FindOutputs(node.id);
gpu_model->const_tensors[outputs[0]->id] =
tensor_reserver->Get(outputs[0]->id);
gpu_model->const_tensors[outputs[0]->id].UploadData(attr.tensor);
continue;
}
GPUOperationsSubgraph gpu_subgraph;
if (GPUSubgraphFromGraph(create_info.hints, gpu_info, create_info.precision,
graph, node.id, tensor_descriptors,
&consumed_nodes, &gpu_subgraph)
.ok()) {
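      // A special fused subgraph was selected; consumed_nodes and
      // gpu_subgraph are already populated, so there is nothing more to do.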
} else {
auto inputs = graph.FindInputs(node.id);
auto outputs = graph.FindOutputs(node.id);
if (IsAssociativeLinkableOp(node, inputs, outputs)) {
int latest_written_tensor_index = 0;
int last_usage = tensor_usages[inputs[0]->id];
for (int j = 1; j < inputs.size(); ++j) {
if (tensor_usages[inputs[j]->id] > last_usage) {
last_usage = tensor_usages[inputs[j]->id];
latest_written_tensor_index = j;
}
}
std::swap(inputs[0], inputs[latest_written_tensor_index]);
}
consumed_nodes.insert(node.id);
OperationDef op_def;
op_def.precision = create_info.precision;
for (int j = 0; j < inputs.size(); ++j) {
op_def.src_tensors.push_back(tensor_reserver->Get(inputs[j]->id));
}
for (int j = 0; j < outputs.size(); ++j) {
op_def.dst_tensors.push_back(tensor_reserver->Get(outputs[j]->id));
}
RETURN_IF_ERROR(GPUOperationFromNode(
gpu_info, op_def, create_info.hints, inputs, outputs, node,
shared_conv_weights_ptr, &gpu_subgraph));
}
absl::flat_hash_map<int, ValueId> mapping_to_global_ids;
for (int j = 0; j < gpu_subgraph.new_tensors.size(); ++j) {
const auto& t = gpu_subgraph.new_tensors[j];
if (!t.GetData().empty()) {
auto global_id = tensor_reserver->GetNewId();
gpu_model->const_tensors[global_id] =
std::move(gpu_subgraph.new_tensors[j]);
mapping_to_global_ids[j] = global_id;
} else {
auto global_id = tensor_reserver->Add(t);
mapping_to_global_ids[j] = global_id;
}
}
if (!shared_conv_weights.empty() && !mapping_to_global_ids.empty()) {
shared_conv_weights.back().RemapIds(mapping_to_global_ids);
}
for (auto& gpu_op : gpu_subgraph.operations) {
GpuNode gpu_node;
gpu_node.gpu_operation = std::move(gpu_op.operation);
gpu_node.inputs.resize(gpu_op.input_ids.size());
for (int j = 0; j < gpu_op.input_ids.size(); ++j) {
int id = gpu_op.input_ids[j];
if (id >= 0) {
gpu_node.inputs[j] = id;
} else {
gpu_node.inputs[j] = mapping_to_global_ids[-(id + 1)];
}
}
gpu_node.outputs.resize(gpu_op.output_ids.size());
for (int j = 0; j < gpu_op.output_ids.size(); ++j) {
int id = gpu_op.output_ids[j];
if (id >= 0) {
gpu_node.outputs[j] = id;
tensor_usages[id] = i;
} else {
gpu_node.outputs[j] = mapping_to_global_ids[-(id + 1)];
}
}
gpu_node.name = gpu_op.name;
gpu_model->nodes.push_back(std::move(gpu_node));
}
}
return absl::OkStatus();
}
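// Fuses chains of linkable elementwise nodes (one- and two-input roots) into
// a single GPUOperation when every fused intermediate tensor has exactly one
// consumer.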
absl::Status MergeElementwiseNodes(const GpuInfo& gpu_info,
GpuModel* gpu_model) {
auto& nodes = gpu_model->nodes;
for (int elem_root_index = 1; elem_root_index < nodes.size();
++elem_root_index) {
auto& elem_root = nodes[elem_root_index];
if (!(elem_root.inputs.size() == 1 || elem_root.inputs.size() == 2) ||
elem_root.outputs.size() != 1 ||
!elem_root.gpu_operation->IsLinkable()) {
continue;
}
std::map<int, int> prev_nodes;
for (int j = elem_root_index - 1; j >= 0; --j) {
for (int k = 0; k < elem_root.inputs.size(); ++k) {
if (elem_root.inputs[k] == nodes[j].outputs[0]) {
prev_nodes[k] = j;
break;
}
}
}
if (prev_nodes.size() == 1) {
if (elem_root.inputs.size() != 1) {
continue;
}
const int prev_first_node_index = prev_nodes[0];
auto& prev_node = nodes[prev_first_node_index];
if (prev_node.inputs.size() != 1 || prev_node.outputs.size() != 1 ||
!prev_node.gpu_operation->IsLinkable()) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(FuseSimpleElemWithSimpleElem(
gpu_info, std::move(*prev_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_first_node_index] = std::move(new_node);
elem_root_index = prev_first_node_index;
continue;
}
if (prev_nodes.size() == 2) {
if (elem_root.inputs.size() != 2 ||
elem_root.gpu_operation->GetElementwiseInputsCount() != 2) {
continue;
}
const int prev_first_node_index = prev_nodes[0];
const int prev_second_node_index = prev_nodes[1];
auto& prev_first_node = nodes[prev_first_node_index];
auto& prev_second_node = nodes[prev_second_node_index];
if (prev_first_node.gpu_operation->IsLinkable() &&
!prev_second_node.gpu_operation->IsLinkable() &&
prev_second_node.outputs.size() == 1 &&
prev_first_node.inputs.size() == 1 &&
prev_first_node.outputs.size() == 1) {
int first_node_parent_index = -1;
for (int j = prev_first_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
first_node_parent_index = j;
break;
}
}
if (first_node_parent_index == -1 ||
first_node_parent_index != prev_second_node_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsFirstInput(
gpu_info, std::move(*prev_first_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_first_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_first_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_first_node_index] = std::move(new_node);
elem_root_index = prev_first_node_index;
continue;
}
if (!prev_first_node.gpu_operation->IsLinkable() &&
prev_second_node.gpu_operation->IsLinkable() &&
prev_first_node.outputs.size() == 1 &&
prev_second_node.inputs.size() == 1 &&
prev_second_node.outputs.size() == 1) {
int second_node_parent_index = -1;
for (int j = prev_second_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
second_node_parent_index = j;
break;
}
}
if (second_node_parent_index == -1 ||
second_node_parent_index != prev_first_node_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[1]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsSecondInput(
gpu_info, std::move(*prev_second_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_second_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_second_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_second_node_index] = std::move(new_node);
elem_root_index = prev_second_node_index;
continue;
}
if (prev_first_node.gpu_operation->IsLinkable() &&
prev_second_node.gpu_operation->IsLinkable() &&
prev_first_node.inputs.size() == 1 &&
prev_first_node.outputs.size() == 1 &&
prev_second_node.inputs.size() == 1 &&
prev_second_node.outputs.size() == 1) {
int first_node_parent_index = -1;
for (int j = prev_first_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
first_node_parent_index = j;
break;
}
}
int second_node_parent_index = -1;
for (int j = prev_second_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
second_node_parent_index = j;
break;
}
}
if (first_node_parent_index == -1 || second_node_parent_index == -1 ||
first_node_parent_index != second_node_parent_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[1]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWith2SimpleElem(
gpu_info, std::move(*prev_first_node.gpu_operation.get()),
std::move(*prev_second_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_first_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_first_node.name + " -> " + prev_second_node.name +
" -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
int first_prev_node_index =
std::min(prev_first_node_index, prev_second_node_index);
int second_prev_node_index =
std::max(prev_first_node_index, prev_second_node_index);
nodes.erase(nodes.begin() + elem_root_index);
nodes.erase(nodes.begin() + second_prev_node_index);
nodes[first_prev_node_index] = std::move(new_node);
elem_root_index = first_prev_node_index - 1;
continue;
}
}
}
return absl::OkStatus();
}
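// Greedily merges each node with its single linkable consumer, skipping nodes
// whose outputs are graph outputs or that feed more than one consumer.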
absl::Status MergeNodes(const GpuInfo& gpu_info, GpuModel* gpu_model) {
absl::flat_hash_set<ValueId> ready_tensors;
absl::flat_hash_set<ValueId> output_tensors;
for (const auto& input : gpu_model->input_ids_and_refs) {
ready_tensors.insert(input.first);
}
for (const auto& output : gpu_model->output_ids_and_refs) {
output_tensors.insert(output.first);
}
auto& nodes = gpu_model->nodes;
for (int i = 0; i < nodes.size(); ++i) {
auto& node = nodes[i];
bool node_has_graph_output = false;
for (const auto& out_id : node.outputs) {
ready_tensors.insert(out_id);
if (output_tensors.find(out_id) != output_tensors.end()) {
node_has_graph_output = true;
}
}
if (node_has_graph_output || node.outputs.size() != 1) {
continue;
}
std::vector<int> next_nodes;
int link_index = 0;
for (int j = i + 1; j < nodes.size(); ++j) {
for (int k = 0; k < nodes[j].inputs.size(); ++k) {
if (nodes[j].inputs[k] == node.outputs[0]) {
next_nodes.push_back(j);
link_index = k;
}
}
}
if (next_nodes.size() != 1 || link_index != 0) {
continue;
}
auto& linkable_node = nodes[next_nodes[0]];
if (!linkable_node.gpu_operation->IsLinkable() ||
linkable_node.outputs.size() != 1 ||
!IsReady(ready_tensors, linkable_node)) {
continue;
}
RETURN_IF_ERROR(MergeGpuNodes(gpu_info, &linkable_node, &node));
nodes.erase(nodes.begin() + next_nodes[0]);
i -= 1;
}
return absl::OkStatus();
}
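// Records the graph's inputs, variable inputs and outputs together with their
// external reference ids.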
void CopyExternals(const GraphFloat32& graph, GpuModel* gpu_model) {
const auto inputs = graph.inputs();
for (const auto& value : inputs) {
gpu_model->input_ids_and_refs.push_back({value->id, value->tensor.ref});
}
const auto variable_inputs = graph.variable_inputs();
for (const auto& value : variable_inputs) {
gpu_model->variable_ids_and_refs.push_back({value->id, value->tensor.ref});
}
const auto outputs = graph.outputs();
for (const auto& value : outputs) {
gpu_model->output_ids_and | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_model_test_util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, LinkingConvolutionAndCosOp) {
auto status = TestLinkingConvolutionAndCosOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMul) {
auto status = TestLinkingConvolution2InputMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputBroadcastMul2InputMul) {
auto status = TestLinkingConvolution2InputBroadcastMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputBroadcastMul) {
auto status = TestLinkingConvolution2InputMul2InputBroadcastMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMulCos) {
auto status = TestLinkingConvolution2InputMul2InputMulCos(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanh2InputDiff) {
auto status = TestLinkingConvolutionFirstTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionSecondTanh2InputDiff) {
auto status = TestLinkingConvolutionSecondTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanhSecondCos2InputDiff) {
auto status = TestLinkingConvolutionFirstTanhSecondCos2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingComplex0) {
auto status = TestLinkingComplex0(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvElem2InputAddElemsOp) {
auto status = TestLinkingConvElem2InputAddElemsOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingSliceCastOp) {
auto status = TestLinkingSliceCastOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddAddMulOp) {
auto status = TestLinkingAddAddMulOp(&exec_env_,
true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddMulOp) {
auto status =
TestLinkingAddAddMulOp(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
997 | cpp | tensorflow/tensorflow | data_type | tensorflow/lite/delegates/gpu/common/data_type.cc | tensorflow/lite/delegates/gpu/common/data_type_test.cc | #ifndef XLA_STREAM_EXECUTOR_DATA_TYPE_H_
#define XLA_STREAM_EXECUTOR_DATA_TYPE_H_
#include <complex>
#include <cstdint>
#include "tsl/platform/ml_dtypes.h"
#include "tsl/protobuf/dnn.pb.h"
namespace Eigen {
struct bfloat16;
struct half;
}
namespace stream_executor {
namespace dnn {
template <typename T>
struct ToDataType;
template <>
struct ToDataType<tsl::float8_e4m3fn> {
static constexpr DataType value = DataType::kF8E4M3FN;
};
template <>
struct ToDataType<tsl::float8_e5m2> {
static constexpr DataType value = DataType::kF8E5M2;
};
template <>
struct ToDataType<tsl::float8_e4m3fnuz> {
static constexpr DataType value = DataType::kF8E4M3FNUZ;
};
template <>
struct ToDataType<tsl::float8_e5m2fnuz> {
static constexpr DataType value = DataType::kF8E5M2FNUZ;
};
template <>
struct ToDataType<float> {
static constexpr DataType value = DataType::kFloat;
};
template <>
struct ToDataType<double> {
static constexpr DataType value = DataType::kDouble;
};
template <>
struct ToDataType<Eigen::half> {
static constexpr DataType value = DataType::kHalf;
};
template <>
struct ToDataType<Eigen::bfloat16> {
static constexpr DataType value = DataType::kBF16;
};
template <>
struct ToDataType<int8_t> {
static constexpr DataType value = DataType::kInt8;
};
template <>
struct ToDataType<int32_t> {
static constexpr DataType value = DataType::kInt32;
};
template <>
struct ToDataType<int64_t> {
static constexpr DataType value = DataType::kInt64;
};
template <>
struct ToDataType<std::complex<float>> {
static constexpr DataType value = DataType::kComplexFloat;
};
template <>
struct ToDataType<std::complex<double>> {
static constexpr DataType value = DataType::kComplexDouble;
};
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <stddef.h>
#include <string>
#include "absl/strings/str_cat.h"
namespace tflite {
namespace gpu {
namespace {
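// Returns the scalar type when vec_size == 1, otherwise "<vec_type><vec_size>".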
std::string ToGlslType(const std::string& scalar_type,
const std::string& vec_type, int vec_size) {
return vec_size == 1 ? scalar_type : absl::StrCat(vec_type, vec_size);
}
std::string GetGlslPrecisionModifier(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
return "lowp ";
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return "mediump ";
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return "highp ";
case DataType::BOOL:
return "";
default:
return "";
}
}
}
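// Byte size of a single element of the given type; 0 for UNKNOWN.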
size_t SizeOf(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
case DataType::BOOL:
return 1;
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return 2;
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return 4;
case DataType::FLOAT64:
case DataType::INT64:
case DataType::UINT64:
return 8;
case DataType::UNKNOWN:
return 0;
}
return 0;
}
std::string ToString(DataType data_type) {
switch (data_type) {
case DataType::FLOAT16:
return "float16";
case DataType::FLOAT32:
return "float32";
case DataType::FLOAT64:
return "float64";
case DataType::INT16:
return "int16";
case DataType::INT32:
return "int32";
case DataType::INT64:
return "int64";
case DataType::INT8:
return "int8";
case DataType::UINT16:
return "uint16";
case DataType::UINT32:
return "uint32";
case DataType::UINT64:
return "uint64";
case DataType::UINT8:
return "uint8";
case DataType::BOOL:
return "bool";
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
std::string ToCLDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
std::string ToMetalDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
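// For texture storage, 8-bit and bool element types are widened to their
// 16-bit counterparts; float and 16/32-bit integer types keep their type,
// and anything else maps to UNKNOWN.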
DataType ToMetalTextureType(DataType data_type) {
switch (data_type) {
case DataType::FLOAT32:
case DataType::FLOAT16:
case DataType::INT32:
case DataType::INT16:
case DataType::UINT32:
case DataType::UINT16:
return data_type;
case DataType::INT8:
return DataType::INT16;
case DataType::UINT8:
case DataType::BOOL:
return DataType::UINT16;
default:
return DataType::UNKNOWN;
}
}
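// GLSL type names: FLOAT16 maps to float16_t / f16vecN only when
// explicit_fp16 is set; otherwise it falls back to 32-bit float with a
// mediump qualifier. Integer widths are collapsed to int/uint because the
// generated GLSL uses only 32-bit integer scalars.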
std::string ToGlslShaderDataType(DataType data_type, int vec_size,
bool add_precision, bool explicit_fp16) {
const std::string precision_modifier =
add_precision ? GetGlslPrecisionModifier(data_type) : "";
switch (data_type) {
case DataType::FLOAT16:
if (explicit_fp16) {
return ToGlslType("float16_t", "f16vec", vec_size);
} else {
return precision_modifier + ToGlslType("float", "vec", vec_size);
}
case DataType::FLOAT32:
return precision_modifier + ToGlslType("float", "vec", vec_size);
case DataType::FLOAT64:
return precision_modifier + ToGlslType("double", "dvec", vec_size);
case DataType::INT8:
case DataType::INT16:
case DataType::INT32:
case DataType::INT64:
return precision_modifier + ToGlslType("int", "ivec", vec_size);
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
case DataType::UINT64:
return precision_modifier + ToGlslType("uint", "uvec", vec_size);
case DataType::BOOL:
return ToGlslType("bool", "bvec", vec_size);
case DataType::UNKNOWN:
return "unknown";
}
return "unknown";
}
}
} |
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace {
TEST(DataTypeTest, GlslShaderDataTypes) {
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT16));
EXPECT_EQ("mediump float",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
false));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, false,
true));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
true));
EXPECT_EQ("vec4", ToGlslShaderDataType(DataType::FLOAT16, 4));
EXPECT_EQ("mediump vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
false));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, false,
true));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32));
EXPECT_EQ("highp float",
ToGlslShaderDataType(DataType::FLOAT32, 1, true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32, 1,
false));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2));
EXPECT_EQ("highp vec2",
ToGlslShaderDataType(DataType::FLOAT32, 2, true));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2,
false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT32, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT16, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT8, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, true));
EXPECT_EQ("highp int",
ToGlslShaderDataType(DataType::INT32, 1, true));
EXPECT_EQ("mediump int",
ToGlslShaderDataType(DataType::INT16, 1, true));
EXPECT_EQ("lowp int",
ToGlslShaderDataType(DataType::INT8, 1, true));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT32, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT16, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT8, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, true));
EXPECT_EQ("highp uint",
ToGlslShaderDataType(DataType::UINT32, 1, true));
EXPECT_EQ("mediump uint",
ToGlslShaderDataType(DataType::UINT16, 1, true));
EXPECT_EQ("lowp uint",
ToGlslShaderDataType(DataType::UINT8, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL));
EXPECT_EQ("bvec4", ToGlslShaderDataType(DataType::BOOL, 4));
EXPECT_EQ("bool",
ToGlslShaderDataType(DataType::BOOL, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL, 1,
false));
}
}
}
} |
998 | cpp | tensorflow/tensorflow | convert | tensorflow/lite/delegates/gpu/common/convert.cc | third_party/xla/xla/tests/convert_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_CONVERT_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_CONVERT_H_
#include <stdint.h>
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
absl::Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
absl::Span<float> out);
absl::Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
absl::Span<HalfBits> out);
uint32_t GetElementsSizeForPHWC4(const BHWC& shape);
absl::Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
absl::Span<float> out);
absl::Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in,
const BHWC& shape, absl::Span<float> out);
std::vector<float> ConvertToPHWC4(
const Tensor<BHWC, DataType::FLOAT32>& tensor);
std::vector<float> ConvertToPHWC4(const Tensor<HWC, DataType::FLOAT32>& tensor);
uint32_t GetElementsSizeForPIOHW4(const OHWI& shape);
absl::Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
absl::Span<float> out);
std::vector<float> ConvertToPIOHW4(
const Tensor<OHWI, DataType::FLOAT32>& tensor);
uint32_t GetElementsSizeForPHWO4I4(const OHWI& shape);
std::vector<float> ConvertToPHWO4I4(
const Tensor<OHWI, DataType::FLOAT32>& tensor);
std::vector<float> ConvertToPHWO4I4Transposed(
const Tensor<OHWI, DataType::FLOAT32>& tensor);
uint3 Get3DSizeForPHWO4I4(const OHWI& shape);
uint32_t GetElementsSizeForPHWO4I4(const IHWO& shape);
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
absl::Span<float> out);
std::vector<float> ConvertToPHWO4I4(
const Tensor<IHWO, DataType::FLOAT32>& tensor);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include <stdint.h>
#include <string.h>
#include <string>
#include <vector>
#include "fp16.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace {
constexpr int kPhwc4ChannelsInPlane = 4;
constexpr int kPhwo4i4ChannelsInPlane = 4;
constexpr int kPiohw4ChannelsInPlane = 4;
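// Packed layouts produced by the converters below:
//  * PHWC4   - activations: per batch, channels are grouped into planes of 4
//              and stored as [plane][height][width][4], zero-padded when the
//              channel count is not a multiple of 4.
//  * PIOHW4  - per-channel weights: the o*i products are grouped into planes
//              of 4 and stored as [plane][height][width][4].
//  * PHWO4I4 - convolution weights: output and input channels are both
//              grouped into planes of 4 and stored as
//              [o_plane][height][width][i_plane][4 outputs][4 inputs].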
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
absl::Span<float> out, bool reverse_space) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
}
float* output = out.data();
for (int p = 0; p < DivideRoundUp(shape.o, kPhwo4i4ChannelsInPlane); ++p) {
for (int h = 0; h < shape.h; ++h) {
for (int w = 0; w < shape.w; ++w) {
for (int c = 0; c < DivideRoundUp(shape.i, kPhwo4i4ChannelsInPlane);
++c) {
for (int co = 0; co < kPhwo4i4ChannelsInPlane; ++co) {
for (int ci = 0; ci < kPhwo4i4ChannelsInPlane; ++ci) {
float value = 0;
if (c * kPhwo4i4ChannelsInPlane + ci < shape.i &&
p * kPhwo4i4ChannelsInPlane + co < shape.o) {
int tensor_o = p * kPhwo4i4ChannelsInPlane + co;
int tensor_i = c * kPhwo4i4ChannelsInPlane + ci;
const int in_h = reverse_space ? shape.h - 1 - h : h;
const int in_w = reverse_space ? shape.w - 1 - w : w;
value = in[shape.LinearIndex({tensor_o, in_h, in_w, tensor_i})];
}
(*output++) = value;
}
}
}
}
}
}
return absl::OkStatus();
}
}
uint32_t GetElementsSizeForPHWO4I4(const OHWI& shape) {
return AlignByN(shape.i, kPhwo4i4ChannelsInPlane) *
AlignByN(shape.o, kPhwo4i4ChannelsInPlane) * shape.h * shape.w;
}
uint32_t GetElementsSizeForPHWO4I4(const IHWO& shape) {
return AlignByN(shape.i, kPhwo4i4ChannelsInPlane) *
AlignByN(shape.o, kPhwo4i4ChannelsInPlane) * shape.h * shape.w;
}
std::vector<float> ConvertToPHWO4I4(
const Tensor<OHWI, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWO4I4(tensor.shape));
ConvertToPHWO4I4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()),
false)
.IgnoreError();
return transposed;
}
std::vector<float> ConvertToPHWO4I4Transposed(
const Tensor<OHWI, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWO4I4(tensor.shape));
ConvertToPHWO4I4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()),
true)
.IgnoreError();
return transposed;
}
uint3 Get3DSizeForPHWO4I4(const OHWI& shape) {
return uint3(AlignByN(shape.i, 4), shape.h * shape.w,
DivideRoundUp(shape.o, 4));
}
absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
absl::Span<float> out) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWO4I4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
}
const int dst_depth = DivideRoundUp(shape.o, 4);
const int src_depth = DivideRoundUp(shape.i, 4);
float* output = out.data();
for (int f = 0; f < dst_depth; ++f) {
for (int y = 0; y < shape.h; ++y) {
for (int x = 0; x < shape.w; ++x) {
for (int ch = 0; ch < src_depth; ++ch) {
for (int co = 0; co < 4; ++co) {
for (int ci = 0; ci < 4; ++ci) {
const int src_channel = ch * 4 + ci;
const int dst_channel = f * 4 + co;
float value = 0;
if (src_channel < shape.i && dst_channel < shape.o) {
value = in[shape.LinearIndex({src_channel, y, x, dst_channel})];
}
(*output++) = value;
}
}
}
}
}
}
return absl::OkStatus();
}
std::vector<float> ConvertToPHWO4I4(
const Tensor<IHWO, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWO4I4(tensor.shape));
ConvertToPHWO4I4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
uint32_t GetElementsSizeForPIOHW4(const OHWI& shape) {
return AlignByN(shape.o * shape.i, kPiohw4ChannelsInPlane) * shape.h *
shape.w;
}
absl::Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
absl::Span<float> out) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPIOHW4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPIOHW4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPIOHW4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPIOHW4(shape)));
}
int32_t output_channels = shape.o * shape.i;
int32_t num_planes = DivideRoundUp(output_channels, kPiohw4ChannelsInPlane);
float* output = out.data();
for (int p = 0; p < num_planes; ++p) {
for (int h = 0; h < shape.h; ++h) {
for (int w = 0; w < shape.w; ++w) {
for (int c = 0; c < kPiohw4ChannelsInPlane; ++c) {
int output_c = p * kPiohw4ChannelsInPlane + c;
(*output++) = output_c >= output_channels
? 0
: in[shape.LinearIndex({output_c % shape.o, h, w,
output_c / shape.o})];
}
}
}
}
return absl::OkStatus();
}
std::vector<float> ConvertToPIOHW4(
const Tensor<OHWI, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPIOHW4(tensor.shape));
ConvertToPIOHW4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
template <typename T>
absl::Status ValidateConvertToPHWC4(absl::Span<const float> in,
const BHWC& shape, absl::Span<T> out) {
if (in.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWC4: Input data size does not match expected size: ",
in.size(), " != ", shape.DimensionsProduct()));
}
if (out.size() != GetElementsSizeForPHWC4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertToPHWC4: Output data size does not match expected size: ",
out.size(), " != ", GetElementsSizeForPHWC4(shape)));
}
return absl::OkStatus();
}
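// Repacks a dense BHWC tensor into PHWC4. When the channel count is exactly 4
// the two layouts coincide and the data is copied verbatim; otherwise full
// planes of 4 channels are copied pixel by pixel and the trailing partial
// plane is zero-padded. Example: a BHWC(1,1,2,6) tensor is written as
// plane 0 = {p0.c0..c3, p1.c0..c3} followed by
// plane 1 = {p0.c4,c5,0,0, p1.c4,c5,0,0}, i.e. GetElementsSizeForPHWC4 == 16.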
absl::Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
absl::Span<float> out) {
RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
if (shape.c == 4) {
std::memcpy(out.data(), in.data(),
shape.DimensionsProduct() * sizeof(float));
return absl::OkStatus();
}
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
float* dest =
out.data() + b * num_pixels * num_planes * kPhwc4ChannelsInPlane;
for (int p = 0; p < num_full_planes; p++) {
const float* src =
in.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
std::memcpy(dest, src, kPhwc4ChannelsInPlane * sizeof(float));
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
}
}
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const float* src =
in.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
float* dest = out.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
for (int p = 0; p < num_pixels; p++) {
std::memcpy(dest, src, remaining_channels * sizeof(float));
std::memset(dest + remaining_channels, 0,
(4 - remaining_channels) * sizeof(float));
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
}
return absl::OkStatus();
}
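// Same repacking as above, but every value is converted to IEEE fp16 via
// fp16_ieee_from_fp32_value; there is no memcpy fast path even when the
// channel count is already 4, since the element width changes.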
absl::Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
absl::Span<HalfBits> out) {
RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
HalfBits* dest =
out.data() + b * num_pixels * num_planes * kPhwc4ChannelsInPlane;
for (int p = 0; p < num_full_planes; p++) {
const float* src =
in.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = fp16_ieee_from_fp32_value(src[1]);
dest[2] = fp16_ieee_from_fp32_value(src[2]);
dest[3] = fp16_ieee_from_fp32_value(src[3]);
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
}
}
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const float* src =
in.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
HalfBits* dest = out.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
switch (remaining_channels) {
case 1:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = 0;
dest[2] = 0;
dest[3] = 0;
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
break;
case 2:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = fp16_ieee_from_fp32_value(src[1]);
dest[2] = 0;
dest[3] = 0;
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
break;
case 3:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_from_fp32_value(src[0]);
dest[1] = fp16_ieee_from_fp32_value(src[1]);
dest[2] = fp16_ieee_from_fp32_value(src[2]);
dest[3] = 0;
src += shape.c;
dest += kPhwc4ChannelsInPlane;
}
break;
default:
return absl::UnimplementedError(
"ConvertToPHWC4Half: Unsupported channels per planes count.");
}
}
return absl::OkStatus();
}
std::vector<float> ConvertToPHWC4(
const Tensor<BHWC, DataType::FLOAT32>& tensor) {
std::vector<float> transposed(GetElementsSizeForPHWC4(tensor.shape));
ConvertToPHWC4(tensor.data, tensor.shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
std::vector<float> ConvertToPHWC4(
const Tensor<HWC, DataType::FLOAT32>& tensor) {
const BHWC batched_shape =
BHWC(1, tensor.shape.h, tensor.shape.w, tensor.shape.c);
std::vector<float> transposed(GetElementsSizeForPHWC4(batched_shape));
ConvertToPHWC4(tensor.data, batched_shape,
absl::MakeSpan(transposed.data(), transposed.size()))
.IgnoreError();
return transposed;
}
uint32_t GetElementsSizeForPHWC4(const BHWC& shape) {
return shape.b * shape.h * shape.w * AlignByN(shape.c, kPhwc4ChannelsInPlane);
}
template <typename T>
absl::Status ValidateConvertFromPHWC4(absl::Span<const T> in, const BHWC& shape,
absl::Span<float> out) {
if (in.size() != GetElementsSizeForPHWC4(shape)) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertFromPHWC4: Input data size does not match expected size: ",
in.size(), " != ", GetElementsSizeForPHWC4(shape)));
}
if (out.size() != shape.DimensionsProduct()) {
return absl::InvalidArgumentError(absl::StrCat(
"ConvertFromPHWC4: Output data size does not match expected size: ",
out.size(), " != ", shape.DimensionsProduct()));
}
return absl::OkStatus();
}
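// Inverse of ConvertToPHWC4: unpacks the per-plane layout back into a dense
// BHWC tensor, dropping the zero padding of the last partial plane.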
absl::Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
absl::Span<float> out) {
RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
if (shape.c == 4) {
std::memcpy(out.data(), in.data(),
shape.DimensionsProduct() * sizeof(float));
return absl::OkStatus();
}
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
const float* src = in.data() + b * padded_size;
for (int p = 0; p < num_full_planes; p++) {
float* dest =
out.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
std::memcpy(dest, src, kPhwc4ChannelsInPlane * sizeof(float));
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
}
}
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const float* src = in.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
float* dest =
out.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
for (int p = 0; p < num_pixels; p++) {
std::memcpy(dest, src, remaining_channels * sizeof(float));
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
}
return absl::OkStatus();
}
absl::Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in,
const BHWC& shape, absl::Span<float> out) {
RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
int num_planes = DivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
const int num_pixels = shape.h * shape.w;
const int padded_size = num_pixels * num_planes * kPhwc4ChannelsInPlane;
const int num_full_planes = shape.c / kPhwc4ChannelsInPlane;
for (int b = 0; b < shape.b; b++) {
const HalfBits* src = in.data() + b * padded_size;
for (int p = 0; p < num_full_planes; p++) {
float* dest =
out.data() + shape.LinearIndex({b, 0, 0, p * kPhwc4ChannelsInPlane});
for (int i = 0; i < num_pixels; i++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
dest[1] = fp16_ieee_to_fp32_value(src[1]);
dest[2] = fp16_ieee_to_fp32_value(src[2]);
dest[3] = fp16_ieee_to_fp32_value(src[3]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
}
}
const int remaining_channels =
shape.c - num_full_planes * kPhwc4ChannelsInPlane;
if (remaining_channels == 0) {
return absl::OkStatus();
}
for (int b = 0; b < shape.b; b++) {
const HalfBits* src = in.data() + b * padded_size +
num_pixels * num_full_planes * kPhwc4ChannelsInPlane;
float* dest =
out.data() +
shape.LinearIndex({b, 0, 0, num_full_planes * kPhwc4ChannelsInPlane});
switch (remaining_channels) {
case 1:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
break;
case 2:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
dest[1] = fp16_ieee_to_fp32_value(src[1]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
break;
case 3:
for (int p = 0; p < num_pixels; p++) {
dest[0] = fp16_ieee_to_fp32_value(src[0]);
dest[1] = fp16_ieee_to_fp32_value(src[1]);
dest[2] = fp16_ieee_to_fp32_value(src[2]);
src += kPhwc4ChannelsInPlane;
dest += shape.c;
}
break;
default:
return absl::UnimplementedError(
"ConvertToPHWC4Half: Unsupported channels per planes count.");
}
}
return absl::OkStatus();
}
}
} | #include <array>
#include <cstdint>
#include <limits>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
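// The fixture disables algebraic simplification, inlining and fp-conversion
// simplification, and forbids excess precision, so the element-type
// conversions under test are executed as written rather than folded away.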
class ConvertTest : public ClientLibraryTestBase {
public:
explicit ConvertTest(se::Platform* platform = nullptr)
: ClientLibraryTestBase(platform) {
mutable_debug_options()->add_xla_disable_hlo_passes("algsimp");
mutable_debug_options()->add_xla_disable_hlo_passes("inline");
mutable_debug_options()->add_xla_disable_hlo_passes(
"simplify-fp-conversions");
mutable_debug_options()->set_xla_allow_excess_precision(false);
}
};
template <typename T>
class ConvertTestT : public ConvertTest {
public:
using ConvertTest::ConvertTest;
};
using FloatingPointTypeList =
::testing::Types<tsl::float8_e5m2, tsl::float8_e4m3fn, tsl::float8_e5m2fnuz,
tsl::float8_e4m3fnuz, Eigen::half, bfloat16, float,
double>;
TYPED_TEST_SUITE(ConvertTestT, FloatingPointTypeList);
TEST_F(ConvertTest, ConvertR1S32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {42, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 0, -64});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {42, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint32_t>(&builder, {42, 0, 64});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1PRED) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 0.0f, 64.0f});
ConvertElementType(a, PRED);
std::array<bool, 3> expected = {true, false, true};
ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {42, 64});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {1, 0, 1};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {1, 0, 1};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
ConvertElementType(a, F32);
std::vector<float> expected = {1., 0., 1.};
ComputeAndCompareR1<float>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1S0S32ToR1S0F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {});
ConvertElementType(a, F32);
std::vector<float> expected = {};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.6, 64.4});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
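// S64 -> F32: the values below mix small integers, the INT32/INT64 extremes,
// and large 64-bit constants that sit just around consecutive representable
// floats, exercising round-to-nearest at the 24-bit mantissa limit.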
XLA_TEST_F(ConvertTest, ConvertR1S64ToR1F32) {
XlaBuilder builder(TestName());
std::vector<int64_t> arg{
-9223371216516022272,
-2,
-1,
-0x7FFFFFFF,
-0x80000000,
0,
1,
2,
1073742145,
1073742656,
0x7FFFFFFF,
0x80000000,
826720496944058148,
4296062029846194332,
0x0007FB72E4000000LL,
0x0007FB72E4000001LL,
0x0007FB72E6000000LL,
0x0007FB72E7000000LL,
0x0007FB72E7FFFFFFLL,
0x0007FB72E8000000LL,
0x0007FB72E8000001LL,
0x0007FB72EA000000LL,
0x0007FB72EB000000LL,
0x0007FB72EBFFFFFFLL,
0x0007FB72EC000000LL,
0x7FFFFF0000000000LL,
0x7FFFFF8000000000LL,
0x7FFFFFFFFFFFFF00,
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF),
static_cast<int64_t>(0x0000f234e67e0001LL),
static_cast<int64_t>(0x8000000000000000),
static_cast<int64_t>(0x8000000000000000LL),
static_cast<int64_t>(0x8000000000000001LL),
static_cast<int64_t>(0x8000008000000000LL),
static_cast<int64_t>(0x8000010000000000LL),
};
Literal arg_literal = LiteralUtil::CreateR1<int64_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<float>(arg[i]);
}
ComputeAndCompareR1<float>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1F32) {
XlaBuilder builder(TestName());
std::vector<uint32_t> arg{0, 1, 0x1000, 0x7fffffff,
0x80000000, 0x80000001, 0x80000002, 0x80000003,
0x80000080, 0x80000081, 0x80000082, 0xFFFFFFFF};
Literal arg_literal = LiteralUtil::CreateR1<uint32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, F32);
std::vector<float> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<float>(arg[i]);
}
ComputeAndCompareR1<float>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1U32) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f, 1.0f, 16777216.0f,
16777218.0f, 2147483647.0f, 4294967040.0f};
Literal arg_literal = LiteralUtil::CreateR1<float>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, U32);
std::vector<uint32_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<uint32_t>(arg[i]);
}
ComputeAndCompareR1<uint32_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<uint32_t> arg{0, 1, 0x1000, 0x7fffffff, 0x80000082, 0xFFFFFFFF};
Literal arg_literal = LiteralUtil::CreateR1<uint32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1S32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<int32_t> arg{0, 1, 0x1000, -1, -0x1000};
Literal arg_literal = LiteralUtil::CreateR1<int32_t>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1S64) {
XlaBuilder builder(TestName());
std::vector<float> arg{0.0f,
0.5f,
0.99f,
1.0f,
1.5f,
1.99f,
2.0f,
2.01f,
2147483648.f,
-0.5f,
-0.99f,
-1.0f,
-1.5f,
-1.99f,
-2.0f,
-2.01f,
9223371487098961920.f,
9223370937343148032.f,
-9223371487098961920.f,
-9223370937343148032.f};
Literal arg_literal = LiteralUtil::CreateR1<float>({arg});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S64);
std::vector<int64_t> expected(arg.size());
for (int64_t i = 0; i < arg.size(); ++i) {
expected[i] = static_cast<int64_t>(arg[i]);
}
ComputeAndCompareR1<int64_t>(&builder, expected, {arg_data.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, F32);
std::vector<float> expected = {32.0, 64.0};
ComputeAndCompareR1<float>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1S32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, S32);
std::vector<int32_t> expected = {32, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1U8ToR1U32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {32, 64});
ConvertElementType(a, U32);
std::vector<uint32_t> expected = {32, 64};
ComputeAndCompareR1<uint32_t>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F64) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {32.0f, 64.0f});
ConvertElementType(a, F64);
std::vector<double> expected = {32.0, 64.0};
ComputeAndCompareR1<double>(&builder, expected, {});
}
XLA_TEST_F(ConvertTest, ConvertR1F64ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<double>(&builder, {32.0, 64.0});
ConvertElementType(a, F32);
std::vector<float> expected = {32.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertS32Extremes) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int32_t>(&builder, {std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max()});
ConvertElementType(a, F32);
std::vector<float> expected = {
static_cast<float>(std::numeric_limits<int32_t>::min()),
static_cast<float>(std::numeric_limits<int32_t>::max())};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
TEST_F(ConvertTest, ConvertMapToS32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(F32, {}), "in");
ConvertElementType(param, S32);
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<int32_t> expected = {42, 64};
ComputeAndCompareR1<int32_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertMapToF32) {
XlaBuilder builder(TestName());
auto b = builder.CreateSubBuilder("convert");
auto param = Parameter(b.get(), 0, ShapeUtil::MakeShape(S32, {}), "in");
ConvertElementType(param, F32);
auto a = ConstantR1<int32_t>(&builder, {42, 64});
Map(&builder, {a}, b->BuildAndNoteError(), {0});
std::vector<float> expected = {42.0f, 64.0f};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
}
TEST_F(ConvertTest, ConvertReshape) {
XlaBuilder builder(TestName());
auto input = ConstantR1<int32_t>(&builder, {42});
auto reshape = Reshape(input, {0}, {});
ConvertElementType(reshape, F32);
ComputeAndCompareR0<float>(&builder, 42.0f, {}, ErrorSpec(0.0001));
}
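// Hand-picked f32 values that are interesting for f16 conversion: signed
// zeros, infinities, the largest finite half (65504), values just beyond it,
// and the smallest normal and subnormal half magnitudes (expressed as raw
// f32 bit patterns).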
std::vector<float> GetInterestingF16ConversionTestCases() {
float infinity = std::numeric_limits<float>::infinity();
float half_min_positive_normal = absl::bit_cast<float, uint32_t>(0x38800000);
float half_max_subnormal = absl::bit_cast<float, uint32_t>(0x387fc000);
float half_min_positive_subnormal =
absl::bit_cast<float, uint32_t>(0x33800000);
float half_max = 65504.0f;
std::vector<float> test_cases(
{-infinity, -(half_max * 2 + 1), -half_max, -42.0f, -1.0f,
-half_min_positive_subnormal, -half_max_subnormal,
-half_min_positive_normal, -0.0f, 0.0f, half_min_positive_subnormal,
half_max_subnormal, half_min_positive_normal, 1.0f, 42.0f, half_max,
(half_max * 2 + 1), infinity});
return test_cases;
}
XLA_TEST_F(ConvertTest, ConvertR1F16ToR1F32) {
std::vector<float> test_cases = GetInterestingF16ConversionTestCases();
std::vector<half> input;
absl::c_transform(test_cases, std::back_inserter(input),
[](float f) { return Eigen::half(f); });
std::vector<float> expected_output;
absl::c_transform(input, std::back_inserter(expected_output),
[](Eigen::half h) { return static_cast<float>(h); });
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
client_->TransferToServer(LiteralUtil::CreateR1<half>(input)));
XlaBuilder builder(TestName());
ConvertElementType(
Parameter(&builder, 0,
ShapeUtil::MakeShape(F16, {static_cast<int64_t>(input.size())}),
"param"),
F32);
ComputeAndCompareR1<float>(&builder, expected_output, {dot_lhs_handle.get()});
}
XLA_TEST_F(ConvertTest, ConvertR1F32ToR1F16) {
std::vector<float> input = GetInterestingF16ConversionTestCases();
std::vector<half> expected_output;
absl::c_transform(input, std::back_inserter(expected_output),
[](float f) { return Eigen::half(f); });
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> dot_lhs_handle,
client_->TransferToServer(LiteralUtil::CreateR1<float>(input)));
XlaBuilder builder(TestName());
ConvertElementType(
Parameter(&builder, 0,
ShapeUtil::MakeShape(F32, {static_cast<int64_t>(input.size())}),
"param"),
F16);
ComputeAndCompareR1<half>(&builder, expected_output, {dot_lhs_handle.get()});
}
XLA_TEST_F(ConvertTest, ConvertC64ToC64) {
XlaBuilder builder(TestName());
std::vector<complex64> x = {{42.0f, 64.0f}};
ConvertElementType(ConstantR1<complex64>(&builder, x), C64);
ComputeAndCompareR1<complex64>(&builder, x, {}, ErrorSpec(0.0001));
}
XLA_TEST_F(ConvertTest, ConvertS64S64) {
XlaBuilder builder(TestName());
std::vector<int64_t> x = {{-42, 64}};
ConvertElementType(ConstantR1<int64_t>(&builder, x), S64);
ComputeAndCompareR1<int64_t>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64U64) {
XlaBuilder builder(TestName());
std::vector<uint64_t> x = {{42, 64}};
ConvertElementType(ConstantR1<uint64_t>(&builder, x), U64);
ComputeAndCompareR1<uint64_t>(&builder, x, {});
}
XLA_TEST_F(ConvertTest, ConvertU64S64) {
XlaBuilder builder(TestName());
std::vector<uint64_t> unsigned_x = {{42, UINT64_MAX}};
ConvertElementType(ConstantR1<uint64_t>(&builder, unsigned_x), S64);
std::vector<int64_t> signed_x = {{42, -1}};
ComputeAndCompareR1<int64_t>(&builder, signed_x, {});
}
XLA_TEST_F(ConvertTest, ConvertS64U64) {
XlaBuilder builder(TestName());
std::vector<int64_t> signed_x = {{42, -1, INT64_MIN}};
ConvertElementType(ConstantR1<int64_t>(&builder, signed_x), U64);
std::vector<uint64_t> unsigned_x = {{42, UINT64_MAX, IPow<uint64_t>(2, 63)}};
ComputeAndCompareR1<uint64_t>(&builder, unsigned_x, {});
}
TEST_F(ConvertTest, ConvertR1S4ToR1S8) {
XlaBuilder builder(TestName());
auto a = ConstantR1<s4>(&builder, {s4(0), s4(1), s4(2), s4(-8)});
ConvertElementType(a, S8);
std::vector<int8_t> expected = {0, 1, 2, -8};
ComputeAndCompareR1<int8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S4ParameterToR1S8) {
XlaBuilder builder(TestName());
Literal arg_literal =
LiteralUtil::CreateR1<s4>({s4(0), s4(1), s4(2), s4(-8)});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, S8);
std::vector<int8_t> expected = {0, 1, 2, -8};
ComputeAndCompareR1<int8_t>(&builder, expected, {arg_data.get()});
}
TEST_F(ConvertTest, ConvertR1U4ToR1U8) {
XlaBuilder builder(TestName());
auto a = ConstantR1<u4>(&builder, {u4(0), u4(1), u4(2), u4(15)});
ConvertElementType(a, U8);
std::vector<uint8_t> expected = {0, 1, 2, 15};
ComputeAndCompareR1<uint8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U4ParameterToR1U8) {
XlaBuilder builder(TestName());
Literal arg_literal =
LiteralUtil::CreateR1<u4>({u4(0), u4(1), u4(2), u4(15)});
auto arg_param = Parameter(&builder, 0, arg_literal.shape(), "arg_param");
std::unique_ptr<GlobalData> arg_data =
client_->TransferToServer(arg_literal).value();
ConvertElementType(arg_param, U8);
std::vector<uint8_t> expected = {0, 1, 2, 15};
ComputeAndCompareR1<uint8_t>(&builder, expected, {arg_data.get()});
}
TEST_F(ConvertTest, ConvertR1S8ToR1S4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int8_t>(&builder, {0, 1, 2, -8});
ConvertElementType(a, S4);
std::vector<s4> expected = {s4(0), s4(1), s4(2), s4(-8)};
ComputeAndCompareR1<s4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1U8ToR1U4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<uint8_t>(&builder, {0, 1, 2, 15});
ConvertElementType(a, U4);
std::vector<u4> expected = {u4(0), u4(1), u4(2), u4(15)};
ComputeAndCompareR1<u4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S8ToR1S4Roundtrip) {
XlaBuilder builder(TestName());
auto a = ConstantR1<int8_t>(&builder, {0, 8, -8, -9, 127, -128});
auto b = ConvertElementType(a, S4);
ConvertElementType(b, S8);
std::vector<int8_t> expected = {0, -8, -8, 7, -1, 0};
ComputeAndCompareR1<int8_t>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S4) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {0., 2.5, -2.5});
ConvertElementType(a, S4);
std::vector<s4> expected = {s4(0), s4(2), s4(-2)};
ComputeAndCompareR1<s4>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S4ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<s4>(&builder, {s4(0), s4(1), s4(2), s4(-8)});
ConvertElementType(a, F32);
std::vector<float> expected = {0, 1, 2, -8};
ComputeAndCompareR1<float>(&builder, expected, {});
}
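// Exhaustive bf16 -> f32 check over all 2^16 bit patterns. Widening a
// bfloat16 to float appends 16 zero mantissa bits, so the expected f32
// pattern is the bf16 pattern shifted left by 16; subnormal inputs may
// legally flush to a same-signed zero, and NaNs only need to stay NaN.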
XLA_TEST_F(ConvertTest, ConvertBF16F32) {
XlaBuilder builder(TestName());
std::vector<bfloat16> all_bfloats(1 << 16);
for (int i = 0; i < all_bfloats.size(); ++i) {
all_bfloats[i] =
Eigen::numext::bit_cast<bfloat16>(static_cast<uint16_t>(i));
}
std::vector<uint32_t> expected(all_bfloats.size());
for (int i = 0; i < expected.size(); ++i) {
expected[i] = (1U << 16) * i;
}
xla::XlaOp all_bfloats_bf16 = ConstantR1<bfloat16>(&builder, all_bfloats);
xla::XlaOp all_bfloats_f32 = ConvertElementType(all_bfloats_bf16, F32);
BitcastConvertType(all_bfloats_f32, U32);
TF_ASSERT_OK_AND_ASSIGN(const auto results, ExecuteAndTransfer(&builder, {}));
for (int i = 0; i < expected.size(); ++i) {
const auto result = results.Get<uint32_t>({i});
const auto correct = expected[i];
if (all_bfloats[i] != 0.0f &&
all_bfloats[i] < std::numeric_limits<float>::min()) {
const float same_signed_zero =
Eigen::numext::signbit(all_bfloats[i]) ? -0.0f : 0.0f;
if (result != correct) {
EXPECT_EQ(result, absl::bit_cast<uint32_t>(same_signed_zero));
}
} else if (Eigen::numext::isnan(all_bfloats[i])) {
ASSERT_TRUE(std::isnan(absl::bit_cast<float>(correct)));
EXPECT_TRUE(std::isnan(absl::bit_cast<float>(result)));
} else {
EXPECT_EQ(result, correct);
}
}
}
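// F16 -> F8E5M2 -> F16 round trip: the cases check exact preservation of
// representable values, round-to-nearest-even at the 2-bit mantissa boundary,
// and overflow to infinity above the largest finite e5m2 value.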
XLA_TEST_F(ConvertTest, ConvertF16F8e5m2Roundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{nan, nan},
{inf, inf},
{0x1.2p0, 0x1p0},
{0x1.6p0, 0x1.8p0},
{0x1.Cp15, 0x1.Cp15},
{0x1.DFCp15, 0x1.Cp15},
{0x1.Ep15, inf},
{0x1p16, inf},
{0x1p-14, 0x1p-14},
{0x1.8p-15, 0x1.8p-15},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E5M2);
ConvertElementType(f8, F16);
const bool saved =
execution_options_.debug_options().xla_allow_excess_precision();
execution_options_.mutable_debug_options()->set_xla_allow_excess_precision(
false);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {});
execution_options_.mutable_debug_options()->set_xla_allow_excess_precision(
saved);
}
XLA_TEST_F(ConvertTest, ConvertF8e5m2F16RoundtripExhaustive) {
XlaBuilder builder(TestName());
std::vector<tsl::float8_e5m2> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(
Eigen::numext::bit_cast<tsl::float8_e5m2>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_f8 = ConstantR1<tsl::float8_e5m2>(&builder, all_f8);
xla::XlaOp all_f8_as_f16 = ConvertElementType(all_f8_as_f8, F16);
ConvertElementType(all_f8_as_f16, F8E5M2);
ComputeAndCompareR1<tsl::float8_e5m2>(&builder, all_f8, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF8e5m2F16RoundtripExhaustive2) {
XlaBuilder builder(this->TestName());
std::vector<Eigen::half> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<Eigen::half>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<Eigen::half>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF8e5m2BF16RoundtripExhaustive3) {
XlaBuilder builder(this->TestName());
std::vector<bfloat16> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<bfloat16>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_bf16_to_f8 = ConstantR1<bfloat16>(&builder, inputs);
ConvertElementType(all_bf16_to_f8, F8E5M2);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3fnRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, -0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Cp8, 0x1.Cp8},
{0x1.Dp8, 0x1.Cp8},
{0x1.D04p8, nan},
{0x1p9, nan},
{0x1p-6, 0x1p-6},
{0x1.Ep-7, 0x1p-6},
{0x1.0p-8, 0x1.0p-8},
{0x1.4p-8, 0x1.0p-8},
{0x1.Cp-8, 0x1.0p-7},
{0x1.5p-7, 0x1.4p-7},
{0x1.3p-7, 0x1.4p-7},
{0x1p-10, 0},
{0x1.004p-10, 0x1p-9},
{0x1.DFCp-7, 0x1.Cp-7},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected_roundtrip});
}
auto f8 =
ConvertElementType(ConstantR1<Eigen::half>(&builder, inputs), F8E4M3FN);
ConvertElementType(f8, F16);
const bool saved =
execution_options_.debug_options().xla_allow_excess_precision();
execution_options_.mutable_debug_options()->set_xla_allow_excess_precision(
false);
ComputeAndCompareR1<Eigen::half>(&builder, expected_roundtrip, {});
execution_options_.mutable_debug_options()->set_xla_allow_excess_precision(
saved);
}
XLA_TEST_F(ConvertTest, ConvertF8e4m3fnF16RoundtripExhaustive) {
XlaBuilder builder(TestName());
std::vector<tsl::float8_e4m3fn> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(
Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_f8 = ConstantR1<tsl::float8_e4m3fn>(&builder, all_f8);
xla::XlaOp all_f8_as_f16 = ConvertElementType(all_f8_as_f8, F16);
ConvertElementType(all_f8_as_f16, F8E4M3FN);
ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF8e4m3fnF16RoundtripExhaustive2) {
XlaBuilder builder(TestName());
std::vector<float> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(static_cast<float>(
Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(i))));
}
xla::XlaOp all_f8_as_f32 = ConstantR1<float>(&builder, all_f8);
ConvertElementType(all_f8_as_f32, F8E4M3FN);
ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF8e4m3fnF16RoundtripExhaustive3) {
XlaBuilder builder(TestName());
std::vector<tsl::float8_e4m3fn> all_f8;
for (int i = 0; i < 256; i++) {
all_f8.push_back(
Eigen::numext::bit_cast<tsl::float8_e4m3fn>(static_cast<uint8_t>(i)));
}
xla::XlaOp all_f8_as_f8 = ConstantR1<tsl::float8_e4m3fn>(&builder, all_f8);
ConvertElementType(all_f8_as_f8, F32);
ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF8e4m3fnF16RoundtripExhaustive4) {
XlaBuilder builder(this->TestName());
std::vector<Eigen::half> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<Eigen::half>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_f16_to_f8 = ConstantR1<Eigen::half>(&builder, inputs);
ConvertElementType(all_f16_to_f8, F8E4M3FN);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF8e4m3fnBF16RoundtripExhaustive5) {
XlaBuilder builder(this->TestName());
std::vector<bfloat16> inputs;
for (int i = 0; i < 65536; i++) {
inputs.push_back(
Eigen::numext::bit_cast<bfloat16>(static_cast<uint16_t>(i)));
}
xla::XlaOp all_bf16_to_f8 = ConstantR1<bfloat16>(&builder, inputs);
ConvertElementType(all_bf16_to_f8, F8E4M3FN);
this->ComputeAndCompare(&builder, {}, ErrorSpec(0.));
}
XLA_TEST_F(ConvertTest, ConvertF16F8e4m3b11fnuzRoundtrip) {
XlaBuilder builder(TestName());
float nan = std::numeric_limits<float>::quiet_NaN();
float inf = std::numeric_limits<float>::infinity();
struct TestCase {
float input;
float expected_roundtrip;
} test_cases[] = {
{0.0, 0.0},
{-0.0, 0.0},
{1.0, 1.0},
{-1.0, -1.0},
{inf, nan},
{0x1.1p0, 0x1p0},
{0x1.3p0, 0x1.4p0},
{0x1.Ep4, 0x1.Ep4},
{0x1.EFCp4, 0x1.Ep4},
{0x1.Fp4, nan},
{0x1p5, nan},
{0x1p-10, 0x1p-10},
{0x1.Ep-11, 0x1p-10},
{0x1.0p-12, 0x1.0p-12},
{0x1.4p-12, 0x1.0p-12},
{0x1.Cp-12, 0x1.0p-11},
{0x1.5p-11, 0x1.4p-11},
{0x1.3p-11, 0x1.4p-11},
{0x1p-14, 0},
{0x1.004p-14, 0x1p-13},
{0x1.DFCp-11, 0x1.Cp-11},
};
std::vector<Eigen::half> inputs;
std::vector<Eigen::half> expected_roundtrip;
for (auto test_case : test_cases) {
inputs.push_back(Eigen::half{test_case.input});
expected_roundtrip.push_back(Eigen::half{test_case.expected |
999 | cpp | tensorflow/tensorflow | winograd_util | tensorflow/lite/delegates/gpu/common/winograd_util.cc | tensorflow/lite/delegates/gpu/common/winograd_util_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_WINOGRAD_UTIL_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_WINOGRAD_UTIL_H_
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
std::vector<float> AtMatrixForWinograd4x4To6x6();
std::vector<float> BtMatrixForWinograd4x4To6x6();
void RearrangeWeightsToWinograd4x4To6x6Weights(
const Tensor<OHWI, DataType::FLOAT32>& src_weights,
Tensor<OHWI, DataType::FLOAT32>* dst_weights);
bool IsSuitableForWinograd4x4To6x6(const Convolution2DAttributes& attr);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
#include <cmath>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
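// Builds the (transposed) interpolation matrix for the modified Winograd
// F(4x4, 3x3) algorithm. The interpolation points are 0, +-sqrt(2)/2,
// +-sqrt(2) and the point at infinity, encoded homogeneously via the
// (px, py) pairs; entry (x, y) is px[y]^x * py[y]^(height-1-x).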
std::vector<float> GetTransposedMatrixForWinograd(int width, int height) {
const float kDelta = std::sqrt(2.0f) / 2.0f;
std::vector<float> px(width);
px[0] = 0.0f;
const int points_count = (width - 1) / 2;
for (int i = 0; i < points_count; ++i) {
px[i * 2 + 1] = kDelta * (i + 1.0f);
px[i * 2 + 2] = -kDelta * (i + 1.0f);
}
px[width - 1] = 1.0f;
std::vector<float> py(width, 1.0f);
py[width - 1] = 0.0f;
std::vector<float> result(height * width);
for (int y = 0; y < width; ++y) {
for (int x = 0; x < height; ++x) {
result[x * width + y] =
std::pow(px[y], 1.0f * x) * std::pow(py[y], (height - 1.0f) - x);
}
}
return result;
}
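// Inverts the square interpolation matrix with a Gauss-Jordan style
// elimination that pivots only on the interior rows; the result is used as
// the Bt matrix of the 6x6 input transform.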
std::vector<float> GetInversedMatrixForWinograd(int rank) {
auto matrix = GetTransposedMatrixForWinograd(rank, rank);
std::vector<float> inverted(rank * rank, 0.0f);
for (int i = 0; i < rank; ++i) {
inverted[i * rank + i] = 1.0f;
}
for (int i = 1; i < rank - 1; ++i) {
float inv_t = 1.0f / matrix[i * rank + i];
for (int x = i; x < rank; ++x) {
matrix[i * rank + x] *= inv_t;
}
for (int x = 0; x < rank; ++x) {
inverted[i * rank + x] *= inv_t;
}
for (int y = 0; y < rank; ++y) {
if (y == i) continue;
float t = matrix[y * rank + i];
for (int x = i; x < rank; ++x) {
matrix[y * rank + x] -= t * matrix[i * rank + x];
}
for (int x = 0; x < rank; ++x) {
inverted[y * rank + x] -= t * inverted[i * rank + x];
}
}
}
return inverted;
}
std::vector<float> Multiply(const std::vector<float>& a_mat,
const std::vector<float>& b_mat, int m, int n,
int k) {
std::vector<float> result(m * k);
for (int y = 0; y < m; ++y) {
for (int x = 0; x < k; ++x) {
float sum = 0.0f;
for (int i = 0; i < n; ++i) {
sum += a_mat[y * n + i] * b_mat[i * k + x];
}
result[y * k + x] = sum;
}
}
return result;
}
}
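// At (4x6) is the output transform and Bt (6x6) the input transform of the
// F(4x4, 3x3) Winograd convolution.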
std::vector<float> AtMatrixForWinograd4x4To6x6() {
return GetTransposedMatrixForWinograd(6, 4);
}
std::vector<float> BtMatrixForWinograd4x4To6x6() {
return GetInversedMatrixForWinograd(6);
}
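// Converts 3x3 spatial filters to the 6x6 Winograd domain: for every
// (output, input) channel pair the kernel g is replaced by G * g * Gt, where
// G is the 6x3 filter transform built from the same interpolation points.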
void RearrangeWeightsToWinograd4x4To6x6Weights(
const Tensor<OHWI, DataType::FLOAT32>& src_weights,
Tensor<OHWI, DataType::FLOAT32>* dst_weights) {
OHWI dst_shape;
dst_shape.o = src_weights.shape.o;
dst_shape.h = 6;
dst_shape.w = 6;
dst_shape.i = src_weights.shape.i;
dst_weights->shape = dst_shape;
dst_weights->data.resize(dst_shape.DimensionsProduct());
auto gt_mat = GetTransposedMatrixForWinograd(6, 3);
std::vector<float> g_mat(gt_mat.size());
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 6; ++x) {
g_mat[x * 3 + y] = gt_mat[y * 6 + x];
}
}
for (int d = 0; d < src_weights.shape.o; ++d) {
for (int s = 0; s < src_weights.shape.i; ++s) {
std::vector<float> in_vals(9);
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
const int f_index = src_weights.shape.LinearIndex({d, y, x, s});
in_vals[y * 3 + x] = src_weights.data[f_index];
}
}
auto temp_vals = Multiply(g_mat, in_vals, 6, 3, 3);
auto out_vals = Multiply(temp_vals, gt_mat, 6, 3, 6);
for (int y = 0; y < 6; ++y) {
for (int x = 0; x < 6; ++x) {
const int f_index = dst_shape.LinearIndex({d, y, x, s});
dst_weights->data[f_index] = out_vals[y * 6 + x];
}
}
}
}
}
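// The 4x4-to-6x6 transform only applies to ungrouped 3x3 convolutions with
// unit stride and unit dilation; padding does not affect eligibility.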
bool IsSuitableForWinograd4x4To6x6(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 3 && attr.weights.shape.h == 3 &&
attr.dilations == HW(1, 1) && attr.strides == HW(1, 1) &&
attr.groups == 1;
}
}
} |
#include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
TEST(Winograd, CorrectAttributesFor4x4To6x6) {
Convolution2DAttributes attr;
attr.padding.prepended = HW(1, 2);
attr.padding.appended = HW(0, 1);
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.weights.shape = OHWI(1, 3, 3, 1);
EXPECT_TRUE(IsSuitableForWinograd4x4To6x6(attr));
}
TEST(Winograd, IncorrectAttributesFor4x4To6x6) {
Convolution2DAttributes attr;
attr.padding.prepended = HW(1, 2);
attr.padding.appended = HW(0, 1);
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.weights.shape = OHWI(1, 2, 3, 1);
EXPECT_FALSE(IsSuitableForWinograd4x4To6x6(attr));
}
}
} |