ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
1,500 | cpp | tensorflow/tensorflow | sparse_xent_op | tensorflow/core/kernels/sparse_xent_op.cc | tensorflow/core/kernels/sparse_xent_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SPARSE_XENT_OP_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_XENT_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace sparse_xent_helpers {
template <typename T>
typename TTypes<const T, 1>::Tensor32Bit To32BitConst(
typename TTypes<T>::Vec in) {
return To32Bit(typename TTypes<T>::ConstVec(in.data(), in.dimensions()));
}
template <typename T>
typename TTypes<const T, 2>::Tensor32Bit To32BitConst(
typename TTypes<T>::Matrix in) {
return To32Bit(typename TTypes<T>::ConstMatrix(in.data(), in.dimensions()));
}
}  // namespace sparse_xent_helpers
namespace generator {
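// Generator for the sparse Xent loss. For coordinate (batch, depth) it
// returns log(sum_exp_logits(batch)) - logits(batch, depth) when depth equals
// the label for that row, 0 otherwise, and NaN for out-of-range labels.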
template <typename T, typename Index>
class SparseXentLossGenerator {
public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SparseXentLossGenerator(
typename TTypes<const T, 2>::Tensor32Bit logits,
typename TTypes<const T, 1>::Tensor32Bit sum_exp_logits,
typename TTypes<const Index, 1>::Tensor32Bit labels,
const Index max_depth)
: logits_(logits),
sum_exp_logits_(sum_exp_logits),
labels_(labels),
max_depth_(max_depth) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
operator()(const Eigen::array<int, 2>& coords) const {
const int batch = coords[0];
const int depth = coords[1];
const Index label = tensorflow::internal::SubtleMustCopy(labels_(batch));
if (!FastBoundsCheck(label, max_depth_)) {
return Eigen::NumTraits<T>::quiet_NaN();
}
return TF_PREDICT_FALSE(label == depth)
? (Eigen::numext::log(sum_exp_logits_(batch)) - logits_(coords))
: T(0.0);
};
private:
typename TTypes<const T, 2>::Tensor32Bit logits_;
typename TTypes<const T, 1>::Tensor32Bit sum_exp_logits_;
typename TTypes<const Index, 1>::Tensor32Bit labels_;
const Index max_depth_;
};
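// Generator for the sparse Xent gradient. For coordinate (batch, depth) it
// returns exp_logits(batch, depth) / sum_exp_logits(batch), minus 1.0 at the
// label position, i.e. softmax(logits) - one_hot(labels); out-of-range labels
// yield NaN.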
template <typename T, typename Index>
class SparseXentGradGenerator {
public:
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SparseXentGradGenerator(
typename TTypes<const T, 2>::Tensor32Bit exp_logits,
typename TTypes<const T, 1>::Tensor32Bit sum_exp_logits,
typename TTypes<const Index, 1>::Tensor32Bit labels,
const Index max_depth)
: exp_logits_(exp_logits),
sum_exp_logits_(sum_exp_logits),
labels_(labels),
max_depth_(max_depth) {}
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
operator()(const Eigen::array<int, 2>& coords) const {
const int batch = coords[0];
const int depth = coords[1];
const Index label = tensorflow::internal::SubtleMustCopy(labels_(batch));
if (!FastBoundsCheck(label, max_depth_)) {
return Eigen::NumTraits<T>::quiet_NaN();
}
T subtract = TF_PREDICT_FALSE(depth == label) ? T(1.0) : T(0.0);
return exp_logits_(coords) / sum_exp_logits_(batch) - subtract;
};
private:
typename TTypes<const T, 2>::Tensor32Bit exp_logits_;
typename TTypes<const T, 1>::Tensor32Bit sum_exp_logits_;
typename TTypes<const Index, 1>::Tensor32Bit labels_;
const Index max_depth_;
};
}  // namespace generator
namespace functor {
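// Functors used by SparseXentOp to do the computations.
// RowMaxReduction computes the per-example maximum of the logits over classes.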
template <typename Device, typename T>
struct RowMaxReduction {
static inline void Compute(OpKernelContext* ctx,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<T>::Vec maximum) {
Eigen::IndexList<Eigen::type2index<1> > along_row;
Device d = ctx->eigen_device<Device>();
To32Bit(maximum).device(d) = To32Bit(logits).maximum(along_row);
}
};
template <typename Device, typename T, typename Index>
struct SparseXentFunctor {
void operator()(OpKernelContext* ctx, typename TTypes<T>::ConstMatrix logits,
typename TTypes<Index>::ConstVec labels,
typename TTypes<T>::Vec scratch, typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop);
};
template <typename Device, typename T, typename Index>
struct SparseXentEigenImpl {
static void Compute(OpKernelContext* ctx,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<Index>::ConstVec labels,
typename TTypes<T>::Vec scratch,
typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
const int kBatchDim = 0;
const int kClassDim = 1;
const int batch_size = logits.dimension(kBatchDim);
const int num_classes = logits.dimension(kClassDim);
Eigen::IndexList<Eigen::type2index<kClassDim> > along_class;
Eigen::IndexList<int, Eigen::type2index<1> > batch_by_one;
batch_by_one.set(0, batch_size);
Eigen::IndexList<int> batch_only;
batch_only.set(0, batch_size);
Eigen::IndexList<Eigen::type2index<1>, int> one_by_class;
one_by_class.set(1, num_classes);
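// scratch = max_logits along classes (subtracted below for numerical
// stability).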
RowMaxReduction<Device, T>::Compute(ctx, logits, scratch);
Device d = ctx->eigen_device<Device>();
To32Bit(backprop).device(d) =
To32Bit(logits) -
To32Bit(scratch).reshape(batch_by_one).broadcast(one_by_class);
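// scratch = sum(exp(logits - max_logits)) along classes.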
To32Bit(scratch).device(d) = To32Bit(backprop).exp().sum(along_class);
generator::SparseXentLossGenerator<T, Index> sparse_xent_loss_gen(
sparse_xent_helpers::To32BitConst<T>(backprop),
sparse_xent_helpers::To32BitConst<T>(scratch), To32Bit(labels),
backprop.dimension(1) /* max_depth */);
To32Bit(loss).device(d) =
To32Bit(backprop).generate(sparse_xent_loss_gen).sum(along_class);
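// backprop = exp(logits - max_logits); the grad generator below divides by
// scratch and subtracts the one-hot labels.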
To32Bit(backprop).device(d) = To32Bit(backprop).exp();
generator::SparseXentGradGenerator<T, Index> sparse_xent_grad_gen(
sparse_xent_helpers::To32BitConst<T>(backprop),
sparse_xent_helpers::To32BitConst<T>(scratch), To32Bit(labels),
backprop.dimension(1) /* max_depth */);
To32Bit(backprop).device(d) =
To32Bit(backprop).generate(sparse_xent_grad_gen);
}
};
}  // namespace functor
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_KERNELS_SPARSE_XENT_OP_H_
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/sparse_xent_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
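// Returns an InvalidArgument error if any label value lies outside
// [0, max_index); returns OkStatus otherwise (including for empty labels).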
template <typename Index>
Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) {
if (labels.NumElements() == 0) return absl::OkStatus();
const auto label_values = labels.vec<Index>();
int64_t bad_index;
auto min_max_dim_value = std::minmax_element(
label_values.data(), label_values.data() + label_values.size());
if (*min_max_dim_value.first < 0 || *min_max_dim_value.second >= max_index) {
bad_index = (*min_max_dim_value.first < 0) ? *min_max_dim_value.first
: *min_max_dim_value.second;
return errors::InvalidArgument(
"Received a label value of ", bad_index,
" which is outside the valid range of [0, ", max_index,
"). Label values: ", labels.SummarizeValue(labels.NumElements()));
}
return absl::OkStatus();
}
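// Computes sparse softmax cross entropy: output 0 is the per-example loss
// (shape [batch]) and output 1 is the backprop, softmax(logits) minus the
// one-hot labels (same shape as logits).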
template <typename Device, typename T, typename Index>
class SparseSoftmaxXentWithLogitsOp : public OpKernel {
public:
explicit SparseSoftmaxXentWithLogitsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& logits = context->input(0);
const Tensor& labels = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(logits.shape()),
errors::InvalidArgument("logits must be 2-D, but got shape ",
logits.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(labels.shape()),
errors::InvalidArgument("labels must be 1-D, but got shape ",
labels.shape().DebugString()));
OP_REQUIRES(context, logits.dim_size(0) == labels.dim_size(0),
errors::InvalidArgument(
"logits and labels must have the same first dimension, "
"got logits shape ",
logits.shape().DebugString(), " and labels shape ",
labels.shape().DebugString()));
OP_REQUIRES(context, logits.dim_size(1) > 0,
errors::InvalidArgument(
"Must have at least one class, but got logits shape ",
logits.shape().DebugString()));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"The GPU implementation of SparseSoftmaxCrossEntropyWithLogits"
" that would have been executed is not deterministic. Note that"
" the Python API uses an alternative, deterministic,"
" GPU-accelerated path when determinsim is enabled."));
}
Tensor scratch;
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
labels.shape(), &scratch));
Tensor* loss_out = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{1}, 0, labels.shape(), &loss_out));
Tensor* back_out = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 1, logits.shape(), &back_out));
if (logits.dim_size(0) > 0) {
if (std::is_same<Device, CPUDevice>::value) {
OP_REQUIRES_OK(
context, CheckInvalidLabelIndex<Index>(labels, logits.dim_size(1)));
}
functor::SparseXentFunctor<Device, T, Index> functor;
functor(context, logits.matrix<T>(), labels.vec<Index>(),
scratch.vec<T>(), loss_out->vec<T>(), back_out->matrix<T>());
}
}
};
namespace functor {
template <typename T, typename Index>
struct SparseXentFunctor<CPUDevice, T, Index> {
void operator()(OpKernelContext* ctx, typename TTypes<T>::ConstMatrix logits,
typename TTypes<Index>::ConstVec labels,
typename TTypes<T>::Vec scratch, typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
SparseXentEigenImpl<CPUDevice, T, Index>::Compute(ctx, logits, labels,
scratch, loss, backprop);
}
};
}
#define REGISTER(Dev, T, Index) \
REGISTER_KERNEL_BUILDER( \
Name("SparseSoftmaxCrossEntropyWithLogits") \
.Device(DEVICE_##Dev) \
.TypeConstraint<T>("T") \
.TypeConstraint<Index>("Tlabels"), \
SparseSoftmaxXentWithLogitsOp<Dev##Device, T, Index>);
REGISTER(CPU, float, int32)
REGISTER(CPU, float, int64_t)
REGISTER(CPU, double, int32)
REGISTER(CPU, double, int64_t)
REGISTER(CPU, Eigen::half, int32)
REGISTER(CPU, Eigen::half, int64_t)
REGISTER(CPU, bfloat16, int32)
REGISTER(CPU, bfloat16, int64_t)
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
REGISTER(GPU, float, int32)
REGISTER(GPU, float, int64_t)
REGISTER(GPU, Eigen::half, int32)
REGISTER(GPU, Eigen::half, int64_t)
REGISTER(GPU, Eigen::bfloat16, int32)
REGISTER(GPU, Eigen::bfloat16, int64_t)
#endif
#undef REGISTER
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/xent_op.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
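// Builds a graph with a single SparseSoftmaxCrossEntropyWithLogits node over
// random logits and uniformly drawn labels, used by the benchmarks below.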
template <class T>
static Graph* SparseXent(int batch_size, int num_classes, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor logits(type, TensorShape({batch_size, num_classes}));
logits.flat<T>().setRandom();
Tensor labels(DT_INT64, TensorShape({batch_size}));
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dist(0, num_classes - 1);
auto labels_t = labels.flat<int64_t>();
for (int i = 0; i < batch_size; ++i) {
labels_t(i) = dist(gen);
}
test::graph::Binary(g, "SparseSoftmaxCrossEntropyWithLogits",
test::graph::Constant(g, logits),
test::graph::Constant(g, labels));
return g;
}
#define BM_SparseXentDev(BATCH, CLASS, DEVICE, C_TYPE, TF_TYPE) \
static void BM_SparseXent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, SparseXent<C_TYPE>(BATCH, CLASS, TF_TYPE), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * BATCH * CLASS; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_SparseXent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_SparseXentDev(8, 1000000, gpu, float, DT_FLOAT);
BM_SparseXentDev(16, 10000, gpu, float, DT_FLOAT);
BM_SparseXentDev(16, 30000, gpu, float, DT_FLOAT);
BM_SparseXentDev(16, 100000, gpu, float, DT_FLOAT);
BM_SparseXentDev(32, 10000, gpu, float, DT_FLOAT);
BM_SparseXentDev(32, 30000, gpu, float, DT_FLOAT);
BM_SparseXentDev(32, 100000, gpu, float, DT_FLOAT);
BM_SparseXentDev(64, 10000, gpu, float, DT_FLOAT);
BM_SparseXentDev(64, 30000, gpu, float, DT_FLOAT);
BM_SparseXentDev(64, 100000, gpu, float, DT_FLOAT);
#endif
#define BM_SparseXentDev_CPU(C_TYPE, TF_TYPE) \
BM_SparseXentDev(8, 1000000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(16, 10000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(16, 100000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(32, 10000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(32, 100000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(64, 10000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(64, 100000, cpu, C_TYPE, TF_TYPE);
BM_SparseXentDev_CPU(float, DT_FLOAT);
BM_SparseXentDev_CPU(bfloat16, DT_BFLOAT16);
} |
1,501 | cpp | tensorflow/tensorflow | range_sampler | tensorflow/core/kernels/range_sampler.cc | tensorflow/core/kernels/range_sampler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_RANGE_SAMPLER_H_
#define TENSORFLOW_CORE_KERNELS_RANGE_SAMPLER_H_
#include <vector>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/random/distribution_sampler.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/weighted_picker.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tsl {
class Env;
}
namespace tensorflow {
using Env = tsl::Env;
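// Abstract base class for sampling integers from [0, range). Subclasses
// implement Sample() and Probability(); the SampleBatch* helpers draw whole
// batches and can also report expected counts for sampled and extra values.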
class RangeSampler {
public:
explicit RangeSampler(int64_t range) : range_(range) { CHECK_GT(range_, 0); }
virtual ~RangeSampler();
virtual int64_t Sample(random::SimplePhilox* rnd) const = 0;
virtual float Probability(int64_t value) const = 0;
void SampleBatch(random::SimplePhilox* rnd, bool unique,
absl::Span<int64_t> batch) const;
void SampleBatchGetExpectedCount(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count) const;
virtual void SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const;
virtual bool NeedsUpdates() const { return false; }
virtual void Update(absl::Span<const int64_t> values) {
LOG(FATAL) << "Update not supported for this sampler type.";
}
int64_t range() { return range_; }
protected:
const int64_t range_;
};
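// An AllSampler only samples batches of size == range: it returns every value
// in the range exactly once, so Sample() and Probability() must not be called
// directly.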
class AllSampler : public RangeSampler {
public:
explicit AllSampler(int64_t range);
~AllSampler() override {}
int64_t Sample(random::SimplePhilox* rnd) const override {
LOG(FATAL) << "Should not be called";
return 0;
}
float Probability(int64_t value) const override {
LOG(FATAL) << "Should not be called";
return 0;
}
void SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const override;
};
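// Samples uniformly from [0, range).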
class UniformSampler : public RangeSampler {
public:
explicit UniformSampler(int64_t range);
~UniformSampler() override {}
int64_t Sample(random::SimplePhilox* rnd) const override;
float Probability(int64_t value) const override;
private:
const float inv_range_;
};
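// Samples from a log-uniform (Zipfian) distribution over [0, range).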
class LogUniformSampler : public RangeSampler {
public:
explicit LogUniformSampler(int64_t range);
~LogUniformSampler() override {}
int64_t Sample(random::SimplePhilox* rnd) const override;
float Probability(int64_t value) const override;
private:
const double log_range_;
};
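// Unigram sampler whose distribution is built up from observed values via
// Update(). Not thread safe; see UnigramSampler for a thread-safe wrapper.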
class ThreadUnsafeUnigramSampler : public RangeSampler {
public:
explicit ThreadUnsafeUnigramSampler(int64_t range);
~ThreadUnsafeUnigramSampler() override {}
int64_t Sample(random::SimplePhilox* rnd) const override;
float Probability(int64_t value) const override;
bool NeedsUpdates() const override { return true; }
void Update(absl::Span<const int64_t> values) override;
private:
random::WeightedPicker picker_;
};
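// Thread-safe unigram sampler that guards a ThreadUnsafeUnigramSampler with a
// mutex.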
class UnigramSampler : public RangeSampler {
public:
explicit UnigramSampler(int64_t range);
~UnigramSampler() override {}
int64_t Sample(random::SimplePhilox* rnd) const override;
float Probability(int64_t value) const override;
void SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const override;
bool NeedsUpdates() const override { return true; }
void Update(absl::Span<const int64_t> values) override;
private:
ThreadUnsafeUnigramSampler unsafe_sampler_ TF_GUARDED_BY(mu_);
mutable mutex mu_;
};
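// Samples from a fixed unigram distribution loaded from a vocabulary file or
// an in-memory list of counts, with each weight raised to the power
// 'distortion'. Supports reserved ids and sharding of the vocabulary.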
class FixedUnigramSampler : public RangeSampler {
public:
FixedUnigramSampler(int64_t range, float distortion, int32_t num_reserved_ids,
int32_t num_shards, int32_t shard);
Status SetDistributionSampler(Env* env, const string& vocab_file);
Status SetDistributionSampler(const std::vector<float>& unigrams);
float Probability(int64_t value) const override;
int64_t Sample(random::SimplePhilox* rnd) const override;
private:
std::unique_ptr<random::DistributionSampler> dist_sampler_;
std::vector<float> weights_;
float total_weight_;
int32 num_shards_;
int32 shard_;
float distortion_;
void FillReservedIds(int32_t num_reserved_ids);
Status LoadFromFile(Env* env, const string& vocab_file, float distortion);
void LoadFromUnigrams(const std::vector<float>& unigrams, float distortion);
};
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_KERNELS_RANGE_SAMPLER_H_
#include "tensorflow/core/kernels/range_sampler.h"
#include <cmath>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using gtl::ArraySlice;
using gtl::MutableArraySlice;
RangeSampler::~RangeSampler() {}
void RangeSampler::SampleBatch(random::SimplePhilox* rnd, bool unique,
absl::Span<int64_t> batch) const {
SampleBatchGetExpectedCount(rnd, unique, batch, absl::Span<float>(),
absl::Span<const int64_t>(), absl::Span<float>());
}
void RangeSampler::SampleBatchGetExpectedCount(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count) const {
SampleBatchGetExpectedCountAvoid(rnd, unique, batch, batch_expected_count,
extras, extras_expected_count,
absl::Span<const int64_t>());
}
namespace {
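// Approximates the expected number of times a value with probability p occurs
// in the batch. With replacement (num_tries == batch_size) this is
// p * batch_size; without replacement it is 1 - (1 - p)^num_tries, computed
// via expm1/log1p for numerical stability when p is small.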
static float ExpectedCountHelper(float p, int batch_size, int num_tries) {
if (num_tries == batch_size) {
return p * batch_size;
}
return -std::expm1(num_tries * std::log1p(-p));
}
}
void RangeSampler::SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const {
const int batch_size = batch.size();
int num_tries;
if (unique) {
CHECK_LE(static_cast<int64_t>(batch_size + avoided_values.size()), range_);
std::unordered_set<int64_t> used(batch_size);
used.insert(avoided_values.begin(), avoided_values.end());
int num_picked = 0;
num_tries = 0;
while (num_picked < batch_size) {
num_tries++;
CHECK_LT(num_tries, kint32max);
int64_t value = Sample(rnd);
if (gtl::InsertIfNotPresent(&used, value)) {
batch[num_picked++] = value;
}
}
} else {
CHECK_EQ(avoided_values.size(), size_t{0})
<< "avoided_values only supported with unique=true";
for (int i = 0; i < batch_size; i++) {
batch[i] = Sample(rnd);
}
num_tries = batch_size;
}
if (!batch_expected_count.empty()) {
CHECK_EQ(batch_size, batch_expected_count.size());
for (int i = 0; i < batch_size; i++) {
batch_expected_count[i] =
ExpectedCountHelper(Probability(batch[i]), batch_size, num_tries);
}
}
CHECK_EQ(extras.size(), extras_expected_count.size());
for (size_t i = 0; i < extras.size(); i++) {
extras_expected_count[i] =
ExpectedCountHelper(Probability(extras[i]), batch_size, num_tries);
}
}
AllSampler::AllSampler(int64_t range) : RangeSampler(range) {}
void AllSampler::SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const {
const int batch_size = batch.size();
CHECK_EQ(range_, batch_size);
for (int i = 0; i < batch_size; i++) {
batch[i] = i;
}
if (!batch_expected_count.empty()) {
CHECK_EQ(batch_size, batch_expected_count.size());
for (int i = 0; i < batch_size; i++) {
batch_expected_count[i] = 1;
}
}
CHECK_EQ(size_t{0}, avoided_values.size());
CHECK_EQ(extras.size(), extras_expected_count.size());
for (size_t i = 0; i < extras.size(); i++) {
extras_expected_count[i] = 1;
}
}
UniformSampler::UniformSampler(int64_t range)
: RangeSampler(range), inv_range_(1.0 / range) {}
int64_t UniformSampler::Sample(random::SimplePhilox* rnd) const {
return rnd->Uniform64(range_);
}
float UniformSampler::Probability(int64_t value) const { return inv_range_; }
LogUniformSampler::LogUniformSampler(int64_t range)
: RangeSampler(range), log_range_(log1p(range)) {}
int64_t LogUniformSampler::Sample(random::SimplePhilox* rnd) const {
const int64_t value =
static_cast<int64_t>(exp(rnd->RandDouble() * log_range_)) - 1;
DCHECK_GE(value, 0);
return value % range_;
}
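// Sample() draws floor(exp(U * log(range + 1))) - 1 for U uniform in [0, 1),
// so P(value) = (log(value + 2) - log(value + 1)) / log(range + 1).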
float LogUniformSampler::Probability(int64_t value) const {
return (log((value + 2.0) / (value + 1.0))) / log_range_;
}
ThreadUnsafeUnigramSampler::ThreadUnsafeUnigramSampler(int64_t range)
: RangeSampler(range), picker_(range) {
CHECK_LT(range, kint32max);
}
int64_t ThreadUnsafeUnigramSampler::Sample(random::SimplePhilox* rnd) const {
return picker_.Pick(rnd);
}
float ThreadUnsafeUnigramSampler::Probability(int64_t value) const {
return static_cast<float>(picker_.get_weight(value)) / picker_.total_weight();
}
void ThreadUnsafeUnigramSampler::Update(absl::Span<const int64_t> values) {
int num_updates = std::min(static_cast<int>(values.size()),
kint32max - picker_.total_weight());
for (int i = 0; i < num_updates; i++) {
const int64_t value = values[i];
picker_.set_weight(value, picker_.get_weight(value) + 1);
}
}
UnigramSampler::UnigramSampler(int64_t range)
: RangeSampler(range), unsafe_sampler_(range) {
CHECK_LT(range, kint32max);
}
int64_t UnigramSampler::Sample(random::SimplePhilox* rnd) const {
tf_shared_lock lock(mu_);
return unsafe_sampler_.Sample(rnd);
}
float UnigramSampler::Probability(int64_t value) const {
tf_shared_lock lock(mu_);
return unsafe_sampler_.Probability(value);
}
void UnigramSampler::SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const {
tf_shared_lock lock(mu_);
unsafe_sampler_.SampleBatchGetExpectedCountAvoid(
rnd, unique, batch, batch_expected_count, extras, extras_expected_count,
avoided_values);
}
void UnigramSampler::Update(absl::Span<const int64_t> values) {
mutex_lock lock(mu_);
unsafe_sampler_.Update(values);
}
FixedUnigramSampler::FixedUnigramSampler(int64_t range, float distortion,
int32_t num_reserved_ids,
int32_t num_shards, int32_t shard)
: RangeSampler(range),
total_weight_(0.0),
num_shards_(num_shards),
shard_(shard),
distortion_(distortion) {
FillReservedIds(num_reserved_ids);
}
Status FixedUnigramSampler::SetDistributionSampler(Env* env,
const string& vocab_file) {
TF_RETURN_IF_ERROR(LoadFromFile(env, vocab_file, distortion_));
if (!TF_PREDICT_TRUE(FixedUnigramSampler::range() == weights_.size()))
return (errors::InvalidArgument("range is ", FixedUnigramSampler::range(),
" must be equal to weights size ",
weights_.size()));
dist_sampler_.reset(new random::DistributionSampler(weights_));
return absl::OkStatus();
}
Status FixedUnigramSampler::SetDistributionSampler(
const std::vector<float>& unigrams) {
LoadFromUnigrams(unigrams, distortion_);
if (!TF_PREDICT_TRUE(FixedUnigramSampler::range() == weights_.size()))
return (errors::InvalidArgument("range is ", FixedUnigramSampler::range(),
" must be equal to weights size ",
weights_.size()));
dist_sampler_.reset(new random::DistributionSampler(weights_));
return absl::OkStatus();
}
float FixedUnigramSampler::Probability(int64_t value) const {
if (value < 0 || static_cast<size_t>(value) >= weights_.size()) {
return 0.0;
}
return weights_.at(value) / total_weight_;
}
int64_t FixedUnigramSampler::Sample(random::SimplePhilox* rnd) const {
return dist_sampler_->Sample(rnd);
}
void FixedUnigramSampler::FillReservedIds(int32_t num_reserved_ids) {
for (int32_t word_id = 0; word_id < num_reserved_ids; ++word_id) {
if (word_id % num_shards_ == shard_) weights_.push_back(0.0);
}
}
Status FixedUnigramSampler::LoadFromFile(Env* env, const string& vocab_file,
float distortion) {
std::unique_ptr<RandomAccessFile> file;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(vocab_file, &file));
io::InputBuffer in(file.get(), 262144 /* buffer size in bytes */);
string line;
int32_t word_id = weights_.size();
while (in.ReadLine(&line).ok()) {
std::vector<string> cols = str_util::Split(line, ',');
if (cols.empty()) continue;
if (word_id % num_shards_ == shard_) {
float w = 0.0;
if (!strings::safe_strtof(cols.at(cols.size() - 1), &w)) {
return errors::InvalidArgument("Wrong vocabulary format at line: ",
line);
}
w = std::pow(w, distortion);
total_weight_ += w;
weights_.push_back(w);
}
++word_id;
}
return absl::OkStatus();
}
void FixedUnigramSampler::LoadFromUnigrams(const std::vector<float>& unigrams,
float distortion) {
int32_t word_id = weights_.size();
for (float w : unigrams) {
if (word_id % num_shards_ == shard_) {
w = std::pow(w, distortion);
total_weight_ += w;
weights_.push_back(w);
}
++word_id;
}
}
} | #include "tensorflow/core/kernels/range_sampler.h"
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using gtl::ArraySlice;
using gtl::MutableArraySlice;
class RangeSamplerTest : public ::testing::Test {
protected:
void CheckProbabilitiesSumToOne() {
double sum = 0;
for (int i = 0; i < sampler_->range(); i++) {
sum += sampler_->Probability(i);
}
EXPECT_NEAR(sum, 1.0, 1e-4);
}
void CheckHistogram(int num_samples, float tolerance) {
const int range = sampler_->range();
std::vector<int> h(range);
std::vector<int64_t> a(num_samples);
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rnd(&philox);
sampler_->SampleBatch(&rnd, false, absl::MakeSpan(a));
for (int i = 0; i < num_samples; i++) {
int64_t val = a[i];
ASSERT_GE(val, 0);
ASSERT_LT(val, range);
h[val]++;
}
for (int val = 0; val < range; val++) {
EXPECT_NEAR((h[val] + 0.0) / num_samples, sampler_->Probability(val),
tolerance);
}
}
void Update1() {
std::vector<int64_t> a(10);
for (int i = 0; i < 10; i++) {
a[i] = 3;
}
sampler_->Update(a);
}
void Update2() {
int64_t a[10];
for (int i = 0; i < 10; i++) {
a[i] = i;
}
for (int64_t i = 1; i < 10; i++) {
sampler_->Update(absl::Span<const int64_t>(a + i, 10 - i));
}
}
std::unique_ptr<RangeSampler> sampler_;
};
TEST_F(RangeSamplerTest, UniformProbabilities) {
sampler_.reset(new UniformSampler(10));
for (int i = 0; i < 10; i++) {
CHECK_EQ(sampler_->Probability(i), sampler_->Probability(0));
}
}
TEST_F(RangeSamplerTest, UniformChecksum) {
sampler_.reset(new UniformSampler(10));
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, UniformHistogram) {
sampler_.reset(new UniformSampler(10));
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, LogUniformProbabilities) {
int range = 1000000;
sampler_.reset(new LogUniformSampler(range));
for (int i = 100; i < range; i *= 2) {
float ratio = sampler_->Probability(i) / sampler_->Probability(i / 2);
EXPECT_NEAR(ratio, 0.5, 0.1);
}
}
TEST_F(RangeSamplerTest, LogUniformChecksum) {
sampler_.reset(new LogUniformSampler(10));
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, LogUniformHistogram) {
sampler_.reset(new LogUniformSampler(10));
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, UnigramProbabilities1) {
sampler_.reset(new UnigramSampler(10));
Update1();
EXPECT_NEAR(sampler_->Probability(3), 0.55, 1e-4);
for (int i = 0; i < 10; i++) {
if (i != 3) {
ASSERT_NEAR(sampler_->Probability(i), 0.05, 1e-4);
}
}
}
TEST_F(RangeSamplerTest, UnigramProbabilities2) {
sampler_.reset(new UnigramSampler(10));
Update2();
for (int i = 0; i < 10; i++) {
ASSERT_NEAR(sampler_->Probability(i), (i + 1) / 55.0, 1e-4);
}
}
TEST_F(RangeSamplerTest, UnigramChecksum) {
sampler_.reset(new UnigramSampler(10));
Update1();
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, UnigramHistogram) {
sampler_.reset(new UnigramSampler(10));
Update1();
CheckHistogram(1000, 0.05);
}
static const char kVocabContent[] =
"w1,1\n"
"w2,2\n"
"w3,4\n"
"w4,8\n"
"w5,16\n"
"w6,32\n"
"w7,64\n"
"w8,128\n"
"w9,256";
TEST_F(RangeSamplerTest, FixedUnigramProbabilities) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
for (int i = 0; i < 9; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, i * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramNoExistingFilename) {
Env* env = Env::Default();
string fname = "NoExistingFile";
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
Status s = test_sampler->SetDistributionSampler(env, fname);
sampler_.reset(test_sampler);
EXPECT_TRUE(absl::IsNotFound(s)) << s;
}
TEST_F(RangeSamplerTest, FixedUnigramNoMatchingRangeWeights) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(8, 0.8, 0, 1, 0);
Status s = test_sampler->SetDistributionSampler(env, fname);
sampler_.reset(test_sampler);
EXPECT_TRUE(absl::IsInvalidArgument(s)) << s;
}
TEST_F(RangeSamplerTest, FixedUnigramChecksum) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, FixedUnigramHistogram) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve1) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(10, 0.8, 1, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
for (int i = 1; i < 10; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 1) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve2) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(11, 0.8, 2, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
ASSERT_NEAR(sampler_->Probability(1), 0, 1e-4);
for (int i = 2; i < 11; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 2) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesFromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
for (int i = 0; i < 9; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, i * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramChecksumFromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, FixedUnigramHistogramFromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve1FromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(10, 0.8, 1, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
for (int i = 1; i < 10; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 1) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve2FromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(11, 0.8, 2, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
ASSERT_NEAR(sampler_->Probability(1), 0, 1e-4);
for (int i = 2; i < 11; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 2) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, All) {
int batch_size = 10;
sampler_.reset(new AllSampler(10));
std::vector<int64_t> batch(batch_size);
std::vector<float> batch_expected(batch_size);
std::vector<int64_t> extras(2);
std::vector<float> extras_expected(2);
extras[0] = 0;
extras[1] = batch_size - 1;
sampler_->SampleBatchGetExpectedCount(nullptr /* rnd: unused by AllSampler */,
false /* unique */, absl::MakeSpan(batch),
absl::MakeSpan(batch_expected), extras,
absl::MakeSpan(extras_expected));
for (int i = 0; i < batch_size; i++) {
EXPECT_EQ(i, batch[i]);
EXPECT_EQ(1, batch_expected[i]);
}
EXPECT_EQ(1, extras_expected[0]);
EXPECT_EQ(1, extras_expected[1]);
}
TEST_F(RangeSamplerTest, Unique) {
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rnd(&philox);
const int range = 100;
const int batch_size = 50;
const int num_batches = 100;
sampler_.reset(new LogUniformSampler(range));
std::vector<int> histogram(range);
std::vector<int64_t> batch(batch_size);
std::vector<int64_t> all_values(range);
for (int i = 0; i < range; i++) {
all_values[i] = i;
}
std::vector<float> expected(range);
sampler_->SampleBatchGetExpectedCount(&rnd, true, absl::MakeSpan(batch),
absl::Span<float>(), all_values,
absl::MakeSpan(expected));
std::set<int64_t> s(batch.begin(), batch.end());
CHECK_EQ(batch_size, s.size());
for (int trial = 0; trial < num_batches; trial++) {
std::vector<float> trial_expected(range);
sampler_->SampleBatchGetExpectedCount(&rnd, true, absl::MakeSpan(batch),
absl::Span<float>(), all_values,
absl::MakeSpan(trial_expected));
for (int i = 0; i < range; i++) {
EXPECT_NEAR(expected[i], trial_expected[i], expected[i] * 0.5);
}
for (int i = 0; i < batch_size; i++) {
histogram[batch[i]]++;
}
}
for (int i = 0; i < range; i++) {
const float average_count = static_cast<float>(histogram[i]) / num_batches;
EXPECT_NEAR(expected[i], average_count, 0.2);
}
}
TEST_F(RangeSamplerTest, Avoid) {
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rnd(&philox);
sampler_.reset(new LogUniformSampler(100));
std::vector<int64_t> avoided(2);
avoided[0] = 17;
avoided[1] = 23;
std::vector<int64_t> batch(98);
sampler_->SampleBatchGetExpectedCountAvoid(
&rnd, true, absl::MakeSpan(batch), absl::Span<float>(),
absl::Span<const int64_t>(), absl::Span<float>(), avoided);
int sum = 0;
for (auto val : batch) {
sum += val;
}
const int expected_sum = 100 * 99 / 2 - avoided[0] - avoided[1];
EXPECT_EQ(expected_sum, sum);
}
}
} |
1,502 | cpp | tensorflow/tensorflow | control_flow_ops | tensorflow/core/kernels/control_flow_ops.cc | tensorflow/core/kernels/control_flow_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_CONTROL_FLOW_OPS_H_
#define TENSORFLOW_CORE_KERNELS_CONTROL_FLOW_OPS_H_
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
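// ControlTrigger is a no-op whose only purpose is to act as a junction for
// control edges; Compute does nothing.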
class ControlTriggerOp : public OpKernel {
public:
explicit ControlTriggerOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {}
bool IsExpensive() override { return false; }
};
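// A SwitchOp forwards its data input to output_false (port 0) or output_true
// (port 1) depending on the boolean 'pred' input.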
class SwitchOp : public OpKernel {
public:
explicit SwitchOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override;
bool IsExpensive() override { return false; }
~SwitchOp() override {}
SwitchOp(const SwitchOp&) = delete;
void operator=(const SwitchOp&) = delete;
};
class SwitchNOp : public OpKernel {
public:
explicit SwitchNOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override;
bool IsExpensive() override { return false; }
~SwitchNOp() override {}
SwitchNOp(const SwitchNOp&) = delete;
void operator=(const SwitchNOp&) = delete;
};
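// A MergeOp forwards the first of its inputs that becomes available and
// reports which one it was in the 'value_index' output.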
class MergeOp : public OpKernel {
public:
explicit MergeOp(OpKernelConstruction* context);
void Compute(OpKernelContext* context) override;
bool IsExpensive() override { return false; }
~MergeOp() override {}
MergeOp(const MergeOp&) = delete;
void operator=(const MergeOp&) = delete;
};
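// EnterOp forwards its input into the child execution frame named by the
// 'frame_name' attr; ExitOp and NextIterationOp below forward a value out of
// a frame and to the next loop iteration, respectively.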
class EnterOp : public OpKernel {
public:
explicit EnterOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override;
bool IsExpensive() override { return false; }
~EnterOp() override {}
EnterOp(const EnterOp&) = delete;
void operator=(const EnterOp&) = delete;
};
class ExitOp : public OpKernel {
public:
explicit ExitOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override;
bool IsExpensive() override { return false; }
~ExitOp() override {}
ExitOp(const ExitOp&) = delete;
void operator=(const ExitOp&) = delete;
};
class NextIterationOp : public OpKernel {
public:
explicit NextIterationOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override;
bool IsExpensive() override { return false; }
~NextIterationOp() override {}
NextIterationOp(const NextIterationOp&) = delete;
void operator=(const NextIterationOp&) = delete;
};
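// A LoopCondOp forwards a boolean scalar that serves as the termination
// condition of a while loop.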
class LoopCondOp : public OpKernel {
public:
explicit LoopCondOp(OpKernelConstruction* context);
~LoopCondOp() override;
void Compute(OpKernelContext* context) override;
bool IsExpensive() override;
LoopCondOp(const LoopCondOp&) = delete;
void operator=(const LoopCondOp&) = delete;
};
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_KERNELS_CONTROL_FLOW_OPS_H_
#include <vector>
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
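// Shape function shared by Switch and RefSwitch: 'pred' must be a scalar, and
// both outputs take the shape (and resource handle data) of the data input.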
Status SwitchShape(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
ShapeHandle out = c->input(0);
c->set_output(0, out);
c->set_output(1, out);
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
c->set_output_handle_shapes_and_types(0, *handle_data);
c->set_output_handle_shapes_and_types(1, *handle_data);
}
return absl::OkStatus();
}
Status SwitchNShape(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
ShapeHandle out = c->input(0);
int num_outs;
TF_RETURN_IF_ERROR(c->GetAttr("num_outs", &num_outs));
for (int i = 0; i < num_outs; i++) {
c->set_output(i, out);
}
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
for (int i = 0; i < num_outs; i++) {
c->set_output_handle_shapes_and_types(i, *handle_data);
}
}
return absl::OkStatus();
}
}
REGISTER_OP("Switch")
.Input("data: T")
.Input("pred: bool")
.Output("output_false: T")
.Output("output_true: T")
.Attr("T: type")
.SetForwardTypeFn(full_type::ReplicateInput(0, 2))
.SetShapeFn(SwitchShape);
REGISTER_OP("RefSwitch")
.Input("data: Ref(T)")
.Input("pred: bool")
.Output("output_false: Ref(T)")
.Output("output_true: Ref(T)")
.Attr("T: type")
.SetAllowsUninitializedInput()
.SetShapeFn(SwitchShape);
REGISTER_OP("_SwitchN")
.Input("data: T")
.Input("output_index: int32")
.Output("outputs: num_outs * T")
.Attr("num_outs: int >= 1")
.Attr("T: type")
.SetShapeFn(SwitchNShape);
REGISTER_OP("RefSelect")
.Input("index: int32")
.Input("inputs: Ref(N * T)")
.Output("output: Ref(T)")
.Attr("T: type")
.Attr("N: int >= 1")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
ShapeHandle first_input = c->input(1);
if (!c->FullyDefined(first_input)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
for (int i = 2; i < c->num_inputs(); ++i) {
ShapeHandle input = c->input(i);
if (!c->FullyDefined(input) ||
!c->Merge(first_input, input, &unused).ok()) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
}
c->set_output(0, first_input);
return absl::OkStatus();
});
namespace {
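// Shape function for Merge: output 0 is the shape shared by all inputs, with
// any dimension that disagrees across inputs replaced by unknown; output 1
// (value_index) is a scalar.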
Status MergeShape(InferenceContext* c) {
ShapeHandle out = c->input(0);
if (!c->RankKnown(out)) {
out = c->UnknownShape();
} else {
int32_t rank = c->Rank(out);
for (int i = 1; i < c->num_inputs(); ++i) {
ShapeHandle input = c->input(i);
if (!c->RankKnown(input) || c->Rank(input) != rank) {
out = c->UnknownShape();
break;
}
for (int d = 0; d < rank; ++d) {
if (c->Value(c->Dim(input, d)) != c->Value(c->Dim(out, d))) {
TF_RETURN_IF_ERROR(c->ReplaceDim(out, d, c->UnknownDim(), &out));
}
}
}
}
c->set_output(0, out);
c->set_output(1, c->Scalar());
return absl::OkStatus();
}
TypeInferenceFn MergeTypeFn() {
std::vector<TypeInferenceFn> func_list{full_type::Merge(),
full_type::Tensor(TFT_INT32)};
return full_type::Tuple(func_list);
}
}
REGISTER_OP("Merge")
.Input("inputs: N * T")
.Output("output: T")
.Output("value_index: int32")
.Attr("T: type")
.Attr("N: int >= 1")
.SetForwardTypeFn(MergeTypeFn())
.SetShapeFn(MergeShape);
REGISTER_OP("RefMerge")
.Input("inputs: Ref(N * T)")
.Output("output: Ref(T)")
.Output("value_index: int32")
.Attr("T: type")
.Attr("N: int >= 1")
.SetShapeFn(MergeShape);
REGISTER_OP("Enter")
.Input("data: T")
.Output("output: T")
.Attr("T: type")
.Attr("frame_name: string")
.Attr("is_constant: bool = false")
.Attr("parallel_iterations: int = 10")
.SetForwardTypeFn(full_type::ReplicateInput())
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->UnknownShape());
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
c->set_output_handle_shapes_and_types(0, *handle_data);
}
bool is_constant;
TF_RETURN_IF_ERROR(c->GetAttr("is_constant", &is_constant));
if (is_constant) {
c->set_output(0, c->input(0));
}
return absl::OkStatus();
});
REGISTER_OP("RefEnter")
.Input("data: Ref(T)")
.Output("output: Ref(T)")
.Attr("T: type")
.Attr("frame_name: string")
.Attr("is_constant: bool = false")
.Attr("parallel_iterations: int = 10")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Exit")
.Input("data: T")
.Output("output: T")
.Attr("T: type")
.SetForwardTypeFn(full_type::ReplicateInput())
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RefExit")
.Input("data: Ref(T)")
.Output("output: Ref(T)")
.Attr("T: type")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("NextIteration")
.Input("data: T")
.Output("output: T")
.Attr("T: type")
.SetForwardTypeFn(full_type::ReplicateInput())
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RefNextIteration")
.Input("data: Ref(T)")
.Output("output: Ref(T)")
.Attr("T: type")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("LoopCond")
.Input("input: bool")
.Output("output: bool")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRank(c, 0);
});
REGISTER_OP("ControlTrigger").SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("Abort")
.Attr("error_msg: string = ''")
.Attr("exit_without_error: bool = false")
.SetShapeFn(shape_inference::NoOutputs);
} | #include <memory>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ControlFlowOpsTest, Merge_ShapeFn) {
ShapeInferenceTestOp op("Merge");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Merge")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "?;[]");
INFER_OK(op, "[2,1];?;[2,1]", "?;[]");
INFER_OK(op, "[2,1];[2,1];?", "?;[]");
INFER_OK(op, "[2,1];[2,1];[3,1,2]", "?;[]");
INFER_OK(op, "[2,1];[2,1];[3,1]", "[?,d0_1];[]");
INFER_OK(op, "[2,1];[2,2];[3,1]", "[?,?];[]");
INFER_OK(op, "[2,1];[2,1];[2,1]", "in0;[]");
}
TEST(ControlFlowOpsTest, SwitchN_ShapeFn) {
ShapeInferenceTestOp op("_SwitchN");
int n = 5;
TF_ASSERT_OK(NodeDefBuilder("test", "_SwitchN")
.Input({"d", 0, DT_FLOAT})
.Input({"bi", 0, DT_INT32})
.Attr("num_outs", n)
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?]");
INFER_OK(op, "?;?", "in0;in0;in0;in0;in0");
INFER_OK(op, "[2,?];?", "in0;in0;in0;in0;in0");
INFER_OK(op, "[2,?];[]", "in0;in0;in0;in0;in0");
INFER_OK(op, "[2,3];[]", "in0;in0;in0;in0;in0");
}
TEST(ControlFlowOpsTest, RefSelect_ShapeFn) {
ShapeInferenceTestOp op("RefSelect");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 1, DT_FLOAT_REF);
TF_ASSERT_OK(NodeDefBuilder("test", "RefSelect")
.Input("index", 0, DT_INT32)
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];?;?;?");
INFER_OK(op, "?;?;?;?", "?");
INFER_OK(op, "[];?;?;?", "?");
INFER_OK(op, "[];[1,2,3];?;?", "?");
INFER_OK(op, "[];[1,2,3];[1,2,?];[1,2,3]", "?");
INFER_OK(op, "[];[1,2,3];[1,2];[1,2,3]", "?");
INFER_OK(op, "[];[1,2,3];[1,2,4];[1,2,3]", "?");
INFER_OK(op, "[];[1,2,3];[1,2,3];[1,2,3]", "in1");
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
REGISTER_OP("ControlFlowOpsTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"))
.SetShapeFn(shape_inference::UnknownShape);
TEST(ControlFlowOpsTest, Merge_TypeInfrnc) {
Graph graph(OpRegistry::Global());
Node* input_tensor_op1;
TensorProto tensor_proto1;
TF_EXPECT_OK(
NodeBuilder("input_tensor_op1", "ControlFlowOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto1)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op1));
Node* input_tensor_op2;
TensorProto tensor_proto2;
TF_EXPECT_OK(
NodeBuilder("input_tensor_op2", "ControlFlowOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto2)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op2));
Node* shape_op;
TF_EXPECT_OK(NodeBuilder("merge_op", "Merge")
.Input({input_tensor_op1, input_tensor_op2})
.Attr("T", DT_FLOAT)
.Finalize(&graph, &shape_op));
TF_EXPECT_OK(type_inference(graph));
FullTypeDef expected_shape_op_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_FLOAT }
}
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT32 }
})pb",
&expected_shape_op_t));
EXPECT_TRUE(full_type::IsEqual(shape_op->def().experimental_type(),
expected_shape_op_t))
<< "fulltype is\n"
<< shape_op->def().experimental_type().DebugString() << "\nexpected\n"
<< expected_shape_op_t.DebugString();
}
} |
1,503 | cpp | tensorflow/tensorflow | debug_ops | tensorflow/core/kernels/debug_ops.cc | tensorflow/core/kernels/debug_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DEBUG_OPS_H_
#define TENSORFLOW_CORE_KERNELS_DEBUG_OPS_H_
#include <cstdint>
#include <memory>
#include <numeric>
#include "tensorflow/core/platform/bfloat16.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
#include "tensorflow/core/util/determinism.h"
#endif
#if GOOGLE_CUDA
#include "tensorflow/core/platform/cuda.h"
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
#endif
#include "tensorflow/core/debug/debug_io_utils.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/util/debug_events_writer.h"
namespace tensorflow {
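// Copies the input tensor for debugging. On GPU, device-resident inputs are
// copied GPU to GPU; otherwise a host deep copy is made. If the tensor is
// uninitialized, not memcpy-able, or the copy-node gate is closed, the input
// is forwarded unchanged.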
class CopyOp : public OpKernel {
public:
explicit CopyOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("tensor_name", &tensor_name_));
std::vector<string> debug_ops_spec;
OP_REQUIRES_OK(context,
context->GetAttr("debug_ops_spec", &debug_ops_spec));
for (const string& debug_op_spec : debug_ops_spec) {
const std::vector<string> items = str_util::Split(debug_op_spec, ";");
OP_REQUIRES(
context, items.size() == 3,
errors::Internal(
"Unexpected number of semicolons in debug_ops_spec element: ",
debug_op_spec));
debug_op_and_url_specs_.push_back(
DebugWatchAndURLSpec(strings::StrCat(tensor_name_, ":", items[0]),
items[1], items[2] == "1"));
}
}
void Compute(OpKernelContext* context) override {
const Tensor& src_tensor = context->input(0);
if (src_tensor.IsInitialized() &&
DataTypeCanUseMemcpy(src_tensor.dtype()) &&
DebugIO::IsCopyNodeGateOpen(debug_op_and_url_specs_)) {
Tensor* copied_tensor;
OP_REQUIRES_OK(context, context->allocate_output(0, src_tensor.shape(),
&copied_tensor));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
Device* device = static_cast<Device*>(context->device());
bool off_host_input = device->device_type() == DEVICE_GPU &&
!context->input_alloc_attr(0).on_host();
if (off_host_input) {
DeviceContext* device_ctxt = context->op_device_context();
Notification done_copy;
GPUUtil::CopyGPUTensorToSameGPU(
device, device_ctxt, &src_tensor, copied_tensor,
[&done_copy](const Status& s) { done_copy.Notify(); });
done_copy.WaitForNotification();
} else {
*copied_tensor = tensor::DeepCopy(src_tensor);
}
#else
*copied_tensor = tensor::DeepCopy(src_tensor);
#endif
} else {
context->set_output(0, src_tensor);
}
}
bool IsExpensive() override { return false; }
private:
string tensor_name_;
std::vector<DebugWatchAndURLSpec> debug_op_and_url_specs_;
};
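// Base class for debug ops: parses the watched tensor name into a
// DebugNodeKey, optionally gates execution on gRPC debug URLs, and publishes
// tensors to the configured debug URLs.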
class BaseDebugOp : public OpKernel {
public:
explicit BaseDebugOp(const string& debug_op_name,
OpKernelConstruction* context)
: OpKernel(context), debug_op_name_(debug_op_name) {
OP_REQUIRES_OK(context, context->GetAttr("debug_urls", &debug_urls_));
OP_REQUIRES_OK(context, context->GetAttr("gated_grpc", &gated_grpc_));
string device_name;
string tensor_name;
OP_REQUIRES_OK(context, context->GetAttr("device_name", &device_name));
OP_REQUIRES_OK(context, context->GetAttr("tensor_name", &tensor_name));
std::vector<string> name_items = str_util::Split(tensor_name, ':');
string node_name;
int32_t output_slot = 0;
OP_REQUIRES(context, name_items.size() == 1 || name_items.size() == 2,
errors::InvalidArgument("Failed to parse tensor name: \"",
tensor_name, "\""));
if (name_items.size() == 2) {
node_name = name_items[0];
OP_REQUIRES(
context, strings::safe_strto32(name_items[1], &output_slot),
errors::InvalidArgument("Invalid string value for output_slot: \"",
name_items[1], "\""));
} else if (name_items.size() == 1) {
node_name = name_items[0];
}
debug_watch_key_.reset(
new DebugNodeKey(device_name, node_name, output_slot, debug_op_name_));
}
bool IsExpensive() override { return false; }
protected:
bool ApplyGrpcGating(OpKernelContext* context) {
if (gated_grpc_ && !DebugIO::IsDebugNodeGateOpen(
debug_watch_key_->debug_node_name, debug_urls_)) {
Tensor* output_tensor;
TensorShape shape({0});
if (!context->allocate_output(0, shape, &output_tensor).ok()) {
LOG(ERROR) << "Debug node of watch key "
<< debug_watch_key_->debug_node_name
<< " failed to allocate empty tensor under gated-off state.";
}
return false;
} else {
return true;
}
}
Status PublishTensor(const Tensor& tensor, int64_t step_id = -1) {
if (debug_urls_.empty()) {
return absl::OkStatus();
} else {
Status status = DebugIO::PublishDebugTensor(
*debug_watch_key_, tensor, Env::Default()->NowMicros(), debug_urls_,
gated_grpc_, step_id);
if (!status.ok()) {
LOG(ERROR) << "Debug node of watch key "
<< debug_watch_key_->debug_node_name
<< " failed to publish debug tensor data to all URLs "
<< str_util::Join(debug_urls_, ", ")
<< ", due to: " << status.message();
}
return status;
}
}
void CompleteDebugNodeKey(const string& io_of_node, bool is_input,
int io_index) {
debug_watch_key_ = std::make_unique<DebugNodeKey>(
debug_watch_key_->device_name, debug_watch_key_->node_name,
debug_watch_key_->output_slot, debug_op_name_, io_of_node, is_input,
io_index);
}
private:
const string debug_op_name_;
std::unique_ptr<DebugNodeKey> debug_watch_key_;
std::vector<string> debug_urls_;
bool gated_grpc_;
};
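// Identity op for debugging: publishes the input tensor to the debug URLs and
// then forwards it unchanged. DebugIdentityV3 additionally records which
// input/output of a node is being watched and the step id.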
class DebugIdentityOp : public BaseDebugOp {
public:
explicit DebugIdentityOp(OpKernelConstruction* context)
: BaseDebugOp("DebugIdentity", context) {}
void Compute(OpKernelContext* context) override {
if (!ApplyGrpcGating(context)) {
return;
}
OP_REQUIRES_OK(context, PublishTensor(context->input(0)));
context->set_output(0, context->input(0));
}
};
class DebugIdentityV3Op : public BaseDebugOp {
public:
explicit DebugIdentityV3Op(OpKernelConstruction* context)
: BaseDebugOp("DebugIdentityV3", context) {
string io_of_node;
bool is_input;
int io_index;
OP_REQUIRES_OK(context, context->GetAttr("io_of_node", &io_of_node));
OP_REQUIRES_OK(context, context->GetAttr("is_input", &is_input));
OP_REQUIRES_OK(context, context->GetAttr("io_index", &io_index));
if (!io_of_node.empty()) {
CompleteDebugNodeKey(io_of_node, is_input, io_index);
}
}
void Compute(OpKernelContext* context) override {
if (!ApplyGrpcGating(context)) {
return;
}
OP_REQUIRES_OK(context,
PublishTensor(context->input(0), context->step_id()));
context->set_output(0, context->input(0));
}
};
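// Counts the NaN elements of the input and emits the count as a length-1
// int64 tensor, which is also published to the debug URLs.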
template <typename T>
class DebugNanCountOp : public BaseDebugOp {
public:
explicit DebugNanCountOp(OpKernelConstruction* context)
: BaseDebugOp("DebugNanCount", context) {}
void Compute(OpKernelContext* context) override {
if (!ApplyGrpcGating(context)) {
return;
}
Tensor* output_tensor;
const Tensor& input = context->input(0);
int64_t nan_count = 0;
if (input.IsInitialized()) {
const TensorShape& input_shape = input.shape();
const T* input_flat = input.template flat<T>().data();
for (int64_t i = 0; i < input_shape.num_elements(); ++i) {
if (Eigen::numext::isnan(static_cast<double>(input_flat[i]))) {
nan_count++;
}
}
}
TensorShape shape({1});
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output_tensor));
output_tensor->vec<int64_t>()(0) = nan_count;
OP_REQUIRES_OK(context, PublishTensor(*output_tensor));
}
};
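// Emits a numeric summary of the input as a 1-D double tensor with
// 14 + rank elements:
// [0] is_initialized, [1] element count, [2] NaN count, [3] -inf count,
// [4] negative count, [5] zero count, [6] positive count, [7] +inf count,
// [8] min, [9] max, [10] mean, [11] variance, [12] dtype enum value,
// [13] rank, [14:] dimension sizes.
// Values <= lower_bound or >= upper_bound are counted in the -inf / +inf
// buckets; mute_if_healthy suppresses publishing when no NaN/Inf is found.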
template <typename T>
class DebugNumericSummaryOp : public BaseDebugOp {
public:
explicit DebugNumericSummaryOp(OpKernelConstruction* context)
: BaseDebugOp("DebugNumericSummary", context) {
OP_REQUIRES_OK(context, context->GetAttr("lower_bound", &lower_bound_));
OP_REQUIRES_OK(context, context->GetAttr("upper_bound", &upper_bound_));
OP_REQUIRES_OK(context,
context->GetAttr("mute_if_healthy", &mute_if_healthy_));
}
void Compute(OpKernelContext* context) override {
if (!ApplyGrpcGating(context)) {
return;
}
Tensor* output_tensor;
const Tensor& input = context->input(0);
int64_t is_initialized = 0;
int64_t element_count = 0;
int64_t negative_inf_count = 0;
int64_t negative_count = 0;
int64_t zero_count = 0;
int64_t positive_count = 0;
int64_t positive_inf_count = 0;
int64_t nan_count = 0;
double min = std::numeric_limits<double>::infinity();
double max = -std::numeric_limits<double>::infinity();
double sum = 0.0;
double mean = std::numeric_limits<double>::quiet_NaN();
double variance = std::numeric_limits<double>::quiet_NaN();
int64_t non_inf_nan_count = 0;
const TensorShape& input_shape = input.shape();
if (input.IsInitialized()) {
is_initialized = 1;
const T* input_flat = input.template flat<T>().data();
element_count = input_shape.num_elements();
const bool is_lower_bound_custom = !Eigen::numext::isinf(lower_bound_);
const bool is_upper_bound_custom = !Eigen::numext::isinf(upper_bound_);
for (int64_t i = 0; i < element_count; ++i) {
const double x = static_cast<double>(input_flat[i]);
if (Eigen::numext::isnan(x)) {
nan_count++;
} else if (Eigen::numext::isinf(x)) {
if (x < 0.0) {
negative_inf_count++;
} else {
positive_inf_count++;
}
} else {
if (is_lower_bound_custom && x <= lower_bound_) {
negative_inf_count++;
} else if (is_upper_bound_custom && x >= upper_bound_) {
positive_inf_count++;
} else if (x < 0.0) {
negative_count++;
} else if (x > 0.0) {
positive_count++;
} else {
zero_count++;
}
if (x < min) {
min = x;
}
if (x > max) {
max = x;
}
non_inf_nan_count++;
sum += x;
}
}
if (non_inf_nan_count > 0) {
mean = sum / non_inf_nan_count;
variance = 0.0;
for (int64_t i = 0; i < element_count; ++i) {
const double x = static_cast<double>(input_flat[i]);
if (!Eigen::numext::isnan(x) && !Eigen::numext::isinf(x)) {
variance += (x - mean) * (x - mean);
}
}
variance /= non_inf_nan_count;
}
}
TensorShape shape({14 + input_shape.dims()});
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output_tensor));
output_tensor->vec<double>()(0) = static_cast<double>(is_initialized);
output_tensor->vec<double>()(1) = static_cast<double>(element_count);
output_tensor->vec<double>()(2) = static_cast<double>(nan_count);
output_tensor->vec<double>()(3) = static_cast<double>(negative_inf_count);
output_tensor->vec<double>()(4) = static_cast<double>(negative_count);
output_tensor->vec<double>()(5) = static_cast<double>(zero_count);
output_tensor->vec<double>()(6) = static_cast<double>(positive_count);
output_tensor->vec<double>()(7) = static_cast<double>(positive_inf_count);
output_tensor->vec<double>()(8) = min;
output_tensor->vec<double>()(9) = max;
output_tensor->vec<double>()(10) = mean;
output_tensor->vec<double>()(11) = variance;
output_tensor->vec<double>()(12) = static_cast<double>(input.dtype());
output_tensor->vec<double>()(13) = static_cast<double>(input_shape.dims());
for (size_t d = 0; d < input_shape.dims(); ++d) {
output_tensor->vec<double>()(14 + d) =
static_cast<double>(input_shape.dim_sizes()[d]);
}
bool mute = mute_if_healthy_ && nan_count == 0 && negative_inf_count == 0 &&
positive_inf_count == 0;
if (!mute) {
OP_REQUIRES_OK(context, PublishTensor(*output_tensor));
}
}
private:
float lower_bound_;
float upper_bound_;
bool mute_if_healthy_;
};
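// tfdbg v2 counterpart of DebugIdentity: forwards the input unchanged and
// writes a GraphExecutionTrace record to a DebugEventsWriter for each
// file:// dump root listed in debug_urls.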
class DebugIdentityV2Op : public OpKernel {
public:
explicit DebugIdentityV2Op(OpKernelConstruction* context)
: OpKernel(context),
device_name_(context->device()->name()),
output_slot_(-1),
tensor_debug_mode_(0),
tfdbg_run_id_() {
std::vector<string> debug_urls;
OP_REQUIRES_OK(context, context->GetAttr("debug_urls", &debug_urls));
for (const string& debug_url : debug_urls) {
if (absl::StartsWith(debug_url, DebugIO::kFileURLScheme)) {
dump_roots_.emplace_back(
debug_url.substr(strlen(DebugIO::kFileURLScheme)));
} else {
context->SetStatus(
errors::Internal("Unsupported debug URL schema in: ", debug_url));
}
}
OP_REQUIRES_OK(context,
context->GetAttr("tfdbg_context_id", &tfdbg_context_id_));
OP_REQUIRES_OK(context, context->GetAttr("op_name", &op_name_));
OP_REQUIRES_OK(context, context->GetAttr("output_slot", &output_slot_));
OP_REQUIRES_OK(context,
context->GetAttr("tensor_debug_mode", &tensor_debug_mode_));
if (context->HasAttr("circular_buffer_size")) {
OP_REQUIRES_OK(context, context->GetAttr("circular_buffer_size",
&circular_buffer_size_));
} else {
circular_buffer_size_ =
tfdbg::DebugEventsWriter::kDefaultCyclicBufferSize;
}
if (context->HasAttr("tfdbg_run_id")) {
OP_REQUIRES_OK(context, context->GetAttr("tfdbg_run_id", &tfdbg_run_id_));
}
}
void Compute(OpKernelContext* context) override {
const Tensor& tensor = context->input(0);
for (const string& dump_root : dump_roots_) {
tfdbg::DebugEventsWriter* debug_events_writer =
tfdbg::DebugEventsWriter::GetDebugEventsWriter(
dump_root, tfdbg_run_id_, circular_buffer_size_);
OP_REQUIRES_OK(context, debug_events_writer->WriteGraphExecutionTrace(
tfdbg_context_id_, device_name_, op_name_,
output_slot_, tensor_debug_mode_, tensor));
}
context->set_output(0, tensor);
}
private:
std::vector<string> dump_roots_;
string tfdbg_context_id_;
string device_name_;
string op_name_;
int32 output_slot_;
int32 tensor_debug_mode_;
int64_t circular_buffer_size_;
string tfdbg_run_id_;
};
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
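// Thin wrappers around the GPU reduction kernels used by
// DebugNumericSummaryV2Op<GPUDevice, ...>; their definitions live in the
// CUDA/ROCm compilation unit of this kernel.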
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename Tin, typename Tout>
struct CurtHealthLaunch {
void Run(const GPUDevice& d, const Tin* data, int size, Tout output[1]);
};
extern template struct CurtHealthLaunch<Eigen::half, float>;
extern template struct CurtHealthLaunch<float, float>;
extern template struct CurtHealthLaunch<double, float>;
extern template struct CurtHealthLaunch<Eigen::half, double>;
extern template struct CurtHealthLaunch<float, double>;
extern template struct CurtHealthLaunch<double, double>;
template <typename Tin, typename Tout>
struct ConciseHealthLaunch {
void Run(const GPUDevice& d, const Tin* data, int size, Tout output[3]);
};
extern template struct ConciseHealthLaunch<Eigen::half, float>;
extern template struct ConciseHealthLaunch<float, float>;
extern template struct ConciseHealthLaunch<double, float>;
extern template struct ConciseHealthLaunch<Eigen::half, double>;
extern template struct ConciseHealthLaunch<float, double>;
extern template struct ConciseHealthLaunch<double, double>;
template <typename Tin, typename Tout>
struct FullHealthLaunch {
void Run(const GPUDevice& d, const Tin* data, int size, Tout output[6]);
};
extern template struct FullHealthLaunch<Eigen::half, float>;
extern template struct FullHealthLaunch<float, float>;
extern template struct FullHealthLaunch<double, float>;
extern template struct FullHealthLaunch<Eigen::half, double>;
extern template struct FullHealthLaunch<float, double>;
extern template struct FullHealthLaunch<double, double>;
template <typename Tin, typename Tout>
struct ReduceInfNanThreeSlotsLaunch {
void Run(const GPUDevice& d, const Tin* data, int size, Tout output[3]);
};
extern template struct ReduceInfNanThreeSlotsLaunch<Eigen::half, float>;
extern template struct ReduceInfNanThreeSlotsLaunch<float, float>;
extern template struct ReduceInfNanThreeSlotsLaunch<double, float>;
extern template struct ReduceInfNanThreeSlotsLaunch<Eigen::half, double>;
extern template struct ReduceInfNanThreeSlotsLaunch<float, double>;
extern template struct ReduceInfNanThreeSlotsLaunch<double, double>;
#endif
template <typename Device, typename Tin, typename Tout>
class DebugNumericSummaryV2Op;
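// CPU implementation. tensor_debug_mode selects the summary layout (the
// values follow tfdbg's TensorDebugMode enum):
// 2 (CURT_HEALTH): [tensor_id, any_inf_nan]
// 3 (CONCISE_HEALTH): [tensor_id, num_elements, -inf count, +inf count, nan count]
// 4 (FULL_HEALTH): [tensor_id, -1, dtype, rank, num_elements,
// -inf, +inf, nan, negative, zero, positive counts]
// 5 (SHAPE): [tensor_id, dtype, rank, num_elements, up to 6 dim sizes]
// 8 (REDUCE_INF_NAN_THREE_SLOTS): [-inf marker, +inf marker, nan marker]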
template <typename Tin, typename Tout>
class DebugNumericSummaryV2Op<CPUDevice, Tin, Tout> : public OpKernel {
public:
explicit DebugNumericSummaryV2Op(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr("tensor_debug_mode", &tensor_debug_mode_));
OP_REQUIRES_OK(context, context->GetAttr("tensor_id", &tensor_id_));
}
void Compute(OpKernelContext* context) override {
const Tensor& tensor = context->input(0);
auto in = tensor.flat<Tin>();
const Tin* data = in.data();
const int64_t size = in.size();
Tensor* output_tensor;
Tout tensor_id = static_cast<Tout>(tensor_id_);
const Tout num_elem = static_cast<Tout>(context->input(0).NumElements());
if (tensor_debug_mode_ != 8) {
OP_REQUIRES(
context, tensor_id_ <= kMaxTensorId,
errors::InvalidArgument("DebugNumericSummaryV2Op requires "
"tensor_id to be less than or equal to "
"(2^",
std::numeric_limits<Tout>::digits,
"). Given tensor_id:", tensor_id_));
}
if (tensor_debug_mode_ == 2) {
TensorShape shape({2});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
output_tensor->flat<Tout>()(0) = tensor_id;
output_tensor->flat<Tout>()(1) = 0.0;
int fp_props =
std::accumulate(data, data + size, 0, [](const int x, const Tin& y) {
return Eigen::numext::isfinite(y) ? x : 1;
});
if (fp_props) {
output_tensor->flat<Tout>()(1) = 1.0;
}
} else if (tensor_debug_mode_ == 3) {
TensorShape shape({5});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
output_tensor->flat<Tout>()(0) = tensor_id;
output_tensor->flat<Tout>()(1) = num_elem;
Tout fp_props[3] = {0.0, 0.0, 0.0};
std::for_each(data, data + size, [&fp_props](const Tin& y) {
if (TF_PREDICT_TRUE(Eigen::numext::isfinite(y))) {
} else if (Eigen::numext::isinf(y)) {
if (y < static_cast<Tin>(0.f)) {
++fp_props[0];
} else {
++fp_props[1];
}
} else if (Eigen::numext::isnan(y)) {
++fp_props[2];
}
});
output_tensor->flat<Tout>()(2) = fp_props[0];
output_tensor->flat<Tout>()(3) = fp_props[1];
output_tensor->flat<Tout>()(4) = fp_props[2];
} else if (tensor_debug_mode_ == 4) {
TensorShape shape({11});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
int num_dims = tensor.dims();
output_tensor->flat<Tout>()(0) = tensor_id;
output_tensor->flat<Tout>()(1) = -1.0;
output_tensor->flat<Tout>()(2) = static_cast<Tout>(tensor.dtype());
output_tensor->flat<Tout>()(3) = static_cast<Tout>(num_dims);
output_tensor->flat<Tout>()(4) = num_elem;
Tout fp_props[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
std::for_each(data, data + size, [&fp_props](const Tin& y) {
if (TF_PREDICT_TRUE(Eigen::numext::isfinite(y))) {
if (y < static_cast<Tin>(0.f)) {
++fp_props[3];
} else if (y == static_cast<Tin>(0.f)) {
++fp_props[4];
} else {
++fp_props[5];
}
} else if (Eigen::numext::isinf(y)) {
if (y < static_cast<Tin>(0.f)) {
++fp_props[0];
} else {
++fp_props[1];
}
} else if (Eigen::numext::isnan(y)) {
++fp_props[2];
}
});
output_tensor->flat<Tout>()(5) = fp_props[0];
output_tensor->flat<Tout>()(6) = fp_props[1];
output_tensor->flat<Tout>()(7) = fp_props[2];
output_tensor->flat<Tout>()(8) = fp_props[3];
output_tensor->flat<Tout>()(9) = fp_props[4];
output_tensor->flat<Tout>()(10) = fp_props[5];
} else if (tensor_debug_mode_ == 5) {
TensorShape shape({10});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
int num_dims = tensor.dims();
output_tensor->flat<Tout>()(0) = tensor_id;
output_tensor->flat<Tout>()(1) = static_cast<Tout>(tensor.dtype());
output_tensor->flat<Tout>()(2) = static_cast<Tout>(num_dims);
output_tensor->flat<Tout>()(3) = num_elem;
int dim_idx = 4;
for (int i = std::max(0, num_dims - kShapeDims);
i < std::max(6, num_dims); ++i) {
if (i < num_dims) {
output_tensor->flat<Tout>()(dim_idx++) =
static_cast<Tout>(tensor.dim_size(i));
} else {
output_tensor->flat<Tout>()(dim_idx++) = 0.0;
}
}
} else if (tensor_debug_mode_ == 8) {
TensorShape shape({3});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
output_tensor->flat<Tout>()(0) = 0.0;
output_tensor->flat<Tout>()(1) = 0.0;
output_tensor->flat<Tout>()(2) = 0.0;
int fp_props =
std::accumulate(data, data + size, 0, [](const int x, const Tin& y) {
int result = x;
if (TF_PREDICT_TRUE(Eigen::numext::isfinite(y))) {
} else if (Eigen::numext::isinf(y)) {
result |= y < static_cast<Tin>(0.f) ? kNegInfBit : kPosInfBit;
} else if (Eigen::numext::isnan(y)) {
result |= kNaNBit;
}
return result;
});
if (fp_props & kNegInfBit) {
output_tensor->flat<Tout>()(0) = -std::numeric_limits<Tout>::infinity();
}
if (fp_props & kPosInfBit) {
output_tensor->flat<Tout>()(1) = std::numeric_limits<Tout>::infinity();
}
if (fp_props & kNaNBit) {
output_tensor->flat<Tout>()(2) = std::numeric_limits<Tout>::quiet_NaN();
}
} else {
context->SetStatus(errors::Unimplemented(
"Unimplemented tensor debug mode: ", tensor_debug_mode_));
}
}
private:
int tensor_debug_mode_;
int64_t tensor_id_;
static constexpr int kShapeDims = 6;
static constexpr int kNegInfBit = 0x01;
static constexpr int kPosInfBit = 0x02;
static constexpr int kNaNBit = 0x04;
static constexpr int64_t kMaxTensorId = 1LL
<< std::numeric_limits<Tout>::digits;
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename Tin, typename Tout>
class DebugNumericSummaryV2Op<GPUDevice, Tin, Tout> : public AsyncOpKernel {
public:
typedef GPUDevice Device;
explicit DebugNumericSummaryV2Op(OpKernelConstruction* context)
: AsyncOpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr("tensor_debug_mode", &tensor_debug_mode_));
OP_REQUIRES_OK(context, context->GetAttr("tensor_id", &tensor_id_));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
Tensor* output_tensor;
Tout tensor_id = static_cast<Tout>(tensor_id_);
const Tensor& tensor = context->input(0);
const Tout num_elem = static_cast<Tout>(tensor.NumElements());
const Device& d = context->eigen_device<Device>();
auto input = tensor.flat<Tin>();
auto check_cb = [this, done]() { done(); };
if (tensor_debug_mode_ != 8) {
OP_REQUIRES_ASYNC(
context, tensor_id_ <= kMaxTensorId,
errors::InvalidArgument("DebugNumericSummaryV2Op requires "
"tensor_id to be less than or equal to "
"(2^",
std::numeric_limits<Tout>::digits,
"). Given tensor_id:", tensor_id_),
done);
}
if (tensor_debug_mode_ == 2) {
TensorShape shape({2});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
auto* stream = context->op_device_context()->stream();
OP_REQUIRES_ASYNC(context, stream != nullptr,
errors::Internal("No GPU stream available."), done);
se::DeviceMemoryBase output_tensor_ptr(
output_tensor->flat<Tout>().data(),
output_tensor->flat<Tout>().size());
OP_REQUIRES_OK(context,
stream->MemZero(&output_tensor_ptr, 2 * sizeof(Tout)));
OP_REQUIRES_OK(context, stream->Memcpy(&output_tensor_ptr, &tensor_id,
sizeof(Tout)));
if (num_elem == 0) {
done();
return;
}
auto input = context->input(0).flat<Tin>();
CurtHealthLaunch<Tin, Tout>().Run(d, input.data(), input.size(),
output_tensor->flat<Tout>().data() + 1);
context->device()
->tensorflow_accelerator_device_info()
->event_mgr->ThenExecute(stream, std::move(check_cb));
} else if (tensor_debug_mode_ == 3) {
TensorShape shape({5});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
OP_REQUIRES_ASYNC(context, !tensorflow::OpDeterminismRequired(),
errors::Unimplemented(
"Determinism is not yet supported for "
"DebugNumericSummaryV2 when tensor_debug_mode is "
"CONCISE_HEALTH."),
done);
auto* stream = context->op_device_context()->stream();
OP_REQUIRES_ASYNC(context, stream != nullptr,
errors::Internal("No GPU stream available."), done);
se::DeviceMemoryBase output_tensor_ptr(
output_tensor->flat<Tout>().data(),
output_tensor->flat<Tout>().size());
OP_REQUIRES_OK(context,
stream->Memset32(&output_tensor_ptr, 0, 5 * sizeof(Tout)));
const Tout static_output[] = {tensor_id, num_elem};
OP_REQUIRES_OK(context, stream->Memcpy(&output_tensor_ptr, &static_output,
2 * sizeof(Tout)));
if (num_elem == 0) {
done();
return;
}
ConciseHealthLaunch<Tin, Tout>().Run(
d, input.data(), input.size(),
output_tensor->flat<Tout>().data() + 2);
context->device()
->tensorflow_accelerator_device_info()
->event_mgr->ThenExecute(stream, std::move(check_cb));
} else if (tensor_debug_mode_ == 4) {
TensorShape shape({11});
OP_REQUIRES_OK(context,
context->allocate_output(0, shape, &output_tensor));
auto* stream = context->op_device_context()->stream();
OP_REQUIRES_ASYNC(context, stream != nullptr,
errors::Internal("No GPU stream av | #include <string.h>
#include <fstream>
#include <vector>
#include "tensorflow/core/debug/debug_io_utils.h"
#include "tensorflow/core/debug/debug_node_key.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
class DebugIdentityOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type, const std::vector<string>& debug_urls) {
env_ = Env::Default();
TF_CHECK_OK(NodeDefBuilder("op", "DebugIdentity")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("debug_urls", debug_urls)
.Finalize(node_def()));
return InitOp();
}
Status Init(DataType input_type) {
std::vector<string> empty_debug_urls;
return Init(input_type, empty_debug_urls);
}
Env* env_;
};
TEST_F(DebugIdentityOpTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(DebugIdentityOpTest, Int32Success_6_FileURLs) {
const int kNumDumpDirs = 3;
const string tmp_dir = testing::TmpDir();
std::vector<string> dump_roots;
std::vector<string> debug_urls;
for (int i = 0; i < kNumDumpDirs; ++i) {
const string dump_root = strings::StrCat(tmp_dir, "_", i);
dump_roots.push_back(dump_root);
debug_urls.push_back(strings::StrCat("file://", dump_root));
}
uint64 wall_time = Env::Default()->NowMicros();
TF_ASSERT_OK(Init(DT_INT32, debug_urls));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
for (int i = 0; i < kNumDumpDirs; ++i) {
ASSERT_TRUE(env_->FileExists(dump_roots[i]).ok());
ASSERT_TRUE(env_->IsDirectory(dump_roots[i]).ok());
std::vector<string> device_roots;
FileSystem* fs = nullptr;
TF_ASSERT_OK(Env::Default()->GetFileSystemForFile(dump_roots[i], &fs));
std::vector<string> children;
TF_ASSERT_OK(fs->GetChildren(dump_roots[i], &children));
const string kDeviceDirPrefix = strings::StrCat(
DebugNodeKey::kMetadataFilePrefix, DebugNodeKey::kDeviceTag);
for (const string child : children) {
if (!strncmp(child.c_str(), kDeviceDirPrefix.c_str(),
kDeviceDirPrefix.size())) {
device_roots.push_back(io::JoinPath(dump_roots[i], child));
}
}
ASSERT_EQ(1, device_roots.size());
const string& device_root = device_roots[0];
TF_ASSERT_OK(Env::Default()->GetFileSystemForFile(device_root, &fs));
TF_ASSERT_OK(fs->GetChildren(device_root, &children));
int dump_files_found = 0;
for (const string child : children) {
dump_files_found++;
const string dump_file_path = io::JoinPath(device_root, child);
std::fstream ifs(dump_file_path, std::ios::in | std::ios::binary);
Event event;
event.ParseFromIstream(&ifs);
ifs.close();
ASSERT_GE(event.wall_time(), wall_time);
ASSERT_EQ(1, event.summary().value().size());
ASSERT_EQ(strings::StrCat("FakeTensor", ":", 0, ":", "DebugIdentity"),
event.summary().value(0).node_name());
Tensor tensor_prime(DT_INT32);
ASSERT_TRUE(tensor_prime.FromProto(event.summary().value(0).tensor()));
ASSERT_EQ(TensorShape({6}), tensor_prime.shape());
for (int j = 0; j < 6; ++j) {
ASSERT_EQ(j + 1, tensor_prime.flat<int32>()(j));
}
}
ASSERT_EQ(1, dump_files_found);
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
ASSERT_TRUE(env_->DeleteRecursively(dump_roots[i], &undeleted_files,
&undeleted_dirs)
.ok());
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
TEST_F(DebugIdentityOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(DebugIdentityOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
class DebugNanCountOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNanCount")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(DebugNanCountOpTest, Float_has_NaNs) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({6}),
{1.1, std::numeric_limits<float>::quiet_NaN(), 3.3,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(), 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {3});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
TEST_F(DebugNanCountOpTest, Float_no_NaNs) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(
TensorShape({6}),
{1.1, 2.2, 3.3, std::numeric_limits<float>::infinity(), 5.5, 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {0});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
TEST_F(DebugNanCountOpTest, Double_has_NaNs) {
TF_ASSERT_OK(Init(DT_DOUBLE));
AddInputFromArray<double>(TensorShape({6}),
{1.1, std::numeric_limits<double>::quiet_NaN(), 3.3,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(), 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {3});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
TEST_F(DebugNanCountOpTest, Double_no_NaNs) {
TF_ASSERT_OK(Init(DT_DOUBLE));
AddInputFromArray<double>(
TensorShape({6}),
{1.1, 2.2, 3.3, std::numeric_limits<double>::infinity(), 5.5, 6.6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_nan_count(allocator(), DT_INT64, TensorShape({1}));
test::FillValues<int64_t>(&expected_nan_count, {0});
test::ExpectTensorEqual<int64_t>(expected_nan_count, *GetOutput(0));
}
class DebugNumericSummaryOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Finalize(node_def()));
return InitOp();
}
Status InitGated(DataType input_type, const std::vector<string>& debug_urls) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("gated_grpc", true)
.Attr("debug_urls", debug_urls)
.Finalize(node_def()));
return InitOp();
}
#if defined(PLATFORM_GOOGLE)
void ClearEnabledWatchKeys() { DebugGrpcIO::ClearEnabledWatchKeys(); }
#endif
};
TEST_F(DebugNumericSummaryOpTest, Float_full_house) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(
TensorShape({18}),
{std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f, 0.0f, -1.0f, -3.0f,
3.0f, 7.0f, -std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({15}));
test::FillValues<double>(
&expected,
{1.0,
18.0,
4.0,
2.0,
2.0,
3.0,
2.0,
5.0,
-3.0,
7.0,
0.85714285714,
8.97959183673,
static_cast<double>(DT_FLOAT),
1.0,
18.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Double_full_house) {
TF_ASSERT_OK(Init(DT_DOUBLE));
AddInputFromArray<double>(
TensorShape({18}),
{std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(), 0.0, 0.0, 0.0, -1.0, -3.0, 3.0,
7.0, -std::numeric_limits<double>::infinity(),
-std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::infinity(),
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({15}));
test::FillValues<double>(
&expected,
{1.0,
18.0,
4.0,
2.0,
2.0,
3.0,
2.0,
5.0,
-3.0,
7.0,
0.85714285714,
8.97959183673,
static_cast<double>(DT_DOUBLE),
1.0,
18.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Float_only_valid_values) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({2, 3}),
{0.0f, 0.0f, -1.0f, 3.0f, 3.0f, 7.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(
&expected,
{1.0,
6.0,
0.0,
0.0,
1.0,
2.0,
3.0,
0.0,
-1.0,
7.0,
2.0,
7.33333333333,
static_cast<double>(DT_FLOAT),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Float_all_Inf_or_NaN) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({3, 3}),
{std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor output_tensor = *GetOutput(0);
const double* output = output_tensor.template flat<double>().data();
ASSERT_NEAR(1.0, output[0], 1e-8);
ASSERT_NEAR(9.0, output[1], 1e-8);
ASSERT_NEAR(4.0, output[2], 1e-8);
ASSERT_NEAR(2.0, output[3], 1e-8);
ASSERT_NEAR(0.0, output[4], 1e-8);
ASSERT_NEAR(0.0, output[5], 1e-8);
ASSERT_NEAR(0.0, output[6], 1e-8);
ASSERT_NEAR(3.0, output[7], 1e-8);
ASSERT_EQ(std::numeric_limits<float>::infinity(), output[8]);
ASSERT_EQ(-std::numeric_limits<float>::infinity(), output[9]);
ASSERT_TRUE(Eigen::numext::isnan(output[10]));
ASSERT_TRUE(Eigen::numext::isnan(output[11]));
ASSERT_EQ(static_cast<double>(DT_FLOAT), output[12]);
ASSERT_EQ(2.0, output[13]);
ASSERT_EQ(3.0, output[14]);
ASSERT_EQ(3.0, output[15]);
}
TEST_F(DebugNumericSummaryOpTest, Many_dimensions_tensor_shape) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({1, 3, 1, 1, 1, 1, 1}),
{std::numeric_limits<float>::quiet_NaN(),
-std::numeric_limits<float>::infinity(), -8.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({21}));
test::FillValues<double>(&expected,
{1.0,
3.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
-8.0,
-8.0,
-8.0,
0.0,
static_cast<double>(DT_FLOAT),
7.0,
1.0,
3.0,
1.0,
1.0,
1.0,
1.0,
1.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Scalar_tensor_shape) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(TensorShape({}), {42.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({14}));
test::FillValues<double>(&expected,
{1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
42.0,
42.0,
42.0,
0.0,
static_cast<double>(DT_FLOAT),
0.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Int16Success) {
TF_ASSERT_OK(Init(DT_INT16));
AddInputFromArray<int16>(TensorShape({4, 1}), {-1, -3, 3, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(&expected,
{1.0,
4.0,
0.0,
0.0,
2.0,
0.0,
2.0,
0.0,
-3.0,
7.0,
1.5,
14.75,
static_cast<double>(DT_INT16),
2.0,
4.0, 1.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Int32Success) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {0, 0, -1, 3, 3, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(
&expected,
{1.0,
6.0,
0.0,
0.0,
1.0,
2.0,
3.0,
0.0,
-1.0,
7.0,
2.0,
7.33333333333,
static_cast<double>(DT_INT32),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, Int64Success) {
TF_ASSERT_OK(Init(DT_INT64));
AddInputFromArray<int64_t>(TensorShape({2, 2, 2}), {0, 0, -1, 3, 3, 7, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({17}));
test::FillValues<double>(&expected,
{1.0,
8.0,
0.0,
0.0,
1.0,
4.0,
3.0,
0.0,
-1.0,
7.0,
1.5,
6.25,
static_cast<double>(DT_INT64),
3.0,
2.0, 2.0, 2.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, UInt8Success) {
TF_ASSERT_OK(Init(DT_UINT8));
AddInputFromArray<uint8>(TensorShape({1, 5}), {0, 10, 30, 30, 70});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(&expected,
{1.0,
5.0,
0.0,
0.0,
0.0,
1.0,
4.0,
0.0,
0.0,
70.0,
28.0,
576.0,
static_cast<double>(DT_UINT8),
2.0,
1.0, 5.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, BoolSuccess) {
TF_ASSERT_OK(Init(DT_BOOL));
AddInputFromArray<bool>(TensorShape({2, 3}),
{false, false, true, true, true, false});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(&expected,
{1.0,
6.0,
0.0,
0.0,
0.0,
3.0,
3.0,
0.0,
0.0,
1.0,
0.5,
0.25,
static_cast<double>(DT_BOOL),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
#if defined(PLATFORM_GOOGLE)
TEST_F(DebugNumericSummaryOpTest, DisabledDueToEmptyEnabledSet) {
ClearEnabledWatchKeys();
// Placeholder gRPC debug URL (the original literal is truncated at "grpc:"
// in this source); any grpc:// address works here because the watch key is
// never enabled, so the op stays gated off and never publishes.
std::vector<string> debug_urls({"grpc://localhost:3333"});
TF_ASSERT_OK(InitGated(DT_FLOAT, debug_urls));
AddInputFromArray<float>(TensorShape({2, 2}), {1.0, 3.0, 3.0, 7.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_disabled(allocator(), DT_DOUBLE, TensorShape({0}));
test::ExpectTensorNear<double>(expected_disabled, *GetOutput(0), 1e-8);
}
TEST_F(DebugNumericSummaryOpTest, DisabledDueToNonMatchingWatchKey) {
ClearEnabledWatchKeys();
// Placeholder URL and watch key (the original literals are truncated at
// "grpc:" in this source); any watch key other than this op's own
// "FakeTensor:0:DebugNumericSummary" keeps the gate closed.
DebugGrpcIO::SetDebugNodeKeyGrpcState(
"grpc://localhost:3333", "FakeTensor:1:DebugNumericSummary",
EventReply::DebugOpStateChange::READ_ONLY);
std::vector<string> debug_urls({"grpc://localhost:3333"});
TF_ASSERT_OK(InitGated(DT_FLOAT, debug_urls));
AddInputFromArray<float>(TensorShape({2, 2}), {1.0, 3.0, 3.0, 7.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_disabled(allocator(), DT_DOUBLE, TensorShape({0}));
test::ExpectTensorNear<double>(expected_disabled, *GetOutput(0), 1e-8);
}
#endif
class DebugNumericSummaryOpCustomLowerBoundTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("lower_bound", -1.2f)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(DebugNumericSummaryOpCustomLowerBoundTest, Float_full_house) {
TF_ASSERT_OK(Init(DT_FLOAT));
AddInputFromArray<float>(
TensorShape({18}),
{std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f, 0.0f, -1.0f, -3.0f,
3.0f, 7.0f, -std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({15}));
test::FillValues<double>(
&expected,
{1.0,
18.0,
4.0,
3.0,
1.0,
3.0,
2.0,
5.0,
-3.0,
7.0,
0.85714285714,
8.97959183673,
static_cast<double>(DT_FLOAT),
1.0,
18.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
class DebugNumericSummaryOpCustomLowerUpperBoundsTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "DebugNumericSummary")
.Input(FakeInput(input_type))
.Attr("tensor_name", "FakeTensor:0")
.Attr("lower_bound", -0.5f)
.Attr("upper_bound", 3.6f)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(DebugNumericSummaryOpCustomLowerUpperBoundsTest, Int32Success) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {0, 0, -1, 3, 3, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({16}));
test::FillValues<double>(
&expected,
{1.0,
6.0,
0.0,
1.0,
0.0,
2.0,
2.0,
1.0,
-1.0,
7.0,
2.0,
7.33333333333,
static_cast<double>(DT_INT32),
2.0,
2.0, 3.0});
test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
}
} |
1,504 | cpp | tensorflow/tensorflow | xent_op | tensorflow/core/kernels/xent_op.cc | tensorflow/core/kernels/xent_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_XENT_OP_H_
#define TENSORFLOW_CORE_KERNELS_XENT_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct XentFunctor {
void operator()(const Device &d,
const Eigen::DSizes<Eigen::DenseIndex, 2> &shape,
const Eigen::array<Eigen::DenseIndex, 2> &logits_bcast,
const Eigen::array<Eigen::DenseIndex, 2> &labels_bcast,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<T>::ConstMatrix labels,
typename TTypes<T>::Matrix scratch,
typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop);
};
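// Eigen implementation of softmax cross-entropy with logits. For each row,
// the row max is subtracted from the logits before exponentiation for
// numerical stability, then
// loss = sum_j labels_j * (log(sum_k exp(logits_k - max)) - (logits_j - max))
// backprop = softmax(logits) - labels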
template <typename Device, typename T>
struct XentEigenImpl {
static void Compute(const Device &d,
const Eigen::DSizes<Eigen::DenseIndex, 2> &shape,
const Eigen::array<Eigen::DenseIndex, 2> &logits_bcast,
const Eigen::array<Eigen::DenseIndex, 2> &labels_bcast,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<T>::ConstMatrix labels,
typename TTypes<T>::Matrix scratch,
typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
const int kBatchDim = 0;
const int kClassDim = 1;
const int batch_size = shape[kBatchDim];
const int num_classes = shape[kClassDim];
Eigen::IndexList<Eigen::type2index<kClassDim> > along_class;
Eigen::IndexList<int, Eigen::type2index<1> > batch_by_one;
batch_by_one.set(0, batch_size);
Eigen::IndexList<int> batch_only;
batch_only.set(0, batch_size);
Eigen::IndexList<Eigen::type2index<1>, int> one_by_class;
one_by_class.set(1, num_classes);
scratch.reshape(batch_only).device(d) =
logits.broadcast(logits_bcast).maximum(along_class);
backprop.device(d) =
logits.broadcast(logits_bcast) - scratch.broadcast(one_by_class);
scratch.reshape(batch_only).device(d) = backprop.exp().sum(along_class);
loss.device(d) = (labels.broadcast(labels_bcast) *
(scratch.log().eval().broadcast(one_by_class) - backprop))
.eval()
.sum(along_class);
backprop.device(d) = (backprop.exp() / scratch.broadcast(one_by_class)) -
labels.broadcast(labels_bcast);
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/xent_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
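// Computes the per-example softmax cross-entropy loss (output 0, a vector of
// length batch_size) and its gradient w.r.t. the logits (output 1, shaped
// like the broadcasted inputs). logits and labels may differ in shape as
// long as they broadcast to a common 2-D shape.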
template <typename Device, typename T>
class SoftmaxXentWithLogitsOp : public OpKernel {
public:
explicit SoftmaxXentWithLogitsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& logits_in = context->input(0);
const Tensor& labels_in = context->input(1);
TensorShape shape_in = logits_in.shape();
BCast bcast(BCast::FromShape(logits_in.shape()),
BCast::FromShape(labels_in.shape()),
false);
if (!logits_in.IsSameSize(labels_in)) {
OP_REQUIRES(context, bcast.IsValid(),
errors::InvalidArgument(
"logits and labels must be broadcastable: logits_size=",
logits_in.shape().DebugString(),
" labels_size=", labels_in.shape().DebugString()));
shape_in = BCast::ToShape(bcast.output_shape());
}
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(shape_in),
errors::InvalidArgument("logits and labels must be either "
"2-dimensional, or broadcasted to be "
"2-dimensional"));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(context, !OpDeterminismRequired(),
errors::Unimplemented(
"The GPU implementation of SoftmaxCrossEntropyWithLogits"
" that would have been executed is not deterministic."
" Note that the Python API uses an alternative,"
" deterministic, GPU-accelerated path when determinism is"
" enabled."));
}
Tensor scratch;
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({shape_in.dim_size(0), 1}),
&scratch));
Tensor* loss_out = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(
0, TensorShape({shape_in.dim_size(0)}), &loss_out));
Tensor* back_out = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 1, shape_in, &back_out));
if (shape_in.dim_size(0) > 0) {
functor::XentFunctor<Device, T> functor;
functor(context->eigen_device<Device>(), shape_in.AsEigenDSizes<2>(),
BCast::ToIndexArray<2>(bcast.x_bcast()),
BCast::ToIndexArray<2>(bcast.y_bcast()),
logits_in.template shaped<T, 2>(bcast.x_reshape()),
labels_in.template shaped<T, 2>(bcast.y_reshape()),
scratch.matrix<T>(), loss_out->vec<T>(), back_out->matrix<T>());
}
}
};
namespace functor {
template <typename Device, typename T>
struct XentFunctorBase {
void operator()(const Device& d,
const Eigen::DSizes<Eigen::DenseIndex, 2>& shape,
const Eigen::array<Eigen::DenseIndex, 2>& logits_bcast,
const Eigen::array<Eigen::DenseIndex, 2>& labels_bcast,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<T>::ConstMatrix labels,
typename TTypes<T>::Matrix scratch,
typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
if (shape[0] > 0) {
XentEigenImpl<Device, T>::Compute(d, shape, logits_bcast, labels_bcast,
logits, labels, scratch, loss,
backprop);
}
}
};
template <typename T>
struct XentFunctor<CPUDevice, T> : XentFunctorBase<CPUDevice, T> {};
}
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER(Name("SoftmaxCrossEntropyWithLogits") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
SoftmaxXentWithLogitsOp<CPUDevice, T>);
TF_CALL_half(REGISTER_CPU);
TF_CALL_float(REGISTER_CPU);
TF_CALL_double(REGISTER_CPU);
TF_CALL_bfloat16(REGISTER_CPU);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER(Name("SoftmaxCrossEntropyWithLogits") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T"), \
SoftmaxXentWithLogitsOp<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU);
#endif
} | #include "tensorflow/core/kernels/xent_op.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <class T>
static Graph* Xent(int batch_size, int num_classes, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor logits(type, TensorShape({batch_size, num_classes}));
logits.flat<T>().setRandom();
Tensor labels(type, TensorShape({batch_size, num_classes}));
labels.flat<T>().setRandom();
test::graph::Binary(g, "SoftmaxCrossEntropyWithLogits",
test::graph::Constant(g, logits),
test::graph::Constant(g, labels));
return g;
}
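// Benchmark: runs a single SoftmaxCrossEntropyWithLogits node over random
// BATCH x CLASS logits/labels and reports items (elements) processed.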
#define BM_XentDev(BATCH, CLASS, DEVICE, C_TYPE, TF_TYPE) \
static void BM_Xent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Xent<C_TYPE>(BATCH, CLASS, TF_TYPE), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * BATCH * CLASS; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_Xent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE)->UseRealTime()
#ifdef GOOGLE_CUDA
BM_XentDev(16, 10000, gpu, float, DT_FLOAT);
BM_XentDev(16, 30000, gpu, float, DT_FLOAT);
BM_XentDev(16, 100000, gpu, float, DT_FLOAT);
BM_XentDev(32, 10000, gpu, float, DT_FLOAT);
BM_XentDev(32, 30000, gpu, float, DT_FLOAT);
BM_XentDev(32, 100000, gpu, float, DT_FLOAT);
BM_XentDev(64, 10000, gpu, float, DT_FLOAT);
BM_XentDev(64, 30000, gpu, float, DT_FLOAT);
BM_XentDev(64, 100000, gpu, float, DT_FLOAT);
#endif
#define BM_XentDev_CPU(C_TYPE, TF_TYPE) \
BM_XentDev(1, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(2, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(4, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(8, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(16, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(32, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(64, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(128, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(256, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(512, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(1024, 10000, cpu, C_TYPE, TF_TYPE)
BM_XentDev_CPU(float, DT_FLOAT);
BM_XentDev_CPU(bfloat16, DT_BFLOAT16);
} |
1,505 | cpp | tensorflow/tensorflow | gather_nd_op | tensorflow/core/kernels/gather_nd_op.cc | tensorflow/core/kernels/gather_nd_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_H_
#define TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bad_indices_policy.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
class OpKernelContext;
class Tensor;
namespace functor {
template <typename Device, typename T, typename Index, int IXDIM>
struct GatherNdSlice {
Index operator()(const Device& d, const Index slice_size,
typename TTypes<int32>::Scalar Tscratch,
typename TTypes<T, IXDIM + 1>::ConstTensor Tparams,
typename TTypes<Index>::ConstMatrix Tindices,
typename TTypes<T>::Matrix Tout);
};
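// Shared implementation of GatherNd. Each row of `indices` (its last
// dimension) is an index tuple into the leading dimensions of `params`;
// the gathered output has shape
// indices.shape[:-1] + params.shape[indices.shape[-1]:].
// Example: params = [10, 20, 30, 40], indices = [[3], [1]] -> out = [40, 20].
// Out-of-range indices raise InvalidArgument on CPU by default (or whenever
// bad_indices_policy is ERROR); with the IGNORE policy the corresponding
// output slice is zero-filled (see the IgnoreOutOfRange test below).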
template <typename Device, typename T, typename Index>
Status DoGatherNd(
OpKernelContext* c, const Tensor& params, const Tensor& indices,
Tensor* out,
BadIndicesPolicy bad_indices_policy = BadIndicesPolicy::kDefault) {
if (!TensorShapeUtils::IsVectorOrHigher(params.shape())) {
return errors::InvalidArgument("params must be at least a vector");
}
if (!TensorShapeUtils::IsVectorOrHigher(indices.shape())) {
return errors::InvalidArgument("indices must be at least a vector");
}
if (indices.dim_size(indices.dims() - 1) > params.dims()) {
return errors::InvalidArgument(
"index innermost dimension length must be <= params rank; saw: ",
indices.dim_size(indices.dims() - 1), " vs. ", params.dims());
}
const TensorShape& indices_shape(indices.shape());
const int64_t indices_nd = indices_shape.dim_size(indices_shape.dims() - 1);
int64_t N_big = 1;
for (int i = 0; i < indices_shape.dims() - 1; ++i) {
N_big *= indices_shape.dim_size(i);
}
if (N_big > std::numeric_limits<int>::max()) {
return errors::InvalidArgument(
"indices has too many elements for int indexing: ", N_big, " > ",
std::numeric_limits<int>::max());
}
if (params.NumElements() > std::numeric_limits<Index>::max()) {
return errors::InvalidArgument("params.NumElements() too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params.NumElements(), " > ",
std::numeric_limits<Index>::max());
}
Index N_result = 1;
for (int i = 0; i < indices_shape.dims() - 1; ++i) {
N_result *= indices_shape.dim_size(i);
}
const TensorShape& params_shape(params.shape());
Index total_nd = params_shape.dims();
TensorShape result_shape(indices_shape);
result_shape.RemoveLastDims(1);
int64_t slice_size_big = 1;
for (Index i = indices_nd; i < total_nd; ++i) {
slice_size_big *= params_shape.dim_size(i);
TF_RETURN_IF_ERROR(result_shape.AddDimWithStatus(params_shape.dim_size(i)));
}
if (slice_size_big > std::numeric_limits<Index>::max()) {
return errors::InvalidArgument(
"slice size is too large for indexing: ", slice_size_big, " > ",
std::numeric_limits<Index>::max());
}
const Index slice_size = static_cast<Index>(slice_size_big);
TF_RETURN_IF_ERROR(
c->allocate_temp(DataTypeToEnum<T>::value, result_shape, out));
if (N_result > 0) {
if (params_shape.num_elements() == 0) {
return errors::InvalidArgument(
"Requested more than 0 entries, but "
"params is empty. Params shape: ",
params_shape.DebugString());
}
auto indices_mat = indices.flat_inner_dims<Index>();
Index bad_i = -1;
auto out_mat = out->shaped<T, 2>({N_result, slice_size});
Tensor scratch;
TF_RETURN_IF_ERROR(c->allocate_temp(DT_INT32, TensorShape(), &scratch));
auto scratch_scalar = scratch.scalar<int32>();
switch (indices_nd) {
#define PARAMS_CASE(IXDIM) \
case IXDIM: { \
functor::GatherNdSlice<Device, T, Index, IXDIM> func; \
auto params_flat = params.flat_outer_dims<T, IXDIM + 1>(); \
bad_i = func(c->eigen_device<Device>(), slice_size, scratch_scalar, \
params_flat, indices_mat, out_mat); \
} break
PARAMS_CASE(0);
PARAMS_CASE(1);
PARAMS_CASE(2);
PARAMS_CASE(3);
PARAMS_CASE(4);
PARAMS_CASE(5);
PARAMS_CASE(6);
PARAMS_CASE(7);
#undef PARAMS_CASE
default:
return errors::InvalidArgument(
"Only indices.shape[-1] values between 1 and 7 "
"are currently supported. Requested rank: ",
indices_nd);
}
using CPUDevice = Eigen::ThreadPoolDevice;
const bool check_bad_indices =
((std::is_same<Device, CPUDevice>::value &&
bad_indices_policy == BadIndicesPolicy::kDefault) ||
bad_indices_policy == BadIndicesPolicy::kError);
if (check_bad_indices && bad_i >= 0) {
auto shape = indices.shape();
shape.RemoveLastDims(1);
return errors::InvalidArgument(
"indices", SliceDebugString(shape, bad_i), " = [",
str_util::Join(
gtl::ArraySlice<Index>(&indices_mat(bad_i, 0), indices_nd), ", "),
"] does not index into param shape ", params.shape().DebugString(),
", node name: ", c->op_kernel().name());
}
}
return absl::OkStatus();
}
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/gather_nd_op.h"
#include <string>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bad_indices_policy.h"
namespace tensorflow {
namespace {
constexpr char kBadIndicesPolicyAtrr[] = "bad_indices_policy";
}
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
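// OpKernel wrapper for GatherNd: validates the optional bad_indices_policy
// attribute and delegates the actual gather to functor::DoGatherNd.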
template <typename Device, typename T, typename Index>
class GatherNdOp : public OpKernel {
public:
explicit GatherNdOp(OpKernelConstruction* c) : OpKernel(c) {
const DataType dt = DataTypeToEnum<T>::v();
const DataType index_t = DataTypeToEnum<Index>::v();
OP_REQUIRES_OK(c, c->MatchSignature({dt, index_t}, {dt}));
if (c->HasAttr(kBadIndicesPolicyAtrr)) {
std::string bad_indices_policy_str;
OP_REQUIRES_OK(
c, c->GetAttr(kBadIndicesPolicyAtrr, &bad_indices_policy_str));
absl::StatusOr<BadIndicesPolicy> bad_indices_policy =
BadIndicesPolicyFromString(bad_indices_policy_str);
OP_REQUIRES_OK(c, bad_indices_policy.status());
bad_indices_policy_ = *bad_indices_policy;
}
}
void Compute(OpKernelContext* c) override {
const Tensor& params = c->input(0);
const Tensor& indices = c->input(1);
Tensor out;
OP_REQUIRES_OK(c, functor::DoGatherNd<Device, T, Index>(
c, params, indices, &out, bad_indices_policy_));
c->set_output(0, out);
}
private:
BadIndicesPolicy bad_indices_policy_ = BadIndicesPolicy::kDefault;
};
#define REGISTER_GATHER_ND_FULL(dev, type, index_type) \
REGISTER_KERNEL_BUILDER( \
Name("GatherNd") \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("Tparams") \
.TypeConstraint<index_type>("Tindices") \
.AttrConstraint<std::string>( \
"bad_indices_policy", \
{"", "DEFAULT", "ERROR", "IGNORE"}), \
GatherNdOp<dev##Device, type, index_type>)
#define REGISTER_GATHER_ND_CPU(type) \
REGISTER_GATHER_ND_FULL(CPU, type, int16); \
REGISTER_GATHER_ND_FULL(CPU, type, int32); \
REGISTER_GATHER_ND_FULL(CPU, type, int64_t)
TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_QUANTIZED_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_float8_e5m2(REGISTER_GATHER_ND_CPU);
TF_CALL_float8_e4m3fn(REGISTER_GATHER_ND_CPU);
#undef REGISTER_GATHER_ND_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, NDIM) \
template <> \
Index GatherNdSlice<GPUDevice, T, Index, NDIM>::operator()( \
const GPUDevice& d, const Index slice_size, \
typename TTypes<int32>::Scalar Tscratch, \
typename TTypes<T, NDIM + 1>::ConstTensor Tparams, \
typename TTypes<Index>::ConstMatrix Tindices, \
typename TTypes<T>::Matrix Tout); \
extern template struct GatherNdSlice<GPUDevice, T, Index, NDIM>;
#define DECLARE_GPU_SPECS_INDEX(T, Index) \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 0); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 1); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 2); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 3); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 4); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 5); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 6); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 7);
#define DECLARE_GPU_SPECS(T) \
DECLARE_GPU_SPECS_INDEX(T, int32); \
DECLARE_GPU_SPECS_INDEX(T, int64_t)
TF_CALL_int32(DECLARE_GPU_SPECS);
TF_CALL_int64(DECLARE_GPU_SPECS);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS);
TF_CALL_COMPLEX_TYPES(DECLARE_GPU_SPECS);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_INDEX
}
#undef REGISTER_GATHER_ND_FULL
#define REGISTER_GATHER_ND_FULL(dev, type, index_type) \
REGISTER_KERNEL_BUILDER( \
Name("GatherNd") \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("Tparams") \
.TypeConstraint<index_type>("Tindices") \
.AttrConstraint<std::string>("bad_indices_policy", \
{"", "DEFAULT", "IGNORE"}), \
GatherNdOp<dev##Device, type, index_type>)
#define REGISTER_GATHER_ND_GPU(type) \
REGISTER_GATHER_ND_FULL(GPU, type, int32); \
REGISTER_GATHER_ND_FULL(GPU, type, int64_t)
TF_CALL_int32(REGISTER_GATHER_ND_GPU);
TF_CALL_int64(REGISTER_GATHER_ND_GPU);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GATHER_ND_GPU);
TF_CALL_COMPLEX_TYPES(REGISTER_GATHER_ND_GPU);
#undef REGISTER_GATHER_ND_GPU
#endif
#undef REGISTER_GATHER_ND_FULL
} | #include <functional>
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace test {
namespace graph {
class Node* GatherNd(Graph* g, class Node* in0, class Node* in1) {
class Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "GatherNd")
.Input(in0)
.Input(in1)
.Finalize(g, &ret));
return ret;
}
}
}
namespace {
class GatherNdOpTest : public OpsTestBase {
protected:
void MakeOp(DataType param_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
.Input(FakeInput(param_type))
.Input(FakeInput(index_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(GatherNdOpTest, Simple) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected, {8, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(GatherNdOpTest, Error_OutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 5});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.message(), "indices[1] = [5] does not index into param shape [5]"))
<< s.message();
}
TEST_F(GatherNdOpTest, Quantized_UINT8) {
MakeOp(DT_QUINT8, DT_INT32);
AddInputFromArray<quint8>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({2}));
test::FillValues<quint8>(&expected, {8, 4});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
}
TEST_F(GatherNdOpTest, Quantized_INT8) {
MakeOp(DT_QINT8, DT_INT32);
AddInputFromArray<qint8>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({2}));
test::FillValues<qint8>(&expected, {8, 4});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
}
class GatherNdOpIgnoreBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType param_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
.Input(FakeInput(param_type))
.Input(FakeInput(index_type))
.Attr("bad_indices_policy", "IGNORE")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(GatherNdOpIgnoreBadIndicesTest, IgnoreOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {9, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({3, 1}), {3, 5, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {8, 0, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class GatherNdOpConstructionTest : public OpsTestBase {};
TEST_F(GatherNdOpConstructionTest, Error_BadIndicesPolicyInvalid) {
TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY")
.Finalize(node_def()));
EXPECT_NE(InitOp(), absl::OkStatus());
}
constexpr int kLookups = 2000;
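// Benchmark graph builder: gathers kLookups random 4-D index tuples from a
// [dim, 8, 16, 32] params tensor.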
template <typename Index>
static Graph* GatherNd(int dim) {
Graph* g = new Graph(OpRegistry::Global());
Tensor params(DT_FLOAT, TensorShape({dim, 8, 16, 32}));
params.flat<float>().setRandom();
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
Tensor indices(DataTypeToEnum<Index>::value, TensorShape({kLookups, 4}));
auto indices_mat = indices.matrix<Index>();
for (int i = 0; i < kLookups; i++) {
indices_mat(i, 0) = rnd.Uniform(dim);
indices_mat(i, 1) = rnd.Uniform(8);
indices_mat(i, 2) = rnd.Uniform(16);
indices_mat(i, 3) = rnd.Uniform(32);
}
test::graph::GatherNd(g, test::graph::Constant(g, params),
test::graph::Constant(g, indices));
return g;
}
#define BM_GATHER_ND(DEVICE, INDEX) \
static void BM_##DEVICE##_gather_nd_##INDEX( \
::testing::benchmark::State& state) { \
const int dim = state.range(0); \
test::Benchmark(#DEVICE, GatherNd<INDEX>(dim), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * kLookups * 4; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_gather_nd_##INDEX) \
->UseRealTime() \
->Arg(10) \
->Arg(100) \
->Arg(1000) \
->Arg(10000)
BM_GATHER_ND(cpu, int32);
BM_GATHER_ND(gpu, int32);
BM_GATHER_ND(cpu, int64_t);
BM_GATHER_ND(gpu, int64_t);
}
} |
1,506 | cpp | tensorflow/tensorflow | fused_batch_norm_op | tensorflow/core/kernels/fused_batch_norm_op.cc | tensorflow/core/kernels/fused_batch_norm_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_FUSED_BATCH_NORM_OP_H_
#define TENSORFLOW_CORE_KERNELS_FUSED_BATCH_NORM_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace functor {
enum class FusedBatchNormActivationMode { kIdentity, kRelu };
std::string ToString(FusedBatchNormActivationMode activation_mode);
Status ParseActivationMode(OpKernelConstruction* context,
FusedBatchNormActivationMode* activation_mode);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename Device, typename T, typename U>
struct FusedBatchNormInferenceFunctor {
void operator()(OpKernelContext* context, TensorFormat tensor_format,
typename TTypes<T, 4>::ConstTensor in,
typename TTypes<U>::ConstVec scale,
typename TTypes<U>::ConstVec offset,
typename TTypes<U>::ConstVec estimated_mean,
typename TTypes<U>::ConstVec estimated_variance,
typename TTypes<T, 4>::ConstTensor side_input, U epsilon,
FusedBatchNormActivationMode activation_mode,
typename TTypes<T, 4>::Tensor out);
};
#endif
template <typename Device, typename T, typename U>
struct FusedBatchNormFreezeGrad {
void operator()(OpKernelContext* context, const Tensor& y_backprop_input,
const Tensor& x_input, const Tensor& scale_input,
const Tensor& pop_mean_input,
const Tensor& pop_variance_input, U epsilon,
Tensor* x_backprop_output, Tensor* scale_backprop_output,
Tensor* offset_backprop_output) {}
};
}
}
#endif
#include <array>
#include <atomic>
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
#include "tensorflow/core/kernels/conv_2d.h"
#include "tensorflow/core/kernels/gpu_utils.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/util/stream_executor_util.h"
#endif
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/fused_batch_norm_op.h"
#include "tensorflow/core/kernels/redux_functor.h"
#include "tensorflow/core/kernels/transpose_functor.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
namespace functor {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
using se::DeviceMemory;
using se::ScratchAllocator;
using se::Stream;
using tsl::StatusOr;
#endif
string ToString(FusedBatchNormActivationMode activation_mode) {
switch (activation_mode) {
case FusedBatchNormActivationMode::kIdentity:
return "Identity";
case FusedBatchNormActivationMode::kRelu:
return "Relu";
}
}
Status ParseActivationMode(OpKernelConstruction* context,
FusedBatchNormActivationMode* activation_mode) {
string activation_mode_str;
TF_RETURN_IF_ERROR(context->GetAttr("activation_mode", &activation_mode_str));
if (activation_mode_str == "Identity") {
*activation_mode = FusedBatchNormActivationMode::kIdentity;
return absl::OkStatus();
}
if (activation_mode_str == "Relu") {
*activation_mode = FusedBatchNormActivationMode::kRelu;
return absl::OkStatus();
}
return errors::InvalidArgument("Unsupported activation mode: ",
activation_mode_str);
}
template <typename Device, typename T, typename U, bool is_training>
struct FusedBatchNorm;
template <typename Device, typename T, typename U>
struct FusedBatchNormGrad;
template <typename T, typename U>
struct FusedBatchNorm<CPUDevice, T, U, true> {
void operator()(OpKernelContext* context, const Tensor& x_input,
const Tensor& scale_input, const Tensor& offset_input,
const Tensor& running_mean_input,
const Tensor& running_variance_input,
const Tensor* side_input, U epsilon, U exponential_avg_factor,
FusedBatchNormActivationMode activation_mode,
Tensor* y_output, Tensor* running_mean_output,
Tensor* running_var_output, Tensor* saved_batch_mean_output,
Tensor* saved_batch_var_output, TensorFormat tensor_format,
bool use_reserved_space) {
OP_REQUIRES(context, side_input == nullptr,
errors::Internal(
"The CPU implementation of FusedBatchNorm does not support "
"side input."));
OP_REQUIRES(context,
activation_mode == FusedBatchNormActivationMode::kIdentity,
errors::Internal("The CPU implementation of FusedBatchNorm "
"does not support activations."));
if (use_reserved_space) {
Tensor* dummy_reserve_space = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(5, {}, &dummy_reserve_space));
dummy_reserve_space->flat<U>()(0) = U();
}
if (x_input.shape().num_elements() == 0) {
functor::SetNanFunctor<CPUDevice, U> f;
f(context->eigen_device<CPUDevice>(), running_mean_output->flat<U>());
f(context->eigen_device<CPUDevice>(), running_var_output->flat<U>());
return;
}
Tensor transformed_x;
Tensor transformed_y;
if (tensor_format == FORMAT_NCHW) {
const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N');
const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H');
const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W');
const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C');
TensorShape transformed_x_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols,
in_depths, &transformed_x_shape));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_x_shape, &transformed_x));
TensorShape transformed_y_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols,
in_depths, &transformed_y_shape));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_y_shape, &transformed_y));
std::array<int32, 4> perm = {0, 2, 3, 1};
OP_REQUIRES_OK(
context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
x_input, perm, &transformed_x));
} else {
transformed_x = x_input;
transformed_y = *y_output;
}
typename TTypes<T, 4>::Tensor x(transformed_x.tensor<T, 4>());
typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
typename TTypes<U>::ConstVec offset(offset_input.vec<U>());
typename TTypes<U>::ConstVec old_mean(running_mean_input.vec<U>());
typename TTypes<U>::ConstVec old_variance(running_variance_input.vec<U>());
typename TTypes<T, 4>::Tensor y(transformed_y.tensor<T, 4>());
typename TTypes<U>::Vec new_mean(running_mean_output->vec<U>());
typename TTypes<U>::Vec new_variance(running_var_output->vec<U>());
typename TTypes<U>::Vec saved_batch_mean(saved_batch_mean_output->vec<U>());
typename TTypes<U>::Vec saved_batch_var(saved_batch_var_output->vec<U>());
const CPUDevice& d = context->eigen_device<CPUDevice>();
const int depth = x.dimension(3);
const int size = x.size();
const int rest_size = size / depth;
Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
one_by_depth.set(1, depth);
Eigen::IndexList<Eigen::type2index<0>> reduce_dims;
Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec;
bcast_spec.set(0, rest_size);
auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>();
const int rest_size_minus_one = (rest_size > 1) ? (rest_size - 1) : 1;
U rest_size_inv = static_cast<U>(1.0f / static_cast<U>(rest_size));
U rest_size_adjust =
static_cast<U>(rest_size) / static_cast<U>(rest_size_minus_one);
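    // rest_size_adjust = N / (N - 1): rescales the biased batch variance into
    // the unbiased estimate that is folded into the running variance below.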
Eigen::Tensor<U, 1, Eigen::RowMajor> batch_mean(depth);
Eigen::Tensor<U, 1, Eigen::RowMajor> batch_variance(depth);
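    // Reduce over every dimension except the channel (depth) axis to get the
    // batch statistics, then apply y = (x - mean) * rsqrt(var + eps) * scale + offset.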
batch_mean.device(d) = (x_rest_by_depth.sum(reduce_dims) * rest_size_inv);
auto x_centered = x_rest_by_depth -
batch_mean.reshape(one_by_depth).broadcast(bcast_spec);
batch_variance.device(d) =
x_centered.square().sum(reduce_dims) * rest_size_inv;
auto scaling_factor = ((batch_variance + epsilon).rsqrt() * scale)
.eval()
.reshape(one_by_depth)
.broadcast(bcast_spec);
auto x_scaled = x_centered * scaling_factor;
auto x_shifted =
(x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec))
.template cast<T>();
y.reshape(rest_by_depth).device(d) = x_shifted;
if (exponential_avg_factor == U(1.0)) {
saved_batch_var.device(d) = batch_variance;
saved_batch_mean.device(d) = batch_mean;
new_variance.device(d) = batch_variance * rest_size_adjust;
new_mean.device(d) = batch_mean;
} else {
U one_minus_factor = U(1) - exponential_avg_factor;
saved_batch_var.device(d) = batch_variance;
saved_batch_mean.device(d) = batch_mean;
new_variance.device(d) =
one_minus_factor * old_variance +
(exponential_avg_factor * rest_size_adjust) * batch_variance;
new_mean.device(d) =
one_minus_factor * old_mean + exponential_avg_factor * batch_mean;
}
if (tensor_format == FORMAT_NCHW) {
const std::array<int32, 4> perm = {0, 3, 1, 2};
const Status s = ::tensorflow::DoTranspose(
context->eigen_device<CPUDevice>(), transformed_y, perm, y_output);
if (!s.ok()) {
context->SetStatus(errors::InvalidArgument("Transpose failed: ", s));
}
}
}
};
template <typename T, typename U>
struct FusedBatchNorm<CPUDevice, T, U, false> {
void operator()(OpKernelContext* context, const Tensor& x_input,
const Tensor& scale_input, const Tensor& offset_input,
const Tensor& estimated_mean_input,
const Tensor& estimated_variance_input,
const Tensor* side_input, U epsilon, U exponential_avg_factor,
FusedBatchNormActivationMode activation_mode,
Tensor* y_output, Tensor* batch_mean_output,
Tensor* batch_var_output, Tensor* saved_mean_output,
Tensor* saved_var_output, TensorFormat tensor_format,
bool use_reserved_space) {
OP_REQUIRES(context, side_input == nullptr,
errors::Internal(
"The CPU implementation of FusedBatchNorm does not support "
"side input."));
OP_REQUIRES(context,
activation_mode == FusedBatchNormActivationMode::kIdentity,
errors::Internal("The CPU implementation of FusedBatchNorm "
"does not support activations."));
if (use_reserved_space) {
Tensor* dummy_reserve_space = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(5, {}, &dummy_reserve_space));
dummy_reserve_space->flat<U>()(0) = U();
}
if (x_input.shape().num_elements() == 0) {
functor::SetNanFunctor<CPUDevice, U> f;
f(context->eigen_device<CPUDevice>(), batch_mean_output->flat<U>());
f(context->eigen_device<CPUDevice>(), batch_var_output->flat<U>());
return;
}
Tensor transformed_x;
Tensor transformed_y;
if (tensor_format == FORMAT_NCHW) {
const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N');
const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H');
const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W');
const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C');
TensorShape transformed_x_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols,
in_depths, &transformed_x_shape));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_x_shape, &transformed_x));
TensorShape transformed_y_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols,
in_depths, &transformed_y_shape));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_y_shape, &transformed_y));
std::array<int32, 4> perm = {0, 2, 3, 1};
OP_REQUIRES_OK(
context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
x_input, perm, &transformed_x));
} else {
transformed_x = x_input;
transformed_y = *y_output;
}
typename TTypes<T, 4>::Tensor x(transformed_x.tensor<T, 4>());
typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
typename TTypes<U>::ConstVec offset(offset_input.vec<U>());
typename TTypes<U>::ConstVec estimated_mean(estimated_mean_input.vec<U>());
typename TTypes<U>::ConstVec estimated_variance(
estimated_variance_input.vec<U>());
typename TTypes<T, 4>::Tensor y(transformed_y.tensor<T, 4>());
typename TTypes<U>::Vec batch_mean(batch_mean_output->vec<U>());
typename TTypes<U>::Vec batch_variance(batch_var_output->vec<U>());
const CPUDevice& d = context->eigen_device<CPUDevice>();
const int depth = x.dimension(3);
OP_REQUIRES(
context, depth != 0,
errors::Internal("The 4th element in the input shape cannot be 0."));
const int size = x.size();
const int rest_size = size / depth;
Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
one_by_depth.set(1, depth);
Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec;
bcast_spec.set(0, rest_size);
auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>();
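    // Inference path: normalize with the supplied estimated mean/variance;
    // the batch_mean/batch_variance outputs simply forward those estimates.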
auto x_centered =
x_rest_by_depth -
estimated_mean.reshape(one_by_depth).broadcast(bcast_spec);
auto scaling_factor = ((estimated_variance + epsilon).rsqrt() * scale)
.eval()
.reshape(one_by_depth)
.broadcast(bcast_spec);
auto x_scaled = x_centered * scaling_factor;
auto x_shifted =
(x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec))
.template cast<T>();
y.reshape(rest_by_depth).device(d) = x_shifted;
batch_mean.device(d) = estimated_mean;
batch_variance.device(d) = estimated_variance;
if (tensor_format == FORMAT_NCHW) {
const std::array<int32, 4> perm = {0, 3, 1, 2};
const Status s = ::tensorflow::DoTranspose(
context->eigen_device<CPUDevice>(), transformed_y, perm, y_output);
if (!s.ok()) {
context->SetStatus(errors::InvalidArgument("Transpose failed: ", s));
}
}
}
};
template <typename T, typename U>
struct FusedBatchNormGrad<CPUDevice, T, U> {
void operator()(OpKernelContext* context, const Tensor& y_backprop_input,
const Tensor& x_input, const Tensor& scale_input,
const Tensor* offset_input, const Tensor& mean_input,
const Tensor& variance_input, const Tensor* y_input,
U epsilon, FusedBatchNormActivationMode activation_mode,
Tensor* x_backprop_output, Tensor* scale_backprop_output,
Tensor* offset_backprop_output,
Tensor* side_input_backprop_output, bool use_reserved_space,
TensorFormat tensor_format) {
OP_REQUIRES(context,
y_input == nullptr &&
activation_mode == FusedBatchNormActivationMode::kIdentity,
errors::Internal(
"The CPU implementation of FusedBatchNormGrad does not "
"support activations."));
OP_REQUIRES(context, side_input_backprop_output == nullptr,
errors::Internal("The CPU implementation of FusedBatchNormGrad "
"does not support side input."));
Tensor transformed_y_backprop_input;
Tensor transformed_x_input;
Tensor transformed_x_backprop_output;
if (tensor_format == FORMAT_NCHW) {
const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N');
const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H');
const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W');
const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C');
TensorShape transformed_y_backprop_input_shape;
OP_REQUIRES_OK(context,
ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths,
&transformed_y_backprop_input_shape));
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<T>::value,
transformed_y_backprop_input_shape,
&transformed_y_backprop_input));
TensorShape transformed_x_input_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols,
in_depths, &transformed_x_input_shape));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
transformed_x_input_shape,
&transformed_x_input));
TensorShape transformed_x_backprop_output_shape;
OP_REQUIRES_OK(context,
ShapeFromFormatWithStatus(
FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths,
&transformed_x_backprop_output_shape));
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<T>::value,
transformed_x_backprop_output_shape,
&transformed_x_backprop_output));
std::array<int32, 4> perm = {0, 2, 3, 1};
OP_REQUIRES_OK(
context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
y_backprop_input, perm,
&transformed_y_backprop_input));
OP_REQUIRES_OK(context, ::tensorflow::DoTranspose(
context->eigen_device<CPUDevice>(), x_input,
perm, &transformed_x_input));
} else {
transformed_y_backprop_input = y_backprop_input;
transformed_x_input = x_input;
transformed_x_backprop_output = *x_backprop_output;
}
typename TTypes<T, 4>::Tensor y_backprop(
transformed_y_backprop_input.tensor<T, 4>());
typename TTypes<T, 4>::Tensor x(transformed_x_input.tensor<T, 4>());
typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
typename TTypes<U>::ConstVec mean(mean_input.vec<U>());
typename TTypes<U>::ConstVec variance(variance_input.vec<U>());
typename TTypes<T, 4>::Tensor x_backprop(
transformed_x_backprop_output.tensor<T, 4>());
typename TTypes<U>::Vec offset_backprop(offset_backprop_output->vec<U>());
const CPUDevice& d = context->eigen_device<CPUDevice>();
const int depth = x.dimension(3);
const int size = x.size();
const int rest_size = size / depth;
Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
one_by_depth.set(1, depth);
Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec;
bcast_spec.set(0, rest_size);
auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>();
U rest_size_inv = static_cast<U>(1.0f / static_cast<U>(rest_size));
using ScalarSum = Eigen::internal::scalar_sum_op<U>;
const functor::ReduceOuterDimensions<T, U, U, ScalarSum> redux_sum_t;
const functor::ReduceOuterDimensions<U, U, U, ScalarSum> redux_sum_u;
auto scratch_dtype = DataTypeToEnum<U>::value;
Tensor scratch_one_by_depth;
OP_REQUIRES_OK(context, context->allocate_temp(scratch_dtype, {depth},
&scratch_one_by_depth));
Tensor scratch_rest_by_depth;
if (std::is_same<T, U>::value) {
OP_REQUIRES(context,
scratch_rest_by_depth.CopyFrom(transformed_x_backprop_output,
{rest_size, depth}),
errors::Internal("Failed to copy a tensor"));
} else {
OP_REQUIRES_OK(context,
context->allocate_temp(scratch_dtype, {rest_size, depth},
&scratch_rest_by_depth));
}
typename TTypes<U, 2>::Tensor scratch_tensor(
scratch_rest_by_depth.tensor<U, 2>());
typename TTypes<U>::Vec scratch_vector(scratch_one_by_depth.vec<U>());
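    // Training backprop: dx = scale * rsqrt(var + eps) *
    //   (dy - mean(dy) - x_centered * mean(dy * x_centered) / (var + eps));
    // coef0/coef1/coef2 below build this expression term by term, while the
    // redux functors accumulate dscale = sum(dy * x_normalized) and doffset = sum(dy).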
auto x_mean_rest_by_depth =
mean.reshape(one_by_depth).broadcast(bcast_spec);
auto x_centered = (x_rest_by_depth - x_mean_rest_by_depth);
auto coef0_one_by_depth =
(variance.reshape(one_by_depth) + epsilon).rsqrt();
auto coef0_rest_by_depth = coef0_one_by_depth.broadcast(bcast_spec);
auto x_scaled = x_centered * coef0_rest_by_depth;
auto y_backprop_rest_by_depth =
y_backprop.reshape(rest_by_depth).template cast<U>();
scratch_tensor.device(d) = y_backprop_rest_by_depth * x_scaled;
redux_sum_u(d, rest_by_depth, scratch_rest_by_depth, scale_backprop_output);
redux_sum_t(d, rest_by_depth, transformed_y_backprop_input,
offset_backprop_output);
auto y_backprop_sum = offset_backprop;
auto y_backprop_sum_one_by_depth = y_backprop_sum.reshape(one_by_depth);
auto y_backprop_mean_one_by_depth =
y_backprop_sum_one_by_depth * rest_size_inv;
auto y_backprop_mean_rest_by_depth =
y_backprop_mean_one_by_depth.broadcast(bcast_spec);
auto y_backprop_centered =
y_backprop_rest_by_depth - y_backprop_mean_rest_by_depth;
scratch_tensor.device(d) = y_backprop_rest_by_depth * x_centered;
redux_sum_u(d, rest_by_depth, scratch_rest_by_depth, &scratch_one_by_depth);
auto y_backprop_centered_mean =
scratch_vector.reshape(one_by_depth) / static_cast<U>(rest_size);
auto coef1 = (scale.reshape(one_by_depth) * coef0_one_by_depth)
.broadcast(bcast_spec);
auto coef2 = (coef0_one_by_depth.square() * y_backprop_centered_mean)
.broadcast(bcast_spec);
x_backprop.reshape(rest_by_depth).device(d) =
(coef1 * (y_backprop_centered - x_centered * coef2)).template cast<T>();
if (tensor_format == FORMAT_NCHW) {
std::array<int32, 4> perm = {0, 3, 1, 2};
OP_REQUIRES_OK(
context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
transformed_x_backprop_output,
perm, x_backprop_output));
}
}
};
template <typename T, typename U>
struct FusedBatchNormFreezeGrad<CPUDevice, T, U> {
void operator()(OpKernelContext* context, const Tensor& y_backprop_input,
const Tensor& x_input, const Tensor& scale_input,
const Tensor& pop_mean_input,
const Tensor& pop_variance_input, U epsilon,
Tensor* x_backprop_output, Tensor* scale_backprop_output,
Tensor* offset_backprop_output) {
typename TTypes<T, 4>::ConstTensor y_backprop(
y_backprop_input.tensor<T, 4>());
typename TTypes<T, 4>::ConstTensor input(x_input.tensor<T, 4>());
typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
typename TTypes<U>::ConstVec pop_mean(pop_mean_input.vec<U>());
typename TTypes<U>::ConstVec pop_var(pop_variance_input.vec<U>());
typename TTypes<T, 4>::Tensor x_backprop(x_backprop_output->tensor<T, 4>());
typename TTypes<U>::Vec scale_backprop(scale_backprop_output->vec<U>());
const int depth = pop_mean.dimension(0);
const int rest_size = input.size() / depth;
const CPUDevice& d = context->eigen_device<CPUDevice>();
Tensor scratch1_vec, scratch2_vec;
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value,
{depth}, &scratch1_vec));
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value,
{depth}, &scratch2_vec));
Tensor scratch3_tensor;
if (std::is_same<T, U>::value) {
OP_REQUIRES(
context,
scratch3_tensor.CopyFrom(*x_backprop_output, {rest_size, depth}),
errors::Internal("Failed to copy a tensor"));
} else {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value,
{rest_size, depth},
&scratch3_tensor));
}
typename TTypes<U>::Vec scratch1(scratch1_vec.vec<U>());
typename TTypes<U>::Vec scratch2(scratch2_vec.vec<U>());
typename TTypes<U, 2>::Tensor scratch3(scratch3_tensor.tensor<U, 2>());
Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
one_by_depth.set(1, depth);
Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> rest_by_one;
rest_by_one.set(0, rest_size);
using ScalarSum = Eigen::internal::scalar_sum_op<U>;
const functor::ReduceOuterDimensions<T, U, U, ScalarSum> redux_sum_t;
const functor::ReduceOuterDimensions<U, U, U, ScalarSum> redux_sum_u;
auto y_backprop_rest_by_depth =
y_backprop.reshape(rest_by_depth).template cast<U>();
auto input_rest_by_depth = input.reshape(rest_by_depth).template cast<U>();
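    // Frozen-statistics backprop: doffset = sum(dy),
    // dscale = sum(dy * (x - pop_mean)) * rsqrt(pop_var + eps),
    // dx = dy * scale * rsqrt(pop_var + eps).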
redux_sum_t(d, rest_by_depth, y_backprop_input, offset_backprop_output);
scratch1 = (pop_var + pop_var.constant(epsilon)).rsqrt();
scratch3.device(d) =
y_backprop_rest_by_depth *
(input_rest_by_depth -
pop_mean.reshape(one_by_depth).broadcast(rest_by_one));
redux_sum_u(d, rest_by_depth, scratch3_tensor, &scratch2_vec);
x_backprop.reshape(rest_by_depth).device(d) =
(y_backprop_rest_by_depth *
((scratch1.reshape(one_by_depth) * scale.reshape(one_by_depth))
.broadcast(rest_by_one)))
.template cast<T>();
scale_backprop = scratch2 * scratch1;
}
};
#if !GOOGLE_CUDA
namespace {
bool BatchnormSpatialPersistentEnabled() { return false | #include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
class FusedBatchNormOpTest : public OpsTestBase {};
TEST_F(FusedBatchNormOpTest, Training) {
TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("exponential_avg_factor", 1.0)
.Attr("epsilon", 0.001)
.Attr("is_training", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
{5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<float>(TensorShape({0}), {});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
Tensor expected_mean(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected_mean, {10, 10});
test::ExpectTensorNear<float>(expected_mean, *GetOutput(1), 0.01);
Tensor expected_variance(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected_variance, {14.00, 14.00});
test::ExpectTensorNear<float>(expected_variance, *GetOutput(2), 0.01);
}
TEST_F(FusedBatchNormOpTest, TrainingRunningMean) {
TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("exponential_avg_factor", 0.5)
.Attr("epsilon", 0.001)
.Attr("is_training", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
{5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
AddInputFromArray<float>(TensorShape({2}), {6.0, 6.0});
AddInputFromArray<float>(TensorShape({2}), {16.0, 16.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
Tensor expected_mean(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected_mean, {8, 8});
test::ExpectTensorNear<float>(expected_mean, *GetOutput(1), 0.01);
Tensor expected_variance(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected_variance, {15.00, 15.00});
test::ExpectTensorNear<float>(expected_variance, *GetOutput(2), 0.01);
}
TEST_F(FusedBatchNormOpTest, Inference) {
TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("epsilon", 0.001)
.Attr("is_training", false)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
{5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
AddInputFromArray<float>(TensorShape({2}), {10, 10});
AddInputFromArray<float>(TensorShape({2}), {11.67f, 11.67f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
}
TEST_F(FusedBatchNormOpTest, InferenceIgnoreAvgFactor) {
TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("exponential_avg_factor", 0.5)
.Attr("epsilon", 0.001)
.Attr("is_training", false)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
{5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
AddInputFromArray<float>(TensorShape({2}), {10, 10});
AddInputFromArray<float>(TensorShape({2}), {11.67f, 11.67f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
}
TEST_F(FusedBatchNormOpTest, EmptyInput) {
TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("epsilon", 0.001)
.Attr("is_training", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 0, 0}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<float>(TensorShape({0}), {});
TF_ASSERT_OK(RunOpKernel());
EXPECT_EQ(GetOutput(0)->shape(), TensorShape({1, 1, 0, 0}));
}
class FusedBatchNormGradOpTest : public OpsTestBase {};
TEST_F(FusedBatchNormGradOpTest, Simple) {
TF_EXPECT_OK(NodeDefBuilder("batch_norm_grad_op", "FusedBatchNormGrad")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("epsilon", 0.001)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
{2, 2, 9, 9, -4, -4, 5, 5, 8, 8, 7, 7});
AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
{1, 1, 7, 7, 4, 4, -3, -3, -11, -11, 13, 13});
AddInputFromArray<float>(TensorShape({2}), {4, 4});
AddInputFromArray<float>(TensorShape({2}), {1.833f, 1.833f});
AddInputFromArray<float>(TensorShape({2}), {57.472f, 57.472f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_x(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(&expected_x, {-1.34, -1.34, 2.47, 2.47, -4.44, -4.44,
0.17, 0.17, 1.60, 1.60, 1.53, 1.53});
test::ExpectTensorNear<float>(expected_x, *GetOutput(0), 0.01);
Tensor expected_scale(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected_scale, {-1.6488, -1.6488});
test::ExpectTensorNear<float>(expected_scale, *GetOutput(1), 0.01);
Tensor expected_offset(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected_offset, {27, 27});
test::ExpectTensorNear<float>(expected_offset, *GetOutput(2), 0.01);
}
using fp32 = float;
using fp16 = Eigen::half;
using bf16 = bfloat16;
template <typename T>
static Graph* FusedBatchNormInference(int n, int h, int w, int c,
bool is_training,
TensorFormat data_format) {
Graph* g = new Graph(OpRegistry::Global());
DataType dtype = DataTypeToEnum<T>::value;
Tensor x_t(dtype, data_format == FORMAT_NHWC ? TensorShape({n, h, w, c})
: TensorShape({n, c, h, w}));
x_t.flat<T>().setRandom();
Tensor other_t(DT_FLOAT, TensorShape({c}));
other_t.flat<float>().setRandom();
Tensor empty_t(DT_FLOAT, TensorShape({0}));
Node* x = test::graph::Constant(g, x_t, "x");
Node* other = test::graph::Constant(g, other_t, "other");
Node* empty = test::graph::Constant(g, empty_t, "empty");
Node* fused_batch_norm;
TF_CHECK_OK(NodeBuilder(g->NewName("fused_batch_norm"), "FusedBatchNormV3")
.Input(x)
.Input(other)
.Input(other)
.Input(is_training ? empty : other)
.Input(is_training ? empty : other)
.Attr("T", dtype)
.Attr("U", DT_FLOAT)
.Attr("epsilon", 0.001)
.Attr("is_training", is_training)
.Attr("data_format", ToString(data_format))
.Finalize(g, &fused_batch_norm));
return g;
}
template <typename T>
static Graph* FusedBatchNormGrad(int n, int h, int w, int c, bool is_training,
TensorFormat data_format) {
Graph* g = new Graph(OpRegistry::Global());
DataType dtype = DataTypeToEnum<T>::value;
TensorShape shape = data_format == FORMAT_NHWC ? TensorShape({n, h, w, c})
: TensorShape({n, c, h, w});
Tensor y_backprop_t(dtype, shape);
y_backprop_t.flat<T>().setRandom();
Tensor x_t(dtype, shape);
x_t.flat<T>().setRandom();
Tensor other_t(DT_FLOAT, TensorShape({c}));
other_t.flat<float>().setRandom();
Node* y_backprop = test::graph::Constant(g, y_backprop_t, "y_backprop");
Node* x = test::graph::Constant(g, x_t, "x");
Node* other = test::graph::Constant(g, other_t, "other");
Node* fused_batch_norm;
TF_CHECK_OK(
NodeBuilder(g->NewName("fused_batch_norm_grad"), "FusedBatchNormGradV3")
.Input(y_backprop)
.Input(x)
.Input(other)
.Input(other)
.Input(other)
.Input(other)
.Attr("T", dtype)
.Attr("U", DT_FLOAT)
.Attr("epsilon", 0.001)
.Attr("is_training", is_training)
.Attr("data_format", ToString(data_format))
.Finalize(g, &fused_batch_norm));
return g;
}
#define BM_NAME(NAME, N, H, W, C, T, IT, FORMAT, DEVICE) \
BM_##NAME##_##N##_##H##_##W##_##C##_##IT##_##FORMAT##_##T##_##DEVICE
#define BM_FusedBatchNorm(N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE) \
  static void BM_NAME(FusedBatchNorm, N, H, W, C, T, IS_TRAINING, FORMAT,    \
                      DEVICE)(::testing::benchmark::State & state) {         \
test::Benchmark( \
#DEVICE, \
FusedBatchNormInference<T>(N, H, W, C, IS_TRAINING, FORMAT_##FORMAT), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * N * H * W * C); \
} \
BENCHMARK( \
BM_NAME(FusedBatchNorm, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)) \
->UseRealTime();
BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, bf16, false, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, bf16, true, NHWC, cpu);
#ifdef GOOGLE_CUDA
BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NCHW, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NCHW, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NCHW, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NCHW, gpu);
#endif
#define BM_FusedBatchNormGrad(N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE) \
static void BM_NAME(FusedBatchNormGrad, N, H, W, C, T, IS_TRAINING, FORMAT, \
DEVICE)(::testing::benchmark::State & state) { \
test::Benchmark( \
#DEVICE, \
FusedBatchNormGrad<T>(N, H, W, C, IS_TRAINING, FORMAT_##FORMAT), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * N * H * W * C); \
} \
BENCHMARK( \
BM_NAME(FusedBatchNormGrad, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)) \
->UseRealTime();
#define BM_FusedBatchNormGradResnetShapes(T, IS_TRAINING, FORMAT, DEVICE) \
BM_FusedBatchNormGrad(64, 56, 56, 64, T, IS_TRAINING, FORMAT, DEVICE); \
BM_FusedBatchNormGrad(64, 56, 56, 128, T, IS_TRAINING, FORMAT, DEVICE); \
BM_FusedBatchNormGrad(64, 56, 56, 256, T, IS_TRAINING, FORMAT, DEVICE); \
\
BM_FusedBatchNormGrad(64, 28, 28, 128, T, IS_TRAINING, FORMAT, DEVICE); \
BM_FusedBatchNormGrad(64, 28, 28, 256, T, IS_TRAINING, FORMAT, DEVICE); \
BM_FusedBatchNormGrad(64, 28, 28, 512, T, IS_TRAINING, FORMAT, DEVICE); \
\
BM_FusedBatchNormGrad(64, 14, 14, 128, T, IS_TRAINING, FORMAT, DEVICE); \
BM_FusedBatchNormGrad(64, 14, 14, 256, T, IS_TRAINING, FORMAT, DEVICE); \
BM_FusedBatchNormGrad(64, 14, 14, 1024, T, IS_TRAINING, FORMAT, DEVICE)
BM_FusedBatchNormGradResnetShapes(fp32, true, NHWC, cpu);
BM_FusedBatchNormGradResnetShapes(fp32, false, NHWC, cpu);
BM_FusedBatchNormGradResnetShapes(bf16, true, NHWC, cpu);
BM_FusedBatchNormGradResnetShapes(bf16, false, NHWC, cpu);
#ifdef GOOGLE_CUDA
BM_FusedBatchNormGradResnetShapes(fp32, true, NHWC, gpu);
BM_FusedBatchNormGradResnetShapes(fp16, true, NHWC, gpu);
BM_FusedBatchNormGradResnetShapes(fp32, true, NCHW, gpu);
BM_FusedBatchNormGradResnetShapes(fp16, true, NCHW, gpu);
BM_FusedBatchNormGradResnetShapes(fp32, false, NHWC, gpu);
BM_FusedBatchNormGradResnetShapes(fp16, false, NHWC, gpu);
#endif
} |
1,507 | cpp | tensorflow/tensorflow | collective_nccl | tensorflow/core/kernels/collective_nccl.cc | tensorflow/core/kernels/collective_nccl_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_COLLECTIVE_NCCL_H_
#define TENSORFLOW_CORE_KERNELS_COLLECTIVE_NCCL_H_
#include "tensorflow/core/framework/collective.h"
namespace tensorflow {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
class NcclBase : public CollectiveImplementationInterface {
public:
explicit NcclBase(CollectiveType type, const string& name);
~NcclBase() override = default;
Status InitializeCollectiveParams(CollectiveParams* col_params) override;
Status InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) override;
protected:
const CollectiveType type_;
const string name_;
std::shared_ptr<CollectiveContext> col_ctx_;
const CollectiveParams* col_params_;
};
#endif
}
#endif
#include "tensorflow/core/kernels/collective_nccl.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/nccl/nccl_manager.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
NcclBase::NcclBase(CollectiveType type, const string& name)
: type_(type), name_(name), col_ctx_(nullptr), col_params_(nullptr) {}
Status NcclBase::InitializeCollectiveParams(CollectiveParams* col_params) {
if (type_ != col_params->instance.type) {
return errors::Internal("Expected initialized type ", type_,
" to match type in CollectiveParams ",
col_params->instance.type);
}
const char* expected_name;
switch (type_) {
case REDUCTION_COLLECTIVE:
expected_name = "NcclReduce";
break;
case BROADCAST_COLLECTIVE:
expected_name = "NcclBroadcast";
break;
case GATHER_COLLECTIVE:
expected_name = "NcclGather";
break;
case REDUCE_SCATTER_COLLECTIVE:
expected_name = "NcclReduceScatter";
break;
case ALL_TO_ALL_COLLECTIVE:
expected_name = "NcclAllToAll";
break;
default:
return errors::Internal("Unexpected CollectiveType ", type_);
}
if (expected_name != col_params->instance.impl_details.collective_name) {
return errors::Internal("Unexpected combination of collective type ",
col_params->instance.type, " and collective name ",
col_params->instance.impl_details.collective_name,
", expected name ", expected_name);
}
return OkStatus();
}
Status NcclBase::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
}
#endif | #ifdef GOOGLE_CUDA
#include <algorithm>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
static constexpr int kStepId = 10;
std::unique_ptr<OpKernel> GetKernel(const NodeDef& node, DeviceBase* device) {
Status status;
std::unique_ptr<OpKernel> k = CreateOpKernel(
DEVICE_GPU, device, device->GetAllocator(AllocatorAttributes()), node,
TF_GRAPH_DEF_VERSION, &status);
if (!status.ok()) LOG(FATAL) << status;
return k;
}
std::unique_ptr<OpKernel> GetAdd(DeviceBase* device) {
NodeDef node_def;
NodeDefBuilder builder("add_node", "Add");
TF_CHECK_OK(builder.Attr("T", DT_FLOAT)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(&node_def));
return GetKernel(node_def, device);
}
std::unique_ptr<OpKernel> GetDiv(DeviceBase* device) {
NodeDef node_def;
NodeDefBuilder builder("add_node", "Div");
TF_CHECK_OK(builder.Attr("T", DT_FLOAT)
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(&node_def));
return GetKernel(node_def, device);
}
class NcclTestBase : public ::testing::Test {
protected:
class DeviceInstance;
NcclTestBase(CollectiveType collective_type, const string& collective_name)
: collective_type_(collective_type), collective_name_(collective_name) {}
void Init(const int num_ranks) {
setenv("NCCL_DEBUG", "INFO", 1 );
setenv("NCCL_LAUNCH_MODE", "PARALLEL", 1 );
test_env_ = CreateCollectiveTestEnv( 1,
num_ranks,
DEVICE_GPU, true);
for (int rank = 0; rank < num_ranks; ++rank) {
instances_.push_back(std::make_unique<DeviceInstance>(
rank, collective_name_, collective_type_, test_env_.get()));
}
}
virtual void InitInput(Tensor* input, const int rank) = 0;
virtual void InitExpected(std::vector<float>* expected,
const int tensor_length, const int current_rank,
const int num_ranks) = 0;
virtual void InitDevice(DeviceInstance* di) = 0;
virtual void RunCollectiveOnDevice(DeviceInstance* di) = 0;
void RunCollective() {
std::atomic<int> done(0);
for (const auto& instance : instances_) {
DeviceInstance* di = instance.get();
InitDevice(di);
SchedClosure([this, di, &done] {
RunCollectiveOnDevice(di);
++done;
});
}
while (done < static_cast<int>(instances_.size())) {
Env::Default()->SleepForMicroseconds(1000);
}
}
void RunTest(int num_ranks, int input_length) {
Init(num_ranks);
if (num_ranks > test_env_->device_mgr->NumDevices()) {
LOG(WARNING) << "Skipping test because required " << num_ranks
<< " GPUs but found " << test_env_->device_mgr->NumDevices();
return;
}
for (int rank = 0; rank < num_ranks; ++rank) {
instances_[rank]->InitTensor(
DT_FLOAT, TensorShape({input_length}),
[this, rank](Tensor* t) { InitInput(t, rank); });
}
RunCollective();
for (int rank = 0; rank < instances_.size(); ++rank) {
std::vector<float> expected;
InitExpected(&expected, input_length, rank, num_ranks);
if (VLOG_IS_ON(3)) {
string str_buf;
for (const auto& x : expected) {
strings::StrAppend(&str_buf, " ", x);
}
VLOG(3) << "Expected output " << str_buf;
}
TF_ASSERT_OK(instances_[rank]->status_);
VLOG(2) << "rank " << rank << " output " << &instances_[rank]->output_
<< " buf " << DMAHelper::base(&instances_[rank]->output_);
VLOG(3) << "rank " << rank << " got output tensor "
<< instances_[rank]->output_.DebugString(
instances_[rank]->output_.NumElements());
test::ExpectTensorEqual<float>(test::AsTensor<float>(expected),
instances_[rank]->output_);
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, const string& collective_name,
CollectiveType collective_type, CollectiveTestEnv* test_env)
: test_env_(test_env) {
col_params_ =
CreateCollectiveParams(*test_env_, rank, collective_name,
collective_type, DT_FLOAT, TensorShape());
string device_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(device_name, &device_))
<< "Could not find device " << device_name << " existing devices "
<< test_env_->device_mgr->DebugString();
merge_op_ = GetAdd(device_);
final_op_ = GetDiv(device_);
}
void InitTensor(DataType dtype, const TensorShape& shape,
const std::function<void(Tensor*)>& init_f) {
input_ = Tensor(dtype, shape);
init_f(&input_);
if (VLOG_IS_ON(3)) {
VLOG(3) << "input tensor " << input_.DebugString(shape.num_elements());
} else {
VLOG(2) << "input tensor " << input_.DebugString();
}
}
void RunReduce() {
output_ = input_;
status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_,
&input_, &output_);
}
void RunReduceScatter() {
auto output_shape = input_.shape();
output_shape.set_dim(
0, output_shape.dim_size(0) / col_params_->group.group_size);
output_ = Tensor(DT_FLOAT, output_shape);
status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_,
&input_, &output_);
}
void RunBroadcast() {
output_ = input_;
status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_,
&input_, &output_);
}
void RunGather() {
auto output_shape = input_.shape();
output_shape.set_dim(
0, output_shape.dim_size(0) * col_params_->group.group_size);
output_ = Tensor(DT_FLOAT, output_shape);
status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_,
&input_, &output_);
}
void RunAllToAll() {
output_ = Tensor(DT_FLOAT, input_.shape());
status_ = tensorflow::RunCollective(test_env_, col_params_.get(), device_,
&input_, &output_);
}
CollectiveTestEnv* test_env_;
Tensor input_;
Tensor output_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
std::unique_ptr<OpKernel> merge_op_;
std::unique_ptr<OpKernel> final_op_;
Status status_;
};
CollectiveType collective_type_;
const string collective_name_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
mutex mu_;
int32 op_counter_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<CollectiveTestEnv> test_env_;
};
class NcclReducerTest : public NcclTestBase {
protected:
NcclReducerTest()
: NcclTestBase(REDUCTION_COLLECTIVE,
"NcclReduce") {}
~NcclReducerTest() override = default;
void InitInput(Tensor* input, const int rank) override {
for (size_t i = 0; i < input->NumElements(); ++i) {
float value = pow(10, rank) * i;
input->flat<float>()(i) = value;
}
}
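  // Expected result: element-wise sum of every rank's input divided by the
  // number of ranks (Add merge op followed by Div final op, i.e. the mean).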
void InitExpected(std::vector<float>* expected, const int tensor_length,
const int current_rank, const int num_ranks) override {
expected->resize(tensor_length);
for (int i = 0; i < tensor_length; ++i) {
float expected_sum = 0.0;
for (int rank = 0; rank < num_ranks; ++rank) {
float value = pow(10, rank) * i;
expected_sum += value;
}
(*expected)[i] = expected_sum / num_ranks;
}
}
void InitDevice(DeviceInstance* di) override {
di->col_params_->merge_op = di->merge_op_.get();
di->col_params_->final_op = di->final_op_.get();
}
void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunReduce(); }
};
class NcclReduceScattererTest : public NcclTestBase {
protected:
NcclReduceScattererTest()
: NcclTestBase(REDUCE_SCATTER_COLLECTIVE,
"NcclReduceScatter") {}
~NcclReduceScattererTest() override = default;
void InitInput(Tensor* input, const int rank) override {
for (size_t i = 0; i < input->NumElements(); ++i) {
float value = pow(10, rank) * i;
input->flat<float>()(i) = value;
}
}
void InitExpected(std::vector<float>* expected, const int tensor_length,
const int current_rank, const int num_ranks) override {
const int output_length = tensor_length / num_ranks;
expected->resize(output_length);
for (int i = 0; i < output_length; ++i) {
float expected_sum = 0.0;
for (int rank = 0; rank < num_ranks; ++rank) {
float value = pow(10, rank) * (i + current_rank * output_length);
expected_sum += value;
}
(*expected)[i] = expected_sum / num_ranks;
}
}
void InitDevice(DeviceInstance* di) override {
di->col_params_->merge_op = di->merge_op_.get();
di->col_params_->final_op = di->final_op_.get();
}
void RunCollectiveOnDevice(DeviceInstance* di) override {
di->RunReduceScatter();
}
};
class NcclBroadcasterTest : public NcclTestBase {
protected:
NcclBroadcasterTest()
: NcclTestBase(BROADCAST_COLLECTIVE,
"NcclBroadcast") {}
~NcclBroadcasterTest() override = default;
void InitInput(Tensor* input, const int rank) override {
bool source = rank == source_rank_;
for (size_t i = 0; i < input->NumElements(); ++i) {
input->flat<float>()(i) = source ? static_cast<float>(i) : -1.0;
}
}
void InitExpected(std::vector<float>* expected, const int tensor_length,
const int current_rank, const int num_ranks) override {
expected->resize(tensor_length);
for (int i = 0; i < tensor_length; ++i) {
(*expected)[i] = i;
}
}
void InitDevice(DeviceInstance* di) override {
di->col_params_->source_rank = source_rank_;
di->col_params_->is_source = di->col_params_->default_rank == source_rank_;
}
void RunCollectiveOnDevice(DeviceInstance* di) override {
di->RunBroadcast();
}
int source_rank_ = 0;
};
class NcclGathererTest : public NcclTestBase {
protected:
NcclGathererTest()
: NcclTestBase(GATHER_COLLECTIVE,
"NcclGather") {}
~NcclGathererTest() override = default;
void InitInput(Tensor* input, const int rank) override {
for (size_t i = 0; i < input->NumElements(); ++i) {
float value = pow(10, rank) * i;
input->flat<float>()(i) = value;
}
}
void InitExpected(std::vector<float>* expected, const int tensor_length,
const int current_rank, const int num_ranks) override {
expected->resize(tensor_length * num_ranks, -1);
for (int rank = 0, i = 0; rank < num_ranks; ++rank) {
for (int j = 0; j < tensor_length; ++j, ++i) {
(*expected)[i] = pow(10, rank) * j;
}
}
}
void InitDevice(DeviceInstance* di) override {}
void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunGather(); }
int source_rank_ = 0;
};
class NcclAllToAllTest : public NcclTestBase {
protected:
NcclAllToAllTest()
: NcclTestBase(ALL_TO_ALL_COLLECTIVE,
"NcclAllToAll") {}
~NcclAllToAllTest() override = default;
void InitInput(Tensor* input, const int rank) override {
for (size_t i = 0; i < input->NumElements(); ++i) {
float value = rank * input->NumElements() + i;
input->flat<float>()(i) = value;
}
}
void InitExpected(std::vector<float>* expected, const int tensor_length,
const int current_rank, const int num_ranks) override {
expected->resize(tensor_length);
const int part_size = tensor_length / num_ranks;
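    // After all-to-all, this rank holds the current_rank-th slice of part_size
    // elements from every rank's input, concatenated in source-rank order.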
for (int rank = 0, i = 0; rank < num_ranks; ++rank) {
for (int j = 0; j < part_size; ++j, ++i) {
const int part_index = current_rank + rank * num_ranks;
(*expected)[i] = part_index * part_size + j;
}
}
}
void InitDevice(DeviceInstance* di) override {}
void RunCollectiveOnDevice(DeviceInstance* di) override { di->RunAllToAll(); }
int source_rank_ = 0;
};
TEST_F(NcclReducerTest, Test2Dev16Len) {
RunTest(2, 16);
}
TEST_F(NcclReducerTest, Test4Dev16Len) {
RunTest(4, 16);
}
TEST_F(NcclReducerTest, Test8Dev16Len) {
RunTest(8, 16);
}
TEST_F(NcclReducerTest, Test8Dev128Len) {
RunTest(8, 128);
}
TEST_F(NcclReducerTest, Test8Dev1048576Len) {
RunTest(8, 1048576);
}
TEST_F(NcclBroadcasterTest, Test2Dev16LenSrc0) {
RunTest(2, 16);
}
TEST_F(NcclBroadcasterTest, Test4Dev16LenSrc1) {
source_rank_ = 1;
RunTest(4, 16);
}
TEST_F(NcclBroadcasterTest, Test8Dev16LenSrc7) {
source_rank_ = 7;
RunTest(8, 16);
}
TEST_F(NcclBroadcasterTest, Test8Dev128LenSrc0) {
RunTest(8, 128);
}
TEST_F(NcclBroadcasterTest, Test8Dev1048576LenSrc0) {
RunTest(8, 1048576);
}
TEST_F(NcclGathererTest, Test2Dev16Len) {
RunTest(2, 16);
}
TEST_F(NcclGathererTest, Test4Dev16Len) {
RunTest(4, 16);
}
TEST_F(NcclGathererTest, Test8Dev16Len) {
RunTest(8, 16);
}
TEST_F(NcclGathererTest, Test8Dev128Len) {
RunTest(8, 128);
}
TEST_F(NcclGathererTest, Test8Dev1048576Len) {
RunTest(8, 1048576);
}
TEST_F(NcclReduceScattererTest, Test2Dev16Len) {
RunTest(2, 16);
}
TEST_F(NcclReduceScattererTest, Test4Dev16Len) {
RunTest(4, 16);
}
TEST_F(NcclReduceScattererTest, Test8Dev16Len) {
RunTest(8, 16);
}
TEST_F(NcclReduceScattererTest, Test8Dev128Len) {
RunTest(8, 128);
}
TEST_F(NcclReduceScattererTest, Test8Dev1048576Len) {
RunTest(8, 1048576);
}
TEST_F(NcclAllToAllTest, Test2Dev16Len) {
RunTest(2, 16);
}
TEST_F(NcclAllToAllTest, Test4Dev16Len) {
RunTest(4, 16);
}
TEST_F(NcclAllToAllTest, Test8Dev16Len) {
RunTest(8, 16);
}
TEST_F(NcclAllToAllTest, Test8Dev128Len) {
RunTest(8, 128);
}
TEST_F(NcclAllToAllTest, Test8Dev1048576Len) {
RunTest(8, 1048576);
}
}
#endif |
1,508 | cpp | tensorflow/tensorflow | logging_ops | tensorflow/core/kernels/logging_ops.cc | tensorflow/core/kernels/logging_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_LOGGING_OPS_H_
#define TENSORFLOW_CORE_KERNELS_LOGGING_OPS_H_
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
class AssertOp : public OpKernel {
public:
explicit AssertOp(OpKernelConstruction* c);
void Compute(OpKernelContext* ctx) override;
private:
int32 summarize_ = 0;
};
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/dataset_stateful_op_allowlist.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
REGISTER_OP("Assert")
.Input("condition: bool")
.Input("data: T")
.SetIsStateful()
.Attr("T: list(type)")
.Attr("summarize: int = 3")
.SetShapeFn(shape_inference::NoOutputs);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Assert");
REGISTER_OP("Print")
.Input("input: T")
.Input("data: U")
.Output("output: T")
.SetIsStateful()
.Attr("T: type")
.Attr("U: list(type) >= 0")
.Attr("message: string = ''")
.Attr("first_n: int = -1")
.Attr("summarize: int = 3")
.SetShapeFn(shape_inference::UnchangedShape);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Print");
REGISTER_OP("PrintV2")
.Input("input: string")
.SetIsStateful()
.Attr("output_stream: string = 'stderr'")
.Attr("end: string = '\n'")
.SetShapeFn([](InferenceContext* c) {
if (!c->RankKnown(c->input(0))) return absl::OkStatus();
if (c->Rank(c->input(0)) != 0) {
return errors::InvalidArgument("input must be a scalar, but has rank: ",
c->Rank(c->input(0)));
}
return absl::OkStatus();
});
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("PrintV2");
REGISTER_OP("TensorSummaryV2")
.Input("tag: string")
.Input("tensor: T")
.Input("serialized_summary_metadata: string")
.Output("summary: string")
.Attr("T: type")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TensorSummary")
.Input("tensor: T")
.Output("summary: string")
.Attr("T: type")
.Attr("description: string = ''")
.Attr("labels: list(string) = []")
.Attr("display_name: string = ''")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("ImageSummary")
.Input("tag: string")
.Input("tensor: T")
.Output("summary: string")
.Attr("max_images: int >= 1 = 3")
.Attr("T: {uint8, float, half, float64} = DT_FLOAT")
.Attr(
"bad_color: tensor = { dtype: DT_UINT8 "
"tensor_shape: { dim { size: 4 } } "
"int_val: 255 int_val: 0 int_val: 0 int_val: 255 }")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("AudioSummaryV2")
.Input("tag: string")
.Input("tensor: float")
.Input("sample_rate: float")
.Output("summary: string")
.Attr("max_outputs: int >= 1 = 3")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("AudioSummary")
.Input("tag: string")
.Input("tensor: float")
.Output("summary: string")
.Attr("sample_rate: float")
.Attr("max_outputs: int >= 1 = 3")
.SetShapeFn(shape_inference::ScalarShape)
.Deprecated(15, "Use AudioSummaryV2.");
REGISTER_OP("Timestamp")
.Output("ts: float64")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Timestamp");
} | #include <chrono>
#include <thread>
#include "xla/tsl/util/determinism_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/status_matchers.h"
namespace tensorflow {
namespace {
class PrintingV2GraphTest : public OpsTestBase {
protected:
Status Init(const string& output_stream = "log(warning)") {
TF_CHECK_OK(NodeDefBuilder("op", "PrintV2")
.Input(FakeInput(DT_STRING))
.Attr("output_stream", output_stream)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(PrintingV2GraphTest, StringSuccess) {
TF_ASSERT_OK(Init());
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(PrintingV2GraphTest, InvalidOutputStream) {
ASSERT_NE(absl::OkStatus(), (Init("invalid_output_stream")));
}
TEST_F(PrintingV2GraphTest, InvalidInputRank) {
TF_ASSERT_OK(Init());
AddInputFromArray<tstring>(TensorShape({2}), {"bar", "foo"});
ASSERT_NE(absl::OkStatus(), RunOpKernel());
}
class PrintingGraphTest : public OpsTestBase {
protected:
Status Init(DataType input_type1, DataType input_type2, string msg = "",
int first_n = -1, int summarize = 3) {
TF_CHECK_OK(NodeDefBuilder("op", "Print")
.Input(FakeInput(input_type1))
.Input(FakeInput(2, input_type2))
.Attr("message", msg)
.Attr("first_n", first_n)
.Attr("summarize", summarize)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(PrintingGraphTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32, DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, Int32Success_Summarize6) {
TF_ASSERT_OK(Init(DT_INT32, DT_INT32, "", -1, 6));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, MsgSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "Message: "));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, FirstNSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "", 3));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
for (int i = 0; i < 4; i++) TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
class TimestampTest : public OpsTestBase {
protected:
Status Init() {
TF_CHECK_OK(NodeDefBuilder("op", "Timestamp").Finalize(node_def()));
return InitOp();
}
};
TEST_F(TimestampTest, WaitAtLeast) {
TF_ASSERT_OK(Init());
TF_ASSERT_OK(RunOpKernel());
double ts1 = *((*GetOutput(0)).flat<double>().data());
std::this_thread::sleep_for(std::chrono::seconds(1));
TF_ASSERT_OK(RunOpKernel());
double ts2 = *((*GetOutput(0)).flat<double>().data());
EXPECT_LE(1.0, ts2 - ts1);
}
TEST_F(TimestampTest, DeterminismError) {
tsl::test::DeterministicOpsScope det_scope;
TF_ASSERT_OK(Init());
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(
error::FAILED_PRECONDITION,
"Timestamp cannot be called when determinism is enabled"));
}
}
} |
1,509 | cpp | tensorflow/tensorflow | sparse_utils | tensorflow/core/kernels/sparse_utils.cc | tensorflow/core/kernels/sparse_utils_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SPARSE_UTILS_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_UTILS_H_
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace sparse_utils {
template <typename Tindices>
Tindices FindNextDenseRowStartIndex(
const Tindices sparse_index_begin,
const typename TTypes<Tindices>::ConstMatrix& indices_mat);
template <typename Tindices>
std::vector<Tindices> GetStartIndicesOfEachDenseRow(
const typename TTypes<Tindices>::ConstMatrix& indices_mat,
bool* contains_empty_rows);
template <typename Tindices>
std::vector<Tindices> ParseRowStartIndices(
const tensorflow::Tensor& tensor,
const Tindices num_nonzero_entries_in_sparse_mat);
template <typename Tindices>
bool ContainsEmptyRows(const std::vector<Tindices>& row_start_indices);
enum class IndexValidation {
kNone,
kOrdered,
kUnordered
};
template <typename Tindices>
Status ValidateSparseTensor(const Tensor& indices, const Tensor& values,
const Tensor& shape,
IndexValidation index_validation);
}
}
#endif
#include "tensorflow/core/kernels/sparse_utils.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace sparse_utils {
template <typename Tindices>
Tindices FindNextDenseRowStartIndex(
const Tindices sparse_index_begin,
const typename TTypes<Tindices>::ConstMatrix& indices_mat) {
Tindices begin = sparse_index_begin;
Tindices end = indices_mat.dimension(0);
const Tindices orig_sparse_index_end = end;
const Tindices orig_dense_index_begin = indices_mat(begin, 0);
if (orig_dense_index_begin == static_cast<int64_t>(indices_mat(end - 1, 0))) {
return orig_sparse_index_end;
}
Tindices increment = 1;
while (begin + increment < end &&
indices_mat(begin + increment, 0) == orig_dense_index_begin) {
increment *= 2;
}
if (begin + increment < end) {
end = begin + increment;
}
begin += increment / 2;
const Tindices dense_row_index_to_find = orig_dense_index_begin;
while (begin < end) {
const Tindices m = begin + (end - begin) / 2;
const Tindices m_dense_row_index = static_cast<Tindices>(indices_mat(m, 0));
if (m_dense_row_index == dense_row_index_to_find &&
(m + 1 == orig_sparse_index_end ||
static_cast<Tindices>(indices_mat(m + 1, 0)) !=
dense_row_index_to_find)) {
return m + 1;
} else if (m_dense_row_index <= dense_row_index_to_find) {
begin = m + 1;
} else {
end = m;
}
}
return orig_sparse_index_end;
}
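// A minimal usage sketch (not part of the library; it mirrors the
// TTypes<int32>::ConstMatrix construction used in sparse_utils_test.cc).
// For the row-major COO indices {{0, 0}, {0, 1}, {2, 0}} the first dense
// row (row 0) owns sparse positions [0, 2), so the search returns 2:
//
//   int32 data[] = {0, 0, 0, 1, 2, 0};
//   TTypes<int32>::ConstMatrix indices_mat(data, 3, 2);
//   // FindNextDenseRowStartIndex<int32>(0, indices_mat) == 2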
template <typename Tindices>
std::vector<Tindices> GetStartIndicesOfEachDenseRow(
const typename TTypes<Tindices>::ConstMatrix& indices_mat,
bool* contains_empty_rows) {
int64_t start_sparse_index_of_cur_dense_row = 0;
std::vector<Tindices> segment_indices;
const Tindices num_entries_in_sparse_tensor = indices_mat.dimension(0);
const Tindices num_dense_rows_in_sparse_tensor =
1 + indices_mat(num_entries_in_sparse_tensor - 1, 0);
segment_indices.reserve(1 + num_dense_rows_in_sparse_tensor);
segment_indices.push_back(0);
for (Tindices i = 0; i < indices_mat(0, 0); ++i) {
segment_indices.push_back(0);
}
*contains_empty_rows = indices_mat(0, 0) > 0;
while (true) {
const Tindices start_sparse_index_of_next_dense_row =
FindNextDenseRowStartIndex<Tindices>(
start_sparse_index_of_cur_dense_row, indices_mat);
if (start_sparse_index_of_next_dense_row == num_entries_in_sparse_tensor) {
segment_indices.push_back(start_sparse_index_of_next_dense_row);
break;
}
for (Tindices i = 0;
i < indices_mat(start_sparse_index_of_next_dense_row, 0) -
indices_mat(start_sparse_index_of_cur_dense_row, 0);
++i) {
segment_indices.push_back(start_sparse_index_of_next_dense_row);
}
*contains_empty_rows |=
indices_mat(start_sparse_index_of_next_dense_row, 0) -
indices_mat(start_sparse_index_of_cur_dense_row, 0) >
1;
start_sparse_index_of_cur_dense_row = start_sparse_index_of_next_dense_row;
}
return segment_indices;
}
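// A minimal usage sketch (hypothetical data, following the pattern of
// sparse_utils_test.cc). For indices {{0, 0}, {2, 0}} the dense rows are
// 0, 1 and 2, so the returned boundaries are {0, 1, 1, 2}: row 0 spans
// sparse positions [0, 1), row 1 is empty, and row 2 spans [1, 2).
//
//   int32 data[] = {0, 0, 2, 0};
//   TTypes<int32>::ConstMatrix indices_mat(data, 2, 2);
//   bool contains_empty_rows;
//   auto segments = GetStartIndicesOfEachDenseRow<int32>(
//       indices_mat, &contains_empty_rows);
//   // segments == {0, 1, 1, 2}, contains_empty_rows == true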
template <typename Tindices>
std::vector<Tindices> ParseRowStartIndices(
const tensorflow::Tensor& tensor,
const Tindices num_nonzero_entries_in_sparse_mat) {
std::vector<Tindices> out;
auto vec = tensor.vec<Tindices>();
out.reserve(vec.size() + 1);
for (size_t i = 0; i < vec.dimension(0); ++i) {
out.push_back(vec(i));
}
out.push_back(num_nonzero_entries_in_sparse_mat);
return out;
}
template <typename Tindices>
bool ContainsEmptyRows(const std::vector<Tindices>& row_start_indices) {
for (size_t i = 1; i < row_start_indices.size() - 1; ++i) {
if (row_start_indices.at(i) - row_start_indices.at(i - 1) == 0) {
return true;
}
}
return false;
}
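// Continuing the sketch above: the boundaries {0, 1, 1, 2} contain a
// zero-length segment for dense row 1, so
// ContainsEmptyRows<int32>({0, 1, 1, 2}) returns true.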
namespace {
Status ValidateSparseTensorShape(const Tensor& indices, const Tensor& values,
const Tensor& shape) {
if (!TensorShapeUtils::IsMatrix(indices.shape())) {
return errors::InvalidArgument("Sparse indices must be rank 2 but is rank ",
indices.shape().dim_sizes().size());
}
if (!TensorShapeUtils::IsVector(values.shape())) {
return errors::InvalidArgument("Sparse values must be rank 1 but is rank ",
values.shape().dims());
}
if (!TensorShapeUtils::IsVector(shape.shape())) {
return errors::InvalidArgument("Sparse shape must be rank 1 but is rank ",
shape.shape().dims());
}
int64_t nnz = indices.dim_size(0);
int64_t ndims = indices.dim_size(1);
if (values.dim_size(0) != nnz) {
return errors::InvalidArgument("Number of elements in indices (", nnz,
") and values (", values.dim_size(0),
") do not match");
}
if (shape.NumElements() != ndims) {
return errors::InvalidArgument("Index rank (", ndims, ") and shape rank (",
shape.NumElements(), ") do not match");
}
return absl::OkStatus();
}
template <typename IndexTensor>
string CreateIndexString(const IndexTensor& indices, int64_t row) {
const int64_t ndims = indices.dimension(1);
string index_str = strings::StrCat("indices[", row, ", :] = [");
for (int64_t dim = 0; dim < ndims; ++dim) {
strings::StrAppend(&index_str, indices(row, dim),
dim < ndims - 1 ? ", " : "]");
}
if (ndims == 0) {
strings::StrAppend(&index_str, "]");
}
return index_str;
}
template <typename Tindices>
Status ValidateSparseTensorIndicesUnordered(const Tensor& indices,
const Tensor& shape) {
const auto indices_mat = indices.flat_inner_dims<Tindices>();
const auto shape_vec = shape.flat<Tindices>();
int64_t nnz = indices.dim_size(0);
int64_t ndims = indices.dim_size(1);
for (int64_t i = 0; i < nnz; ++i) {
for (int64_t dim = 0; dim < ndims; ++dim) {
const Tindices idx = indices_mat(i, dim);
if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) {
string index_str = CreateIndexString(indices_mat, i);
return errors::InvalidArgument("Sparse index tuple ", index_str,
" is out of bounds");
}
}
}
return absl::OkStatus();
}
template <typename Tindices>
Status ValidateSparseTensorIndicesOrdered(const Tensor& indices,
const Tensor& shape) {
const auto indices_mat = indices.flat_inner_dims<Tindices>();
const auto shape_vec = shape.flat<Tindices>();
int64_t nnz = indices.dim_size(0);
int64_t ndims = indices.dim_size(1);
if (nnz == 0) {
return absl::OkStatus();
}
for (int64_t dim = 0; dim < ndims; ++dim) {
const Tindices idx = indices_mat(0, dim);
if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) {
string index_str = CreateIndexString(indices_mat, 0);
return errors::InvalidArgument("Sparse index tuple ", index_str,
" is out of bounds");
}
}
for (int64_t i = 1; i < nnz; ++i) {
bool different = false;
for (int64_t dim = 0; dim < ndims; ++dim) {
const Tindices idx = indices_mat(i, dim);
const Tindices prev_idx = indices_mat(i - 1, dim);
if (TF_PREDICT_TRUE(different)) {
if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) {
string index_str = CreateIndexString(indices_mat, i);
return errors::InvalidArgument("Sparse index tuple ", index_str,
" is out of bounds");
}
} else {
if (TF_PREDICT_FALSE(idx < prev_idx || idx >= shape_vec(dim))) {
string index_str = CreateIndexString(indices_mat, i);
if (TF_PREDICT_FALSE(idx < 0 || idx >= shape_vec(dim))) {
return errors::InvalidArgument("Sparse index tuple ", index_str,
" is out of bounds");
} else {
return errors::InvalidArgument("Sparse index tuple ", index_str,
" is out of order");
}
} else if (TF_PREDICT_TRUE(idx > prev_idx)) {
different = true;
}
}
}
if (TF_PREDICT_FALSE(!different)) {
string index_str = CreateIndexString(indices_mat, i);
return errors::InvalidArgument("Sparse index tuple ", index_str,
" is repeated");
}
}
return absl::OkStatus();
}
}
template <typename Tindices>
Status ValidateSparseTensor(const Tensor& indices, const Tensor& values,
const Tensor& shape,
IndexValidation index_validation) {
TF_RETURN_IF_ERROR(ValidateSparseTensorShape(indices, values, shape));
switch (index_validation) {
case IndexValidation::kOrdered:
return ValidateSparseTensorIndicesOrdered<Tindices>(indices, shape);
case IndexValidation::kUnordered:
return ValidateSparseTensorIndicesUnordered<Tindices>(indices, shape);
case IndexValidation::kNone: {
}
}
return absl::OkStatus();
}
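// A minimal usage sketch (hypothetical tensors; the real kernels build
// these from op inputs). Validate a 2x3 sparse tensor with two ordered
// entries:
//
//   Tensor indices(DT_INT64, TensorShape({2, 2}));   // filled with [[0, 1], [1, 2]]
//   Tensor values(DT_FLOAT, TensorShape({2}));
//   Tensor shape(DT_INT64, TensorShape({2}));        // filled with [2, 3]
//   Status s = ValidateSparseTensor<int64_t>(indices, values, shape,
//                                            IndexValidation::kOrdered);
//   // s is OK when the indices are in bounds, ordered and free of repeats.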
#define REGISTER_SPARSE_UTIL_FUNCTIONS(TypeIndex) \
template TypeIndex FindNextDenseRowStartIndex<TypeIndex>( \
const TypeIndex sparse_index_begin, \
const TTypes<TypeIndex>::ConstMatrix& indices_mat); \
template std::vector<TypeIndex> GetStartIndicesOfEachDenseRow<TypeIndex>( \
const TTypes<TypeIndex>::ConstMatrix& indices_mat, \
bool* contains_empty_rows); \
template bool ContainsEmptyRows<TypeIndex>( \
const std::vector<TypeIndex>& row_start_indices); \
template std::vector<TypeIndex> ParseRowStartIndices<TypeIndex>( \
const tensorflow::Tensor& tensor, \
const TypeIndex num_nonzero_entries_in_sparse_mat); \
template Status ValidateSparseTensor<TypeIndex>( \
const Tensor& indices, const Tensor& values, const Tensor& shape, \
IndexValidation index_validation)
REGISTER_SPARSE_UTIL_FUNCTIONS(int32);
REGISTER_SPARSE_UTIL_FUNCTIONS(int64);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint8);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint16);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint32);
REGISTER_SPARSE_UTIL_FUNCTIONS(uint64);
}
} | #include "tensorflow/core/kernels/sparse_utils.h"
#include <algorithm>
#include <cstdint>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace sparse_utils {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::MatchesRegex;
TEST(SparseUtilsTest, GetStartIndicesOfEachDenseRow) {
{
int32 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0};
TTypes<int32>::ConstMatrix indices_mat(data, 8, 2);
bool contains_empty_rows;
EXPECT_TRUE(GetStartIndicesOfEachDenseRow<int32>(indices_mat,
&contains_empty_rows) ==
std::vector<int32>({0, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8}));
EXPECT_TRUE(contains_empty_rows);
}
{
int32 data[] = {0, 0, 1, 0, 1, 0, 4, 0, 4, 0, 4, 0, 6, 0, 7,
0, 7, 0, 7, 0, 7, 0, 8, 0, 8, 0, 10, 0, 12, 0};
TTypes<int32>::ConstMatrix indices_mat(data, 15, 2);
bool contains_empty_rows;
EXPECT_TRUE(
GetStartIndicesOfEachDenseRow<int32>(indices_mat,
&contains_empty_rows) ==
std::vector<int32>({0, 1, 3, 3, 3, 6, 6, 7, 11, 13, 13, 14, 14, 15}));
EXPECT_TRUE(contains_empty_rows);
}
{
int64_t data[] = {3, 0};
TTypes<int64_t>::ConstMatrix indices_mat(data, 1, 2);
bool contains_empty_rows;
EXPECT_TRUE(GetStartIndicesOfEachDenseRow<int64_t>(indices_mat,
&contains_empty_rows) ==
std::vector<int64_t>({0, 0, 0, 0, 1}));
EXPECT_TRUE(contains_empty_rows);
}
{
uint32 data[] = {3, 0, 3, 0};
TTypes<uint32>::ConstMatrix indices_mat(data, 2, 2);
bool contains_empty_rows;
EXPECT_TRUE(GetStartIndicesOfEachDenseRow<uint32>(indices_mat,
&contains_empty_rows) ==
std::vector<uint32>({0, 0, 0, 0, 2}));
EXPECT_TRUE(contains_empty_rows);
}
{
uint16 data[] = {0, 0, 0, 0, 0, 0, 1, 0};
TTypes<uint16>::ConstMatrix indices_mat(data, 4, 2);
bool contains_empty_rows;
EXPECT_TRUE(GetStartIndicesOfEachDenseRow<uint16>(indices_mat,
&contains_empty_rows) ==
std::vector<uint16>({0, 3, 4}));
EXPECT_FALSE(contains_empty_rows);
}
{
uint64 data[] = {0, 0, 0, 0, 0, 0, 3, 0};
TTypes<uint64>::ConstMatrix indices_mat(data, 4, 2);
bool contains_empty_rows;
EXPECT_TRUE(GetStartIndicesOfEachDenseRow<uint64>(indices_mat,
&contains_empty_rows) ==
std::vector<uint64>({0, 3, 3, 3, 4}));
EXPECT_TRUE(contains_empty_rows);
}
}
TEST(SparseUtilsTest, ParseRowStartIndices) {
{
Tensor t(DataType::DT_INT32, {1});
int indx = 0;
for (const int32_t v : {0}) {
t.flat<int32>()(indx++) = v;
}
EXPECT_TRUE(ParseRowStartIndices<int32>(t, 1) ==
std::vector<int32>({0, 1}));
}
{
Tensor t(DataType::DT_INT64, {1});
int indx = 0;
for (const int64_t v : {0}) {
t.flat<int64_t>()(indx++) = v;
}
EXPECT_TRUE(ParseRowStartIndices<int64_t>(t, 2) ==
std::vector<int64_t>({0, 2}));
}
{
Tensor t(DataType::DT_UINT64, {2});
int indx = 0;
for (const uint64 v : {0, 3}) {
t.flat<uint64>()(indx++) = v;
}
EXPECT_TRUE(ParseRowStartIndices<uint64>(t, 4) ==
std::vector<uint64>({0, 3, 4}));
}
{
Tensor t(DataType::DT_UINT16, {2});
int indx = 0;
for (const uint16 v : {0, 3}) {
t.flat<uint16>()(indx++) = v;
}
EXPECT_TRUE(ParseRowStartIndices<uint16>(t, 4) ==
std::vector<uint16>({0, 3, 4}));
}
}
TEST(SparseUtilsTest, ContainsEmptyRows) {
{
int32 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0};
TTypes<int32>::ConstMatrix indices_mat(data, 8, 2);
bool contains_empty_rows;
const auto segment_indices =
GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows);
EXPECT_TRUE(ContainsEmptyRows(segment_indices));
}
{
int64_t data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0};
TTypes<int64_t>::ConstMatrix indices_mat(data, 8, 2);
bool contains_empty_rows;
const auto segment_indices = GetStartIndicesOfEachDenseRow<int64_t>(
indices_mat, &contains_empty_rows);
EXPECT_TRUE(ContainsEmptyRows(segment_indices));
}
{
int32 data[] = {1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4};
TTypes<int32>::ConstMatrix indices_mat(data, 6, 2);
bool contains_empty_rows;
const auto segment_indices =
GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows);
EXPECT_TRUE(ContainsEmptyRows(segment_indices));
}
{
uint16 data[] = {1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4};
TTypes<uint16>::ConstMatrix indices_mat(data, 6, 2);
bool contains_empty_rows;
const auto segment_indices = GetStartIndicesOfEachDenseRow<uint16>(
indices_mat, &contains_empty_rows);
EXPECT_TRUE(ContainsEmptyRows(segment_indices));
}
{
int32 data[] = {0, 0, 1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4};
TTypes<int32>::ConstMatrix indices_mat(data, 7, 2);
bool contains_empty_rows;
const auto segment_indices =
GetStartIndicesOfEachDenseRow<int32>(indices_mat, &contains_empty_rows);
EXPECT_FALSE(ContainsEmptyRows(segment_indices));
}
{
int64_t data[] = {0, 0, 1, 0, 1, 1, 2, 0, 2, 1, 2, 2, 3, 4};
TTypes<int64_t>::ConstMatrix indices_mat(data, 7, 2);
bool contains_empty_rows;
const auto segment_indices = GetStartIndicesOfEachDenseRow<int64_t>(
indices_mat, &contains_empty_rows);
EXPECT_FALSE(ContainsEmptyRows(segment_indices));
}
{
uint32 data[] = {0, 0, 0, 1, 0, 2, 2, 0, 2, 1, 2, 2, 3, 4};
TTypes<uint32>::ConstMatrix indices_mat(data, 7, 2);
bool contains_empty_rows;
const auto segment_indices = GetStartIndicesOfEachDenseRow<uint32>(
indices_mat, &contains_empty_rows);
EXPECT_TRUE(ContainsEmptyRows(segment_indices));
}
{
int64_t data[] = {0, 0, 0, 1, 0, 2, 2, 0, 2, 1, 2, 2, 3, 4};
TTypes<int64_t>::ConstMatrix indices_mat(data, 7, 2);
bool contains_empty_rows;
const auto segment_indices = GetStartIndicesOfEachDenseRow<int64_t>(
indices_mat, &contains_empty_rows);
EXPECT_TRUE(ContainsEmptyRows(segment_indices));
}
{
uint64 data[] = {0, 0, 0, 1, 0, 2, 1, 0, 2, 1, 2, 2, 3, 4};
TTypes<uint64>::ConstMatrix indices_mat(data, 7, 2);
bool contains_empty_rows;
const auto segment_indices = GetStartIndicesOfEachDenseRow<uint64>(
indices_mat, &contains_empty_rows);
EXPECT_FALSE(ContainsEmptyRows(segment_indices));
}
}
TEST(SparseUtilsTest, FindNextDenseRowStartIndex) {
{
int32 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0};
TTypes<int32>::ConstMatrix indices_mat(data, 8, 2);
for (int32_t i = 0; i < 8; ++i) {
EXPECT_EQ(i + 1, FindNextDenseRowStartIndex<int32>(i, indices_mat));
}
}
{
uint16 data[] = {0, 0, 1, 0, 4, 0, 6, 0, 7, 0, 8, 0, 10, 0, 12, 0};
TTypes<uint16>::ConstMatrix indices_mat(data, 8, 2);
for (uint16 i = 0; i < 8; ++i) {
EXPECT_EQ(i + 1, FindNextDenseRowStartIndex<uint16>(i, indices_mat));
}
}
{
int64_t data[] = {0, 0, 1, 0, 1, 0, 4, 0, 4, 0, 4, 0, 6, 0, 7,
0, 7, 0, 7, 0, 7, 0, 8, 0, 8, 0, 10, 0, 12, 0};
TTypes<int64_t>::ConstMatrix indices_mat(data, 15, 2);
EXPECT_EQ(3, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(1),
indices_mat));
EXPECT_EQ(3, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(2),
indices_mat));
EXPECT_EQ(6, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(3),
indices_mat));
EXPECT_EQ(6, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(4),
indices_mat));
EXPECT_EQ(14, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(13),
indices_mat));
EXPECT_EQ(15, FindNextDenseRowStartIndex<int64_t>(static_cast<int64_t>(14),
indices_mat));
}
}
::tensorflow::random::SimplePhilox& RandomPhilox() {
static auto* philox =
new ::tensorflow::random::PhiloxRandom(tensorflow::testing::RandomSeed());
static auto* rnd = new ::tensorflow::random::SimplePhilox(philox);
return *rnd;
}
template <typename SetType>
void FillIndicesWithRandomTuples(const TensorShape& shape, Tensor& indices) {
const int64_t nnz = indices.dim_size(0);
const int64_t ndims = indices.dim_size(1);
SetType indices_set;
int64_t count = 0;
while (count < nnz) {
std::vector<int64_t> candidate(ndims);
for (int64_t d = 0; d < ndims; ++d) {
candidate[d] = RandomPhilox().Uniform64(shape.dim_size(d));
}
auto it = indices_set.insert(std::move(candidate));
if (it.second) {
++count;
}
}
auto indices_mat = indices.matrix<int64_t>();
int64_t row = 0;
for (const std::vector<int64_t>& idxs : indices_set) {
for (int64_t col = 0; col < ndims; ++col) {
indices_mat(row, col) = idxs[col];
}
++row;
}
}
void GenerateRandomSparseTensor(int64_t max_nnz, const TensorShape& shape,
bool ordered, Tensor& output_indices,
Tensor& output_values, Tensor& output_shape) {
const int64_t ndims = shape.dims();
const int64_t nnz = std::min(shape.num_elements(), max_nnz);
output_indices = Tensor(DT_INT64, TensorShape({nnz, ndims}));
output_values = Tensor(DT_FLOAT, TensorShape({nnz}));
output_shape = Tensor(DT_INT64, TensorShape({ndims}));
if (ordered) {
FillIndicesWithRandomTuples<std::set<std::vector<int64_t>>>(shape,
output_indices);
} else {
FillIndicesWithRandomTuples<absl::flat_hash_set<std::vector<int64_t>>>(
shape, output_indices);
}
auto values_vec = output_values.vec<float>();
values_vec.setRandom();
auto shape_vec = output_shape.vec<int64_t>();
for (int i = 0; i < shape.dims(); ++i) {
shape_vec(i) = shape.dim_size(i);
}
}
using ValidateSparseTensorTest = ::testing::TestWithParam<IndexValidation>;
TEST_P(ValidateSparseTensorTest, ValidSparseTensorPasses) {
constexpr int kNumNonZeros = 1000;
const TensorShape kTensorShapes[] = {
{}, {3}, {4, 5}, {6, 7, 8}, {9, 10, 11, 12}};
const IndexValidation index_validation = GetParam();
const bool ordered = (index_validation == IndexValidation::kOrdered);
for (const TensorShape& test_shape : kTensorShapes) {
Tensor indices, values, shape;
GenerateRandomSparseTensor(kNumNonZeros, test_shape, ordered, indices,
values, shape);
TF_EXPECT_OK((ValidateSparseTensor<int64_t>(indices, values, shape,
index_validation)));
}
}
TEST_P(ValidateSparseTensorTest, InvalidIndicesRankFails) {
constexpr int kNumNonZeros = 1000;
constexpr int kNumDims = 3;
const TensorShape kInvalidIndicesShapes[] = {
{}, {kNumNonZeros}, {kNumNonZeros, kNumDims, 4}};
const IndexValidation index_validation = GetParam();
for (const TensorShape& invalid_shape : kInvalidIndicesShapes) {
const Tensor indices = Tensor(DT_INT64, invalid_shape);
const Tensor values = Tensor(DT_FLOAT, TensorShape({kNumNonZeros}));
const Tensor shape = Tensor(DT_INT64, TensorShape({kNumDims}));
EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape,
index_validation)),
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Sparse indices must be rank 2 .*")));
}
}
TEST_P(ValidateSparseTensorTest, InvalidValuesRankFails) {
constexpr int kNumNonZeros = 1000;
constexpr int kNumDims = 3;
const TensorShape kInvalidValuesShapes[] = {{}, {kNumNonZeros, 2}};
const IndexValidation index_validation = GetParam();
for (const TensorShape& invalid_shape : kInvalidValuesShapes) {
const Tensor indices =
Tensor(DT_INT64, TensorShape({kNumNonZeros, kNumDims}));
const Tensor values = Tensor(DT_FLOAT, invalid_shape);
const Tensor shape = Tensor(DT_INT64, TensorShape({kNumDims}));
EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape,
index_validation)),
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Sparse values must be rank 1 .*")));
}
}
TEST_P(ValidateSparseTensorTest, InvalidShapeRankFails) {
constexpr int kNumNonZeros = 1000;
constexpr int kNumDims = 3;
const IndexValidation index_validation = GetParam();
const TensorShape kInvalidShapeShapes[] = {{}, {kNumDims, 2}};
for (const TensorShape& invalid_shape : kInvalidShapeShapes) {
const Tensor indices =
Tensor(DT_INT64, TensorShape({kNumNonZeros, kNumDims}));
const Tensor values = Tensor(DT_FLOAT, TensorShape({kNumNonZeros}));
const Tensor shape = Tensor(DT_INT64, invalid_shape);
EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape,
index_validation)),
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Sparse shape must be rank 1 .*")));
}
}
TEST_P(ValidateSparseTensorTest, IncompatibleShapesFails) {
constexpr int kNumNonZeros = 1000;
constexpr int kNumDims = 3;
const IndexValidation index_validation = GetParam();
const Tensor values = Tensor(DT_FLOAT, TensorShape({kNumNonZeros}));
const Tensor shape = Tensor(DT_INT64, TensorShape({kNumDims}));
{
const Tensor indices =
Tensor(DT_INT64, TensorShape({kNumNonZeros + 1, kNumDims}));
EXPECT_THAT((ValidateSparseTensor<int64_t>(indices, values, shape,
index_validation)),
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Number of elements in indices .* and "
"values .* do not match")));
}
{
const Tensor indices =
Tensor(DT_INT64, TensorShape({kNumNonZeros, kNumDims + 1}));
EXPECT_THAT(
(ValidateSparseTensor<int64_t>(indices, values, shape,
index_validation)),
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Index rank .* and shape rank .* do not match")));
}
}
TEST_P(ValidateSparseTensorTest, IndexOutOfBoundsFails) {
constexpr int kNumNonZeros = 1000;
constexpr int kNumTests = 100;
const IndexValidation index_validation = GetParam();
const bool ordered = (index_validation == IndexValidation::kOrdered);
const TensorShape kTensorShapes[] = {{3}, {4, 5}, {6, 7, 8}, {9, 10, 11, 12}};
for (const TensorShape& test_shape : kTensorShapes) {
Tensor indices, values, shape;
GenerateRandomSparseTensor(kNumNonZeros, test_shape, ordered, indices,
values, shape);
auto indices_mat = indices.matrix<int64_t>();
for (int test = 0; test < kNumTests; ++test) {
int64_t row = RandomPhilox().Uniform64(indices.dim_size(0));
int64_t dim = RandomPhilox().Uniform64(indices.dim_size(1));
int64_t old_val = indices_mat(row, dim);
for (int64_t val : {static_cast<int64_t>(-1), test_shape.dim_size(dim)}) {
indices_mat(row, dim) = val;
Status indices_valid = ValidateSparseTensor<int64_t>(
indices, values, shape, index_validation);
if (index_validation == IndexValidation::kNone) {
TF_EXPECT_OK(indices_valid);
} else {
EXPECT_THAT(
indices_valid,
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Sparse index tuple .* is out of bounds")))
<< indices_mat;
}
}
indices_mat(row, dim) = old_val;
}
}
}
TEST_P(ValidateSparseTensorTest, IndexOutOfOrderFailsForOrderedValidation) {
constexpr int kNumNonZeros = 1000;
constexpr int kNumTests = 100;
const TensorShape kTensorShapes[] = {{3}, {4, 5}, {6, 7, 8}, {9, 10, 11, 12}};
const IndexValidation index_validation = GetParam();
const bool ordered = (index_validation == IndexValidation::kOrdered);
for (const TensorShape& test_shape : kTensorShapes) {
Tensor indices, values, shape;
GenerateRandomSparseTensor(kNumNonZeros, test_shape, ordered, indices,
values, shape);
auto indices_mat = indices.matrix<int64_t>();
const int64_t nnz = indices.dim_size(0);
const int64_t ndims = indices.dim_size(1);
for (int test = 0; test < kNumTests; ++test) {
int64_t row1 = RandomPhilox().Uniform64(nnz);
int64_t row2;
do {
row2 = RandomPhilox().Uniform64(nnz);
} while (row1 == row2);
for (int dim = 0; dim < ndims; ++dim) {
std::swap(indices_mat(row1, dim), indices_mat(row2, dim));
}
Status indices_valid = ValidateSparseTensor<int64_t>(
indices, values, shape, index_validation);
if (ordered) {
EXPECT_THAT(
indices_valid,
StatusIs(error::INVALID_ARGUMENT,
MatchesRegex("Sparse index tuple .* is out of order")));
} else {
TF_EXPECT_OK(indices_valid);
}
for (int dim = 0; dim < ndims; ++dim) {
std::swap(indices_mat(row1, dim), indices_mat(row2, dim));
}
}
}
}
INSTANTIATE_TEST_SUITE_P(
ValidateSparseTensorTestSuite, ValidateSparseTensorTest,
::testing::Values(IndexValidation::kNone, IndexValidation::kOrdered,
IndexValidation::kUnordered),
[](const ::testing::TestParamInfo<ValidateSparseTensorTest::ParamType>&
info) {
switch (info.param) {
case IndexValidation::kNone:
return "None";
case IndexValidation::kUnordered:
return "Unordered";
case IndexValidation::kOrdered:
return "Ordered";
}
});
void BM_ValidateSparseTensor(::testing::benchmark::State& state,
TensorShape dense_shape,
IndexValidation index_validation) {
Tensor indices, values, shape;
const int64_t nnz = state.range(0);
GenerateRandomSparseTensor(nnz, dense_shape, true, indices,
values, shape);
for (auto s : state) {
::benchmark::DoNotOptimize(ValidateSparseTensor<int64_t>(
indices, values, shape, index_validation));
}
}
BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Ordered1024, TensorShape({1024}),
IndexValidation::kOrdered)
->Range(8, 512);
BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Unordered1024, TensorShape({1024}),
IndexValidation::kUnordered)
->Range(8, 512);
BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Ordered1024x1024,
TensorShape({1024, 1024}), IndexValidation::kOrdered)
->Range(8, 1024);
BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Unordered1024x1024,
TensorShape({1024, 1024}), IndexValidation::kUnordered)
->Range(8, 1024);
BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Ordered1024x1024x1024,
TensorShape({1024, 1024, 1024}), IndexValidation::kOrdered)
->Range(8, 1024 * 32);
BENCHMARK_CAPTURE(BM_ValidateSparseTensor, Unordered1024x1024x1024,
TensorShape({1024, 1024, 1024}), IndexValidation::kUnordered)
->Range(8, 1024 * 32);
}
}
} |
1,510 | cpp | tensorflow/tensorflow | random_index_shuffle | tensorflow/core/kernels/random_index_shuffle.cc | tensorflow/core/kernels/random_index_shuffle_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_RANDOM_INDEX_SHUFFLE_H_
#define TENSORFLOW_CORE_KERNELS_RANDOM_INDEX_SHUFFLE_H_
#include <array>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace random {
uint64_t index_shuffle(const uint64_t index, const std::array<uint32_t, 3>& key,
const uint64_t max_index, const int32_t rounds);
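// Usage sketch for the declaration above (the key values are arbitrary):
// map position 3 of a 10-element dataset to a pseudorandom position in
// [0, max_index].
//
//   std::array<uint32_t, 3> key = {42, 73, 1991};
//   uint64_t shuffled = index_shuffle(/*index=*/3, key, /*max_index=*/9,
//                                     /*rounds=*/4);
//   // shuffled is in [0, 9]; distinct indices map to distinct outputs,
//   // so iterating index 0..9 visits every position exactly once.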
}
}
#endif
#include "tensorflow/core/kernels/random_index_shuffle.h"
#include <assert.h>
#include <algorithm>
#include <array>
#include <bitset>
#include <cmath>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace random {
constexpr int kMinBlockSize = 16;
namespace impl {
#define ROTL(x, r, W) (((x) << (r)) | (x >> (W - (r))))
#define ROTR(x, r, W) (((x) >> (r)) | ((x) << (W - (r))))
#define SIMON_F(x, W) ((ROTL(x, 1, W) & ROTL(x, 8, W) ^ ROTL(x, 2, W)))
#define SIMON_Rx2(x, y, k1, k2, W) \
(y ^= SIMON_F(x, W), y ^= k1, x ^= SIMON_F(y, W), x ^= k2)
template <int W>
std::vector<std::bitset<W>> simon_key_schedule(
const std::array<uint32_t, 3>& key, const int32_t rounds) {
static_assert(W >= 8, "Minimum word size is 8 bits.");
const auto c = std::bitset<W>(0xfffffffc);
auto z = std::bitset<W>(0x7369f885192c0ef5LL);
std::vector<std::bitset<W>> rk({key[0], key[1], key[2]});
rk.reserve(rounds);
for (int i = 3; i < rounds; i++) {
rk.push_back(c ^ (z & std::bitset<W>(1)) ^ rk[i - 3] ^
ROTR(rk[i - 1], 3, W) ^ ROTR(rk[i - 1], 4, W));
z >>= 1;
}
return rk;
}
template <int W>
uint64_t simon_encrypt(const uint64_t value,
const std::vector<std::bitset<W>>& round_keys) {
static_assert(W >= 8, "Minimum word size is 8 bits.");
std::bitset<W> left(value >> W);
std::bitset<W> right(value);
for (int i = 0; i < round_keys.size();) {
SIMON_Rx2(right, left, round_keys[i++], round_keys[i++], W);
}
return (left.to_ullong() << W) | right.to_ullong();
}
template <int B>
uint64_t index_shuffle(const uint64_t index, const std::array<uint32_t, 3>& key,
const uint64_t max_index, const int32_t rounds) {
const auto round_keys = simon_key_schedule<B / 2>(key, rounds);
uint64_t new_index = index;
while (true) {
new_index = simon_encrypt<B / 2>(new_index, round_keys);
if (new_index <= max_index) {
return new_index;
}
}
}
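// Note on the loop above (descriptive comment, not from the original
// source): SIMON permutes the full 2^B block space, so an encrypted index
// may exceed max_index. Re-encrypting until the result falls back into
// [0, max_index] ("cycle walking") keeps the restriction to that range a
// bijection, which is the property the Bijection unit test checks.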
#undef ROTL
#undef ROTR
#undef SIMON_F
#undef SIMON_Rx2
}
uint64_t index_shuffle(const uint64_t index, const std::array<uint32_t, 3>& key,
const uint64_t max_index, const int32_t rounds) {
int block_size = static_cast<int>(std::ceil(std::log2(max_index)));
block_size = std::max(block_size + block_size % 2, kMinBlockSize);
assert(block_size > 0 && block_size % 2 == 0 && block_size <= 64);
assert(rounds >= 4 && rounds % 2 == 0);
#define HANDLE_BLOCK_SIZE(B) \
case B: \
return impl::index_shuffle<B>(index, key, max_index, rounds);
switch (block_size) {
HANDLE_BLOCK_SIZE(16);
HANDLE_BLOCK_SIZE(18);
HANDLE_BLOCK_SIZE(20);
HANDLE_BLOCK_SIZE(22);
HANDLE_BLOCK_SIZE(24);
HANDLE_BLOCK_SIZE(26);
HANDLE_BLOCK_SIZE(28);
HANDLE_BLOCK_SIZE(30);
HANDLE_BLOCK_SIZE(32);
HANDLE_BLOCK_SIZE(34);
HANDLE_BLOCK_SIZE(36);
HANDLE_BLOCK_SIZE(38);
HANDLE_BLOCK_SIZE(40);
HANDLE_BLOCK_SIZE(42);
HANDLE_BLOCK_SIZE(44);
HANDLE_BLOCK_SIZE(46);
HANDLE_BLOCK_SIZE(48);
HANDLE_BLOCK_SIZE(50);
HANDLE_BLOCK_SIZE(52);
HANDLE_BLOCK_SIZE(54);
HANDLE_BLOCK_SIZE(56);
HANDLE_BLOCK_SIZE(58);
HANDLE_BLOCK_SIZE(60);
HANDLE_BLOCK_SIZE(62);
default:
return impl::index_shuffle<64>(index, key, max_index, rounds);
}
#undef HANDLE_BLOCK_SIZE
}
}
} | #include "tensorflow/core/kernels/random_index_shuffle.h"
#include <array>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace random {
namespace {
class RandomIndexShuffleTest : public ::testing::TestWithParam<uint64_t> {
public:
uint64_t GetMaxValue() const { return GetParam(); }
};
TEST_P(RandomIndexShuffleTest, Bijection) {
const std::array<uint32, 3>& key = {42, 73, 1991};
const uint64_t max_value = GetMaxValue();
std::vector<bool> seen(max_value + 1, false);
for (uint64_t value = 0; value <= max_value; ++value) {
const uint64 output_value =
index_shuffle(value, key, max_value, 4);
EXPECT_GE(output_value, 0);
EXPECT_LE(output_value, max_value);
EXPECT_FALSE(seen[output_value]);
seen[output_value] = true;
}
}
INSTANTIATE_TEST_SUITE_P(MaxValueTests, RandomIndexShuffleTest,
::testing::Values(285, 17, 23495, 499'000));
}
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} |
1,511 | cpp | tensorflow/tensorflow | ops_testutil | tensorflow/core/kernels/ops_testutil.cc | tensorflow/core/kernels/ops_testutil_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_OPS_TESTUTIL_H_
#define TENSORFLOW_CORE_KERNELS_OPS_TESTUTIL_H_
#include <functional>
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
namespace tensorflow {
namespace test {
void SetOutputAttrs(OpKernelContext::Params* params,
std::vector<AllocatorAttributes>* attrs);
}
class OpsTestBase : public ::testing::Test {
public:
OpsTestBase();
~OpsTestBase() override;
void SetDevice(const DeviceType& device_type, std::unique_ptr<Device> device);
void set_node_def(const NodeDef& node_def);
NodeDef* node_def();
Status InitOp();
Status InitOpWithGraphVersion(int graph_def_version);
template <typename T>
void AddInput(const TensorShape& shape, std::function<T(int)> input_mapping) {
test::FillFn(AddInput(DataTypeToEnum<T>::v(), shape), input_mapping);
}
template <typename T>
void AddInputFromArray(const TensorShape& shape,
const gtl::ArraySlice<T> data) {
test::FillValues<T>(AddInput(DataTypeToEnum<T>::v(), shape), data);
}
template <typename T, typename SrcType>
void AddInputFromList(const TensorShape& shape,
std::initializer_list<SrcType> data) {
test::FillValues<T>(AddInput(DataTypeToEnum<T>::v(), shape), data);
}
template <typename T>
void AddResourceInput(const string& container, const string& name,
T* resource) {
CHECK_GT(input_types_.size(), inputs_.size())
<< "Adding more inputs than types; perhaps you need to call MakeOp";
ResourceMgr* rm = device_->resource_manager();
std::string container_name =
container.empty() ? rm->default_container() : container;
EXPECT_TRUE(rm->Create(container_name, name, resource).ok());
AddResourceInputInternal(container_name, name, TypeIndex::Make<T>());
}
Status RunOpKernel();
const Tensor& GetInput(int input_index) const;
TensorValue mutable_input(int input_index);
Tensor* GetOutput(int output_index);
Allocator* allocator();
OpKernel* op_kernel();
const DataTypeVector& output_types() const;
void set_session_metadata(SessionMetadata session_metadata) {
session_metadata_ = std::move(session_metadata);
}
const SessionMetadata& session_metadata() const { return session_metadata_; }
protected:
void CreateContext();
Tensor* AddInput(DataType dtype, const TensorShape& shape);
void AddResourceInputInternal(const std::string& container_name,
const std::string& name,
const TypeIndex& type_index);
std::unique_ptr<DeviceMgr> device_mgr_;
Device* device_;
Allocator* allocator_;
std::unique_ptr<OpKernel> kernel_;
std::unique_ptr<ScopedStepContainer> step_container_;
NodeDef node_def_;
DataTypeVector input_types_;
DeviceType device_type_;
mutex lock_for_refs_;
absl::InlinedVector<TensorValue, 4> inputs_;
std::vector<Tensor*> tensors_;
std::vector<Tensor*> managed_outputs_;
std::vector<AllocatorAttributes> out_alloc_attrs_;
checkpoint::TensorSliceReaderCacheWrapper slice_reader_cache_wrapper_;
CancellationManager default_cancellation_manager_;
std::unique_ptr<OpKernelContext::Params> params_;
std::unique_ptr<OpKernelContext> context_;
std::unique_ptr<Allocator> managed_allocator_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
SessionMetadata session_metadata_;
private:
OpsTestBase(const OpsTestBase&) = delete;
void operator=(const OpsTestBase&) = delete;
};
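// A typical usage sketch (mirrors the pattern used by the kernel tests in
// this directory): build a NodeDef, initialize the kernel, feed inputs,
// run, then inspect outputs.
//
//   class MyKernelTest : public OpsTestBase {};
//
//   TEST_F(MyKernelTest, Basic) {
//     TF_ASSERT_OK(NodeDefBuilder("op", "Identity")
//                      .Input(FakeInput(DT_INT32))
//                      .Finalize(node_def()));
//     TF_ASSERT_OK(InitOp());
//     AddInputFromArray<int32>(TensorShape({2}), {1, 2});
//     TF_ASSERT_OK(RunOpKernel());
//     Tensor expected(allocator(), DT_INT32, TensorShape({2}));
//     test::FillValues<int32>(&expected, {1, 2});
//     test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
//   }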
}
#endif
#include "tensorflow/core/framework/node_properties.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h"
#endif
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
namespace tensorflow {
namespace test {
void SetOutputAttrs(OpKernelContext::Params* params,
std::vector<AllocatorAttributes>* attrs) {
attrs->clear();
for (int index = 0; index < params->op_kernel->num_outputs(); index++) {
AllocatorAttributes attr;
const bool on_host =
(params->op_kernel->output_memory_types()[index] == HOST_MEMORY);
attr.set_on_host(on_host);
attrs->push_back(attr);
}
params->output_attr_array = attrs->data();
}
}
OpsTestBase::OpsTestBase() : device_type_(DEVICE_CPU) {
auto device = DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
CHECK(device) << "Could not create CPU device";
thread_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), "default", 1);
device_ = device.get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device));
allocator_ = device_->GetAllocator(AllocatorAttributes());
flib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
FunctionDefLibrary());
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions());
}
OpsTestBase::~OpsTestBase() {
for (auto& temp : tensors_) {
delete temp;
}
for (auto& temp : managed_outputs_) {
delete temp;
}
tensors_.clear();
managed_outputs_.clear();
context_.reset(nullptr);
params_.reset(nullptr);
}
void OpsTestBase::SetDevice(const DeviceType& device_type,
std::unique_ptr<Device> device) {
CHECK(device) << "No device provided";
device_ = device.get();
device_type_ = device_type;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
if (device_type == DEVICE_GPU) {
managed_allocator_.reset(new GpuManagedAllocator());
allocator_ = managed_allocator_.get();
} else {
managed_allocator_.reset();
allocator_ = device_->GetAllocator(AllocatorAttributes());
}
#else
CHECK_NE(device_type, DEVICE_GPU)
<< "Requesting GPU on binary compiled without GOOGLE_CUDA or "
"TENSORFLOW_USE_ROCM.";
allocator_ = device_->GetAllocator(AllocatorAttributes());
#endif
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device));
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
thread_pool_.get());
}
void OpsTestBase::set_node_def(const NodeDef& node_def) {
node_def_.CopyFrom(node_def);
}
NodeDef* OpsTestBase::node_def() { return &node_def_; }
Status OpsTestBase::InitOp() {
return InitOpWithGraphVersion(TF_GRAPH_DEF_VERSION);
}
Status OpsTestBase::InitOpWithGraphVersion(int graph_def_version) {
std::shared_ptr<const NodeProperties> props;
TF_RETURN_IF_ERROR(NodeProperties::CreateFromNodeDef(
node_def_, OpRegistry::Global(), &props));
OpKernel* kernel;
TF_RETURN_IF_ERROR(CreateOpKernel(
device_type_, device_, allocator(), nullptr,
device_->resource_manager(), props, graph_def_version, &kernel));
kernel_.reset(kernel);
input_types_ = kernel_->input_types();
return absl::OkStatus();
}
static std::function<void(std::function<void()>)>* GetDefaultRunner() {
static auto* const default_runner =
new std::function<void(std::function<void()>)>(
[](const std::function<void()>& f) { f(); });
return default_runner;
}
void OpsTestBase::CreateContext() {
context_.reset(nullptr);
for (auto& temp : managed_outputs_) {
delete temp;
}
managed_outputs_.clear();
managed_outputs_.resize(0);
params_.reset(new OpKernelContext::Params);
params_->device = device_;
params_->frame_iter = FrameAndIter(0, 0);
params_->inputs = inputs_;
params_->op_kernel = kernel_.get();
step_container_.reset(new ScopedStepContainer(0, [](const string&) {}));
params_->step_container = step_container_.get();
test::SetOutputAttrs(params_.get(), &out_alloc_attrs_);
params_->slice_reader_cache = &slice_reader_cache_wrapper_;
params_->cancellation_manager = &default_cancellation_manager_;
params_->resource_manager = device_->resource_manager();
params_->function_library = pflr_->GetFLR(device_->name());
params_->runner = GetDefaultRunner();
params_->session_metadata = &session_metadata();
context_.reset(new OpKernelContext(params_.get()));
}
Status OpsTestBase::RunOpKernel() {
CreateContext();
device_->Compute(kernel_.get(), context_.get());
return context_->status();
}
const Tensor& OpsTestBase::GetInput(int input_index) const {
CHECK_LT(input_index, context_->num_inputs());
CHECK(!IsRefType(context_->input_dtype(input_index)));
return context_->input(input_index);
}
TensorValue OpsTestBase::mutable_input(int input_index) {
CHECK_LT(input_index, inputs_.size());
return inputs_[input_index];
}
Tensor* OpsTestBase::GetOutput(int output_index) {
CHECK_LT(output_index, context_->num_outputs());
Tensor* output = context_->mutable_output(output_index);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
if (device_type_ == DEVICE_GPU) {
managed_outputs_.resize(context_->num_outputs());
if (!managed_outputs_[output_index]) {
Tensor* managed_output =
new Tensor(allocator(), output->dtype(), output->shape());
auto src = output->tensor_data();
auto dst = managed_output->tensor_data();
context_->eigen_gpu_device().memcpyDeviceToHost(
const_cast<char*>(dst.data()), src.data(), src.size());
context_->eigen_gpu_device().synchronize();
managed_outputs_[output_index] = managed_output;
}
output = managed_outputs_[output_index];
}
#endif
return output;
}
Allocator* OpsTestBase::allocator() { return allocator_; }
OpKernel* OpsTestBase::op_kernel() { return kernel_.get(); }
const DataTypeVector& OpsTestBase::output_types() const {
return kernel_->output_types();
}
Tensor* OpsTestBase::AddInput(DataType dtype, const TensorShape& shape) {
CHECK_GT(input_types_.size(), inputs_.size())
<< "Adding more inputs than types; perhaps you need to call MakeOp";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(allocator(), dtype, shape);
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]), dtype);
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], dtype);
inputs_.push_back({nullptr, input});
}
return input;
}
void OpsTestBase::AddResourceInputInternal(const std::string& container_name,
const std::string& name,
const TypeIndex& type_index) {
ResourceHandle handle;
handle.set_device(device_->name());
handle.set_container(container_name);
handle.set_name(name);
handle.set_hash_code(type_index.hash_code());
handle.set_maybe_type_name(type_index.name());
Tensor* input = new Tensor(allocator(), DT_RESOURCE, TensorShape({}));
input->scalar<ResourceHandle>()() = handle;
tensors_.push_back(input);
inputs_.push_back({nullptr, input});
}
} | #include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
TEST_F(OpsTestBase, ScopedStepContainer) {
TF_EXPECT_OK(NodeDefBuilder("identity", "Identity")
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {""});
TF_EXPECT_OK(RunOpKernel());
EXPECT_TRUE(step_container_ != nullptr);
}
TEST_F(OpsTestBase, ResourceVariableInput) {
TF_EXPECT_OK(NodeDefBuilder("identity", "Identity")
.Input(FakeInput(DT_RESOURCE))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
Var* var = new Var(DT_STRING);
AddResourceInput("" , "Test" , var);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ(output->dtype(), DT_RESOURCE);
}
} |
1,512 | cpp | tensorflow/tensorflow | sparse_matmul_op | tensorflow/core/kernels/sparse_matmul_op.cc | tensorflow/core/kernels/sparse_matmul_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_SPARSE_MATMUL_OP_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_MATMUL_OP_H_
#include "Eigen/Core"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/types.h"
#if defined(PLATFORM_WINDOWS)
#include "tsl/platform/windows/intrinsics_port.h"
#endif
namespace Eigen {
namespace internal {
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pexpand_bf16_l(const Packet& from) {
tensorflow::uint32 tmp;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
tmp = (reinterpret_cast<const tensorflow::uint32&>(from)) & 0xffff0000;
#else
tmp = (reinterpret_cast<const tensorflow::uint32&>(from) << 16) & 0xffff0000;
#endif
return reinterpret_cast<const float&>(tmp);
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pexpand_bf16_u(const Packet& from) {
tensorflow::uint32 tmp;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
tmp = (reinterpret_cast<const tensorflow::uint32&>(from) << 16) & 0xffff0000;
#else
tmp = (reinterpret_cast<const tensorflow::uint32&>(from)) & 0xffff0000;
#endif
return reinterpret_cast<const float&>(tmp);
}
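// Worked example (descriptive, assuming the little-endian layout handled by
// the scalar paths above): two bfloat16 values packed into one 32-bit lane,
// 0x3F80 in the upper half (1.0f) and 0x4000 in the lower half (2.0f), form
// the word 0x3F804000. pexpand_bf16_l shifts the low 16 bits into the top
// half, yielding float bits 0x40000000 == 2.0f, while pexpand_bf16_u keeps
// the high 16 bits in place, yielding 0x3F800000 == 1.0f.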
#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) || \
defined(EIGEN_VECTORIZE_NEON) || defined(EIGEN_VECTORIZE_ZVECTOR)
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet4f pexpand_bf16_l(const Packet4f& from) {
float r[4];
tensorflow::uint32 p[4];
pstoreu(r, from);
tensorflow::uint32* ir = reinterpret_cast<tensorflow::uint32*>(r);
p[0] = (ir[0] << 16) & 0xffff0000;
p[1] = ir[0] & 0xffff0000;
p[2] = (ir[1] << 16) & 0xffff0000;
p[3] = ir[1] & 0xffff0000;
return ploadu<Packet4f>(reinterpret_cast<float*>(p));
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet4f pexpand_bf16_u(const Packet4f& from) {
float r[4];
tensorflow::uint32 p[4];
pstoreu(r, from);
tensorflow::uint32* ir = reinterpret_cast<tensorflow::uint32*>(r);
p[0] = (ir[2] << 16) & 0xffff0000;
p[1] = ir[2] & 0xffff0000;
p[2] = (ir[3] << 16) & 0xffff0000;
p[3] = ir[3] & 0xffff0000;
return ploadu<Packet4f>(reinterpret_cast<float*>(p));
}
#endif
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pinterleave4x64(const Packet& from) {
return from;
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pbroadcast_first(const Packet& a) {
return a;
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pbroadcast_second(const Packet& a) {
assert(false && "Not applicable to Scalar Values");
return a;
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pbroadcast_third(const Packet& a) {
assert(false && "Not applicable to Scalar Values");
return a;
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pbroadcast_fourth(const Packet& a) {
assert(false && "Not applicable to Scalar Values");
return a;
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pload4bf16(
const typename unpacket_traits<Packet>::type* from) {
assert(false && "Not applicable to Scalar Values");
return Packet();
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet pload2bf16(
const typename unpacket_traits<Packet>::type* from) {
assert(false && "Not applicable to Scalar Values");
return Packet();
}
#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX) || \
defined(EIGEN_VECTORIZE_NEON) || defined(EIGEN_VECTORIZE_ZVECTOR)
template <>
EIGEN_STRONG_INLINE Packet4f pload4bf16<Packet4f>(const float* from) {
tensorflow::uint32 p[4];
const tensorflow::uint32* ir =
reinterpret_cast<const tensorflow::uint32*>(from);
p[0] = (ir[0] << 16) & 0xffff0000;
p[1] = ir[0] & 0xffff0000;
p[2] = (ir[1] << 16) & 0xffff0000;
p[3] = ir[1] & 0xffff0000;
return ploadu<Packet4f>(reinterpret_cast<float*>(p));
}
template <>
EIGEN_STRONG_INLINE Packet4f pload2bf16<Packet4f>(const float* from) {
tensorflow::uint32 p[4];
const tensorflow::uint32* ir =
reinterpret_cast<const tensorflow::uint32*>(from);
p[0] = (ir[0] << 16) & 0xffff0000;
p[1] = ir[0] & 0xffff0000;
p[2] = (ir[0] << 16) & 0xffff0000;
p[3] = ir[0] & 0xffff0000;
return ploadu<Packet4f>(reinterpret_cast<float*>(p));
}
#endif
#if defined(EIGEN_VECTORIZE_NEON)
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_first<Packet4f>(const Packet4f& a) {
return pset1<Packet4f>(pfirst(a));
}
template <>
EIGEN_STRONG_INLINE Packet2f pbroadcast_first<Packet2f>(const Packet2f& a) {
return pset1<Packet2f>(pfirst(a));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_second<Packet4f>(const Packet4f& a) {
return pset1<Packet4f>(vgetq_lane_f32(a, 1));
}
template <>
EIGEN_STRONG_INLINE Packet2f pbroadcast_second<Packet2f>(const Packet2f& a) {
return pset1<Packet2f>(vget_lane_f32(a, 1));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_third<Packet4f>(const Packet4f& a) {
return pset1<Packet4f>(vgetq_lane_f32(a, 2));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_fourth<Packet4f>(const Packet4f& a) {
return pset1<Packet4f>(vgetq_lane_f32(a, 3));
}
#endif
#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_first<Packet4f>(const Packet4f& a) {
return vec_splat(a, 0);
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_second<Packet4f>(const Packet4f& a) {
return vec_splat(a, 1);
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_third<Packet4f>(const Packet4f& a) {
return vec_splat(a, 2);
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_fourth<Packet4f>(const Packet4f& a) {
return vec_splat(a, 3);
}
#endif
#ifdef EIGEN_VECTORIZE_SSE2
template <>
EIGEN_STRONG_INLINE Packet4f pinterleave4x64<Packet4f>(const Packet4f& from) {
return from;
}
template <>
EIGEN_STRONG_INLINE Packet4f pload4bf16<Packet4f>(const float* from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castpd_si128(_mm_load_pd1((const double*)from));
return _mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp));
}
template <>
EIGEN_STRONG_INLINE Packet4f pload2bf16<Packet4f>(const float* from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castps_si128(_mm_load_ps1(from));
return _mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp));
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet4f pexpand_bf16_l(const Packet4f& from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castps_si128(from);
return _mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp));
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet4f pexpand_bf16_u(const Packet4f& from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castps_si128(from);
return _mm_castsi128_ps(_mm_unpackhi_epi16(zero, tmp));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_first<Packet4f>(const Packet4f& a) {
return _mm_set1_ps(pfirst<Packet4f>(a));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_second<Packet4f>(const Packet4f& a) {
return _mm_set1_ps(_mm_cvtss_f32(_mm_shuffle_ps(a, a, 1)));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_third<Packet4f>(const Packet4f& a) {
return _mm_set1_ps(_mm_cvtss_f32(_mm_shuffle_ps(a, a, 2)));
}
template <>
EIGEN_STRONG_INLINE Packet4f pbroadcast_fourth<Packet4f>(const Packet4f& a) {
return _mm_set1_ps(_mm_cvtss_f32(_mm_shuffle_ps(a, a, 3)));
}
#endif
#ifdef EIGEN_VECTORIZE_AVX512
template <>
EIGEN_STRONG_INLINE Packet16f
pbroadcast_first<Packet16f>(const Packet16f& a_in) {
Packet4f a = _mm512_castps512_ps128(a_in);
return _mm512_broadcastss_ps(a);
}
template <>
EIGEN_STRONG_INLINE Packet16f
pbroadcast_second<Packet16f>(const Packet16f& a_in) {
Packet4f a = _mm512_castps512_ps128(a_in);
return _mm512_broadcastss_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1)));
}
template <>
EIGEN_STRONG_INLINE Packet16f
pbroadcast_third<Packet16f>(const Packet16f& a_in) {
Packet4f a = _mm512_castps512_ps128(a_in);
return _mm512_broadcastss_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2)));
}
template <>
EIGEN_STRONG_INLINE Packet16f
pbroadcast_fourth<Packet16f>(const Packet16f& a_in) {
Packet4f a = _mm512_castps512_ps128(a_in);
return _mm512_broadcastss_ps(_mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 3, 3, 3)));
}
template <>
EIGEN_STRONG_INLINE Packet8d pbroadcast_first<Packet8d>(const Packet8d& a_in) {
Packet2d a = _mm512_castpd512_pd128(a_in);
return _mm512_broadcastsd_pd(a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pbroadcast_second<Packet8d>(const Packet8d& a_in) {
Packet2d a = _mm_permute_pd(_mm512_castpd512_pd128(a_in), 3);
return _mm512_broadcastsd_pd(a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pbroadcast_third<Packet8d>(const Packet8d& a_in) {
Packet2d a = _mm256_extractf128_pd(_mm512_castpd512_pd256(a_in), 1);
return _mm512_broadcastsd_pd(a);
}
template <>
EIGEN_STRONG_INLINE Packet8d pbroadcast_fourth<Packet8d>(const Packet8d& a_in) {
Packet2d a =
_mm_permute_pd(_mm256_extractf128_pd(_mm512_castpd512_pd256(a_in), 1), 3);
return _mm512_broadcastsd_pd(a);
}
template <>
EIGEN_STRONG_INLINE Packet16i
pbroadcast_first<Packet16i>(const Packet16i& a_in) {
Packet4i a = _mm512_castsi512_si128(a_in);
return _mm512_broadcastd_epi32(a);
}
template <>
EIGEN_STRONG_INLINE Packet16i
pbroadcast_second<Packet16i>(const Packet16i& a_in) {
Packet4i a = _mm512_castsi512_si128(a_in);
return _mm512_broadcastd_epi32(_mm_shuffle_epi32(a, _MM_SHUFFLE(1, 1, 1, 1)));
}
template <>
EIGEN_STRONG_INLINE Packet16i
pbroadcast_third<Packet16i>(const Packet16i& a_in) {
Packet4i a = _mm512_castsi512_si128(a_in);
return _mm512_broadcastd_epi32(_mm_shuffle_epi32(a, _MM_SHUFFLE(2, 2, 2, 2)));
}
template <>
EIGEN_STRONG_INLINE Packet16i
pbroadcast_fourth<Packet16i>(const Packet16i& a_in) {
Packet4i a = _mm512_castsi512_si128(a_in);
return _mm512_broadcastd_epi32(_mm_shuffle_epi32(a, _MM_SHUFFLE(3, 3, 3, 3)));
}
#endif
#ifdef EIGEN_VECTORIZE_AVX
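// For 8-float packets, pinterleave4x64 reorders the four 64-bit lanes from
// [0, 1, 2, 3] to [0, 2, 1, 3] (the AVX2 path permutes directly; the fallback
// swaps the two middle lanes element by element).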
template <>
EIGEN_STRONG_INLINE Packet8f pinterleave4x64<Packet8f>(const Packet8f& from) {
#ifdef EIGEN_VECTORIZE_AVX2
return _mm256_castsi256_ps(_mm256_permute4x64_epi64(_mm256_castps_si256(from),
_MM_SHUFFLE(3, 1, 2, 0)));
#else
auto tmp1 = _mm256_extract_epi32(_mm256_castps_si256(from), 2);
auto tmp2 = _mm256_extract_epi32(_mm256_castps_si256(from), 3);
auto tmp3 = _mm256_extract_epi32(_mm256_castps_si256(from), 4);
auto tmp4 = _mm256_extract_epi32(_mm256_castps_si256(from), 5);
auto tmp5 = _mm256_insert_epi32(_mm256_castps_si256(from), tmp1, 4);
tmp5 = _mm256_insert_epi32(tmp5, tmp2, 5);
tmp5 = _mm256_insert_epi32(tmp5, tmp3, 2);
tmp5 = _mm256_insert_epi32(tmp5, tmp4, 3);
return _mm256_castsi256_ps(tmp5);
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8f pload4bf16<Packet8f>(const float* from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castpd_si128(_mm_load_pd1((const double*)from));
return _mm256_castps128_ps256(
_mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp)));
}
template <>
EIGEN_STRONG_INLINE Packet8f pload2bf16<Packet8f>(const float* from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castps_si128(_mm_load_ps1(from));
return _mm256_castps128_ps256(
_mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp)));
}
#ifdef EIGEN_VECTORIZE_AVX512
template <>
EIGEN_STRONG_INLINE Packet16f pload4bf16<Packet16f>(const float* from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castpd_si128(_mm_load_pd1((const double*)from));
return _mm512_castps128_ps512(
_mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp)));
}
template <>
EIGEN_STRONG_INLINE Packet16f pload2bf16<Packet16f>(const float* from) {
__m128i zero = _mm_setzero_si128();
__m128i tmp = _mm_castps_si128(_mm_load_ps1(from));
return _mm512_castps128_ps512(
_mm_castsi128_ps(_mm_unpacklo_epi16(zero, tmp)));
}
#endif
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet8f pexpand_bf16_l(const Packet8f& from) {
#ifdef EIGEN_VECTORIZE_AVX2
__m256i zero = _mm256_setzero_si256();
__m256i tmp = _mm256_castps_si256(from);
return _mm256_castsi256_ps(_mm256_unpacklo_epi16(zero, tmp));
#else
__m128i zero = _mm_setzero_si128();
__m128i low = _mm_castps_si128(_mm256_extractf128_ps(from, 0));
__m128i res_l = _mm_unpacklo_epi16(zero, low);
__m128i high = _mm_castps_si128(_mm256_extractf128_ps(from, 1));
__m128i res_h = _mm_unpacklo_epi16(zero, high);
__m256 res = _mm256_castps128_ps256(_mm_castsi128_ps(res_l));
res = _mm256_insertf128_ps(res, _mm_castsi128_ps(res_h), 1);
return res;
#endif
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet8f pexpand_bf16_u(const Packet8f& from) {
#ifdef EIGEN_VECTORIZE_AVX2
__m256i zero = _mm256_setzero_si256();
__m256i tmp = _mm256_castps_si256(from);
return _mm256_castsi256_ps(_mm256_unpackhi_epi16(zero, tmp));
#else
__m128i zero = _mm_setzero_si128();
__m128i low = _mm_castps_si128(_mm256_extractf128_ps(from, 0));
__m128i res_l = _mm_unpackhi_epi16(zero, low);
__m128i high = _mm_castps_si128(_mm256_extractf128_ps(from, 1));
__m128i res_h = _mm_unpackhi_epi16(zero, high);
__m256 res = _mm256_castps128_ps256(_mm_castsi128_ps(res_l));
res = _mm256_insertf128_ps(res, _mm_castsi128_ps(res_h), 1);
return res;
#endif
}
template <>
EIGEN_STRONG_INLINE Packet8f pbroadcast_first<Packet8f>(const Packet8f& a) {
return _mm256_set1_ps(pfirst<Packet8f>(a));
}
template <>
EIGEN_STRONG_INLINE Packet8f pbroadcast_second<Packet8f>(const Packet8f& a) {
return _mm256_set1_ps(
_mm_cvtss_f32(_mm256_castps256_ps128(_mm256_permute_ps(a, 1))));
}
template <>
EIGEN_STRONG_INLINE Packet8f pbroadcast_third<Packet8f>(const Packet8f& a) {
return _mm256_set1_ps(
_mm_cvtss_f32(_mm256_castps256_ps128(_mm256_permute_ps(a, 2))));
}
template <>
EIGEN_STRONG_INLINE Packet8f pbroadcast_fourth<Packet8f>(const Packet8f& a) {
return _mm256_set1_ps(
_mm_cvtss_f32(_mm256_castps256_ps128(_mm256_permute_ps(a, 3))));
}
#endif
#ifdef EIGEN_VECTORIZE_AVX512
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet16f pexpand_bf16_l(const Packet16f& from) {
return _mm512_castsi512_ps(_mm512_slli_epi32(
_mm512_cvtepu16_epi32(_mm512_castsi512_si256(_mm512_castps_si512(from))),
16));
}
template <typename Packet>
EIGEN_DEVICE_FUNC inline Packet16f pexpand_bf16_u(const Packet16f& from) {
Packet16i tmp = _mm512_castps_si512(from);
Packet16i tmp2 = _mm512_alignr_epi32(tmp, tmp, 8);
return _mm512_castsi512_ps(_mm512_slli_epi32(
_mm512_cvtepu16_epi32(_mm512_castsi512_si256(tmp2)), 16));
}
#endif
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/sparse_matmul_op.h"
#include <map>
#include <memory>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/bfloat16.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
#include "xla/tsl/framework/contraction/eigen_contraction_kernel.h"
#endif
#define ALWAYS_INLINE EIGEN_ALWAYS_INLINE
namespace tensorflow {
namespace {
template <typename T>
using BasicMatrix = Eigen::Tensor<T, 2, Eigen::RowMajor>;
template <typename T>
using BasicMatrixMap =
Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Aligned>;
using Matrix = BasicMatrix<float>;
using MatrixMap = BasicMatrixMap<float>;
using CPUDevice = Eigen::ThreadPoolDevice;
using DSizes = Eigen::DSizes<Eigen::DenseIndex, 2>;
inline Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<0>>
dsizes_00() {
return Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<0>>();
}
inline Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<0>>
dsizes_10() {
return Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<0>>();
}
const int K = 64;
const int M = 64;
const int N = 128;
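// K, M and N are fixed block sizes used by the sparse matmul kernel.
// SparseSlice is a compact sparse representation of a num_rows x num_cols
// slice of a matrix: non-zeros are grouped into triples (index3 / data3) with
// leftover singles in (index / data), and column indices are stored relative
// to column blocks of at most block_size entries.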
template <typename T>
struct SparseSlice {
using ConstMatrixMap = BasicMatrixMap<const T>;
public:
struct Index3 {
Index3(uint8 m, uint8 k1, uint8 k2, uint8 k3)
: m(m), k1(k1), k2(k2), k3(k3) {}
uint8 m;
uint8 k1;
uint8 k2;
uint8 k3;
};
struct Index {
Index(uint8 m, uint8 k) : m(m), k(k) {}
uint8 m;
uint8 k;
};
SparseSlice(int nrows, int ncols, int bsize)
: num_rows(nrows), num_cols(ncols), block_size(bsize) {
DCHECK_LE(nrows, 256);
DCHECK_LE(block_size, 256);
}
template <bool Transpose = false>
void Initialize(const ConstMatrixMap& mat, int col_offset);
void Clear();
std::vector<int> index3_offset;
std::vector<Index3> index3;
std::vector<T> data3;
std::vector<int> index_offset;
std::vector<Index> index;
std::vector<T> data;
const int num_rows;
const int num_cols;
const int block_size;
};
template <typename T>
bool IsZero(T v);
template <>
ALWAYS_INLINE bool IsZero(bfloat16 v) {
return !static_cast<bool>(v);
}
template <>
ALWAYS_INLINE bool IsZero(float v) {
return v == 0.0f;
}
template <typename T>
class StridedIterator {
public:
StridedIterator(int stride, const T* start, const T* end)
: stride_(stride), k_(0), curr_(start), end_(end) {}
ALWAYS_INLINE bool Done() const { return curr_ >= end_; }
ALWAYS_INLINE T Value() const { return *curr_; }
ALWAYS_INLINE uint8 K() const { return k_; }
ALWAYS_INLINE void Next() {
curr_ += stride_;
++k_;
}
ALWAYS_INLINE void EatZeros() {
while (curr_ < end_ && IsZero<T>(*curr_)) {
Next();
}
}
private:
const int stride_;
uint8 k_;
const T* curr_;
const T* const end_;
};
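// Scans each row of every column block of `mat`, skipping zeros. Runs of three
// consecutive non-zeros go into index3/data3 for the 3-way multiply-add path;
// a trailing single or pair at the end of a row is stored in index/data.
// index3_offset / index_offset record the cumulative entry counts per block.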
template <typename T>
template <bool Transpose>
void SparseSlice<T>::Initialize(
const typename SparseSlice<T>::ConstMatrixMap& mat, int col_offset) {
const int mat_rows = Transpose ? mat.dimension(1) : mat.dimension(0);
const int mat_cols = Transpose ? mat.dimension(0) : mat.dimension(1);
DCHECK_LE(num_rows, mat_rows);
DCHECK_LE(num_cols + col_offset, mat_cols);
int num_blocks = (num_cols + block_size - 1) / block_size;
int mat_size = num_rows * num_cols;
index3_offset.reserve(num_blocks);
data3.reserve(mat_size);
index3.reserve(mat_size / 3);
index_offset.reserve(num_blocks);
data.reserve(num_blocks * num_rows * 2);
index.reserve(num_blocks * num_rows * 2);
const int stride = Transpose ? mat.dimension(1) : 1;
for (int i = 0; i < num_blocks; ++i) {
int num_block_cols = std::min(block_size, num_cols - block_size * i);
for (int row = 0; row < num_rows; ++row) {
const uint8 m = static_cast<uint8>(row);
const auto* start =
Transpose ? &mat(col_offset, row) : &mat(row, col_offset);
const auto* end = start + stride * num_block_cols;
StridedIterator<T> iter(stride, start, end);
while (true) {
iter.EatZeros();
if (iter.Done()) break;
const uint8 k1 = iter.K();
const T value1 = iter.Value();
iter.Next();
iter.EatZeros();
if (iter.Done()) {
data.push_back(value1);
index.emplace_back(m, k1);
break;
}
const uint8 k2 = iter.K();
const T value2 = iter.Value();
iter.Next();
iter.EatZeros();
if (iter.Done()) {
data.push_back(value2);
index.emplace_back(m, k2);
data.push_back(value1);
index.emplace_back(m, k1);
break;
}
const uint8 k3 = iter.K();
data3.push_back(value1);
data3.push_back(value2);
data3.push_back(iter.Value());
iter.Next();
index3.emplace_back(m, k1, k2, k3);
}
}
col_offset += block_size;
index3_offset.push_back(index3.size());
index_offset.push_back(index.size());
}
DCHECK_EQ(index3_offset.size(), num_blocks);
DCHECK_EQ(index_offset.size(), num_blocks);
DCHECK_EQ(3 * index3.size(), data3.size());
DCHECK_EQ(index.size(), data.size());
}
template <typename T>
void SparseSlice<T>::Clear() {
index3_offset.clear();
index3.clear();
data3.clear();
index_offset.clear();
index.clear();
data.clear();
}
using Packet = Eigen::internal::packet_traits<float>::type;
const int kNumOperands = (sizeof(Packet) / sizeof(float));
#define LOAD(x) Eigen::internal::pload<Packet>(x);
#define EXPAND_BFLOAT_L(x, y) \
const auto y = Eigen::internal::pexpand_bf16_l<Packet>(x);
#define EXPAND_BFLOAT_U(x, y) \
const auto y = Eigen::internal::pexpand_bf16_u<Packet>(x);
#define STORE(x, y) Eigen::internal::pstore<float>(x, y);
#define FMA(a, b, c, d) d = Eigen::internal::pmadd<Packet>(a, b, c);
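// Converts one bfloat16 value to float by writing its 16-bit pattern into the
// high (exponent) half of a zero-initialized float; which half that is depends
// on host endianness.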
ALWAYS_INLINE float ConvertBfloat16ToFloat(const bfloat16* src) {
float out = 0;
auto tmp = reinterpret_cast<bfloat16*>(&out);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
tmp[0] = *src;
#else
tmp[1] = *src;
#endif
return out;
}
ALWAYS_INLINE Packet ConvertFourBfloat16ToFloat(const bfloat16* src) {
return Eigen::internal::pload4bf16<Packet>(
reinterpret_cast<const float*>(src));
}
ALWAYS_INLINE Packet ConvertTwoBfloat16ToFloat(const bfloat16* src) {
return Eigen::internal::pload2bf16<Packet>(
reinterpret_cast<const float*>(src));
}
ALWAYS_INLINE void ScalarMulAdd(const float a, const float** inp, float** out) {
**out += a * **inp;
++*inp;
++*out;
}
ALWAYS_INLINE void ScalarMulAdd(const float a, const bfloat16** inp,
float** out) {
float inp_f = ConvertBfloat16ToFloat(*inp);
**out += a * inp_f;
++*inp;
++*out;
}
ALWAYS_INLINE void ScalarMulAdd3Way(const float a1, const float a2,
const float a3, const bfloat16** inp1,
const bfloat16** inp2,
const bfloat16** inp3, float** out) {
float inp1_f = ConvertBfloat16ToFloat(*inp1);
float inp2_f = ConvertBfloat16ToFloat(*inp2);
float inp3_f = ConvertBfloat16ToFloat(*inp3);
**out += a1 * inp1_f + a2 * inp2_f + a3 * inp3_f;
++*out;
++*inp1;
++*inp2;
++*inp3;
}
ALWAYS_INLINE void ScalarMulAdd3Way(const float a1, const float a2,
const float a3, const float** inp1,
const float** inp2, const float** inp3,
float** out) {
**out += a1 * **inp1 + a2 * **inp2 + a3 * **inp3;
++*out;
++*inp1;
++*inp2;
++*inp3;
}
ALWAYS_INLINE void LoadSingleScalar(const bfloat16** data, Packet* l) {
auto tmp = ConvertBfloat16ToFloat(*data);
*l = Eigen::internal::pset1<Packet>(tmp);
++*data;
}
ALWAYS_INLINE void LoadTwoScalars(const bfloat16** data, Packet* l1,
Packet* l2) {
if (kNumOperands >= 2) {
auto tmp = ConvertTwoBfloat16ToFloat(*data);
*l1 = Eigen::internal::pbroadcast_first<Packet>(tmp);
*l2 = Eigen::internal::pbroadcast_second<Packet>(tmp);
*data += 2;
} else {
LoadSingleScalar(data, l1);
LoadSingleScalar(data, l2);
}
}
ALWAYS_INLINE void LoadFourScalars(const bfloat16** data, Packet* l1,
Packet* l2, Packet* l3, Packet* l4) {
if (kNumOperands >= 4) {
auto tmp = ConvertFourBfloat16ToFloat(*data);
*l1 = Eigen::internal::pbroadcast_first<Packet>(tmp);
*l2 = Eigen::internal::pbroadcast_second<Packet>(tmp);
*l3 = Eigen::internal::pbroadcast_third<Packet>(tmp);
*l4 = Eigen::internal::pbroadcast_fourth<Packet>(tmp);
*data += 4;
} else {
LoadTwoScalars(data, l1, l2);
LoadTwoScalars(data, l3, l4);
}
}
ALWAYS_INLINE void LoadSingleScalar(const float** data, Packet* l) {
*l = Eigen::internal::pload1<Packet>(*data);
++(*data);
}
ALWAYS_INLINE void LoadTwoScalars(const float** data, Packet* l1, Packet* l2) {
LoadSingleScalar(data, l1);
LoadSingleScalar(data, l2);
}
ALWAYS_INLINE void LoadFourScalars(const float** data, Packet* l1, Packet* l2,
Packet* l3, Packet* l4) {
LoadTwoScalars(data, l1, l2);
LoadTwoScalars(data, l3, l4);
}
template <typename T>
ALWAYS_INLINE void LoadThreeScalars(const T** data, Packet* l1, Packet* l2,
Packet* l3) {
LoadTwoScalars(data, l1, l2);
LoadSingleScalar(data, l3);
}
template <typename T>
ALWAYS_INLINE void LoadSixScalars(const T** data, Packet* l1, Packet* l2,
Packet* l3, Packet* l4, Packet* l5,
Packet* l6) {
LoadFourScalars(data, l1, l2, l3, l4);
LoadTwoScalars(data, l5, l6);
}
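// MulAdd loads 2 * kNumOperands packed bfloat16 values, expands them into two
// float packets and accumulates a * b into two consecutive output packets,
// advancing the input and output pointers. MulAdd3Way does the same for three
// scaled input streams at once.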
ALWAYS_INLINE void MulAdd(const Packet a, const bfloat16** binp, float** out) {
auto inp = reinterpret_cast<const float*>(*binp);
const auto b = LOAD(inp);
EXPAND_BFLOAT_L(b, b_0);
EXPAND_BFLOAT_U(b, b_1);
*binp += 2 * kNumOperands;
auto c1 = LOAD(*out);
auto c2 = LOAD(*out + kNumOperands);
FMA(a, b_0, c1, c1);
FMA(a, b_1, c2, c2);
STORE(*out, c1);
STORE(*out + kNumOperands, c2);
*out += 2 * kNumOperands;
}
ALWAYS_INLINE void MulAdd3Way(const Packet a1, const Packet a2, const Packet a3,
const bfloat16** binp1, const bfloat16** binp2,
const bfloat16** binp3, float** out) {
auto inp1 = reinterpret_cast<const float*>(*binp1);
auto inp2 = reinterpret_cast<const float*>(*binp2);
auto inp3 = reinterpret_cast<const float*>(*binp3);
auto c1 = LOAD(*out);
auto c2 = LOAD(*out + kNumOperands);
const auto b1 = LOAD(inp1);
EXPAND_BFLOAT_L(b1, b1_0);
EXPAND_BFLOAT_U(b1, b1_1);
*binp1 += 2 * kNumOperands;
const auto b2 = LOAD(inp2);
EXPAND_BFLOAT_L(b2, b2_0);
EXPAND_BFLOAT_U(b2, b2_1);
*binp2 += 2 * kNumOperands;
const auto b3 = LOAD(inp3);
EXPAND_BFLOAT_L(b3, b3_0);
EXPAND_BFLOAT_U(b3, b3_1);
*binp3 += 2 * kNumOperands;
FMA(a1, b1_0, c1, c1);
FMA(a1, b1_1, c2, c2);
FMA(a2, b2_0, c1, c1);
FMA(a2, b2_1, c2, c2);
FMA(a3, b3_0, c1, c1);
FMA(a3, b3_1, c2, c2);
STORE(*out, c1);
STORE(*out + kNumOperands, c2);
*out += 2 * kNumOperands;
}
ALWAYS_INLINE void | #include "tensorflow/core/kernels/sparse_matmul_op.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/bfloat16.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
random::PhiloxRandom philox(1, 1);
random::SimplePhilox rnd(&philox);
using Eigen::operator==;
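// Zeroes each element of `t` with probability `sparsity`; elements that are
// kept but happen to equal zero are set to one so the realized sparsity
// matches the requested value.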
template <typename T>
void Sparsify(Tensor* t, float sparsity) {
const int64_t N = t->NumElements();
CHECK_LE(sparsity, 1);
auto flat = t->flat<T>();
if (sparsity == 1) {
flat.setZero();
return;
}
static const uint32 K = 10000;
for (int64_t i = 0; i < N; ++i) {
if (rnd.Uniform(K) < sparsity * K) {
flat(i) = T(0);
} else if (flat(i) == T(0)) {
flat(i) = T(1);
}
}
}
Node* SparseMatMulNode(Graph* g, Node* in0, Node* in1, bool transpose_a,
bool transpose_b, bool a_sparse, bool b_sparse) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "SparseMatMul")
.Input(in0)
.Input(in1)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Attr("a_is_sparse", a_sparse)
.Attr("b_is_sparse", b_sparse)
.Finalize(g, &ret));
return ret;
}
template <typename TA, typename TB>
static Graph* SparseMatMulHelper(Graph* g, int m, int n, int d,
float sparsity_a, float sparsity_b,
bool transpose_a, bool transpose_b) {
bool a_sparse = (sparsity_a > 0);
bool b_sparse = (sparsity_b > 0);
auto left_shape = transpose_a ? TensorShape({d, m}) : TensorShape({m, d});
Tensor left(DataTypeToEnum<TA>::value, left_shape);
left.flat<TA>().setRandom();
Sparsify<TA>(&left, sparsity_a);
auto right_shape = transpose_b ? TensorShape({n, d}) : TensorShape({d, n});
Tensor right(DataTypeToEnum<TB>::value, right_shape);
right.flat<TB>().setRandom();
Sparsify<TB>(&right, sparsity_b);
SparseMatMulNode(g, test::graph::Constant(g, left),
test::graph::Constant(g, right), transpose_a, transpose_b,
a_sparse, b_sparse);
return g;
}
template <typename TA, typename TB>
static Graph* SparseMatMul(int m, int n, int d, float sparsity_a,
float sparsity_b, bool transpose_a,
bool transpose_b) {
Graph* g = new Graph(OpRegistry::Global());
return SparseMatMulHelper<TA, TB>(g, m, n, d, sparsity_a, sparsity_b,
transpose_a, transpose_b);
}
static Graph* ReplicatedSparseMatMul(int m, int n, int d, float sparsity_1,
float sparsity_2, int copies) {
Graph* g = new Graph(OpRegistry::Global());
for (int i = 0; i < copies; ++i) {
SparseMatMulHelper<float, float>(g, m, n, d, sparsity_1, sparsity_2, false,
false);
}
return g;
}
#define BM_SPARSE(M, K, N, S1, S2, TRA, TRB, TA, TB) \
static void \
BM_Sparse##_##M##_##K##_##N##_##S1##_##S2##_##TRA##_##TRB##_##TA##_##TB( \
::testing::benchmark::State& state) { \
auto label = strings::Printf("tr_a: %d tr_b: %d sp_a: %0.2f sp_b: %0.2f", \
TRA, TRB, S1 / 100.0, S2 / 100.0); \
state.SetLabel(label); \
auto g = SparseMatMul<TA, TB>(M, N, K, S1 / 100.0, S2 / 100.0, TRA, TRB); \
test::Benchmark("cpu", g, false).Run(state); \
} \
BENCHMARK( \
BM_Sparse##_##M##_##K##_##N##_##S1##_##S2##_##TRA##_##TRB##_##TA##_##TB) \
->UseRealTime();
#define BM_SPARSE_REPLICATED(M, K, N, S1, S2, Copies) \
static void BM_Sparse_replicated##_##M##_##K##_##N##_##S1##_##S2##_##Copies( \
::testing::benchmark::State& state) { \
auto label = strings::Printf("copies: %d sp_a: %0.2f sp_b: %0.2f", \
(Copies), S1 / 100.0, S2 / 100.0); \
state.SetLabel(label); \
auto g = \
ReplicatedSparseMatMul(M, N, K, S1 / 100.0, S2 / 100.0, (Copies)); \
test::Benchmark("cpu", g, false).Run(state); \
state.SetItemsProcessed(state.iterations() * M * K * N * Copies * 2); \
} \
BENCHMARK(BM_Sparse_replicated##_##M##_##K##_##N##_##S1##_##S2##_##Copies) \
->UseRealTime();
#define BM_SPARSE_FLOAT(M, K, N, S1, S2, TRA, TRB) \
BM_SPARSE(M, K, N, S1, S2, TRA, TRB, float, float)
#define BM_SPARSE_BFLOAT16(M, K, N, S1, S2, TRA, TRB) \
BM_SPARSE(M, K, N, S1, S2, TRA, TRB, bfloat16, bfloat16)
#define BM_SPARSE_FLOAT_BFLOAT16(M, K, N, S1, S2, TRA, TRB) \
BM_SPARSE(M, K, N, S1, S2, TRA, TRB, float, bfloat16)
#define BM_SPARSE_BFLOAT16_FLOAT(M, K, N, S1, S2, TRA, TRB) \
BM_SPARSE(M, K, N, S1, S2, TRA, TRB, bfloat16, float)
BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 0, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 1, 0, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 50, 0, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 99, 0, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 50, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, false, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, true, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, false, true);
BM_SPARSE_FLOAT(2048, 2048, 2048, 85, 0, true, true);
BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, true, false);
BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, false, true);
BM_SPARSE_FLOAT(2048, 2048, 2048, 0, 85, true, true);
BM_SPARSE_FLOAT(1024, 1024, 1024, 0, 0, false, false);
BM_SPARSE_FLOAT(1024, 1024, 1024, 1, 0, false, false);
BM_SPARSE_FLOAT(1024, 1024, 1024, 85, 0, false, false);
BM_SPARSE_FLOAT(256, 256, 256, 1, 0, false, false);
BM_SPARSE_FLOAT(512, 512, 512, 1, 0, false, false);
BM_SPARSE_FLOAT(2560, 400, 1024, 85, 0, false, false);
BM_SPARSE_FLOAT(2560, 400, 1024, 85, 0, true, false);
BM_SPARSE_FLOAT(400, 800, 2560, 85, 0, false, false);
BM_SPARSE_FLOAT(400, 2560, 1024, 85, 0, false, false);
BM_SPARSE_FLOAT(400, 1024, 256, 85, 0, false, false);
BM_SPARSE_FLOAT(400, 256, 1, 85, 0, false, false);
BM_SPARSE_REPLICATED(400, 800, 2560, 85, 0, 6);
BM_SPARSE_REPLICATED(400, 2560, 1024, 85, 0, 6);
BM_SPARSE_REPLICATED(400, 1024, 256, 85, 0, 6);
BM_SPARSE_REPLICATED(400, 256, 1, 85, 0, 6);
BM_SPARSE_FLOAT(2048, 1792, 1024, 85, 0, false, false);
BM_SPARSE_FLOAT(2048, 1024, 768, 85, 0, false, false);
BM_SPARSE_FLOAT(2048, 768, 512, 85, 0, false, false);
BM_SPARSE_FLOAT(2048, 512, 256, 85, 0, false, false);
BM_SPARSE_FLOAT(2049, 1792, 1024, 85, 0, false, false);
BM_SPARSE_FLOAT(2049, 1024, 768, 85, 0, false, false);
BM_SPARSE_FLOAT(2049, 768, 512, 85, 0, false, false);
BM_SPARSE_FLOAT(2049, 512, 256, 85, 0, false, false);
BM_SPARSE_REPLICATED(2048, 1792, 1024, 85, 0, 6);
BM_SPARSE_REPLICATED(2048, 1024, 768, 85, 0, 6);
BM_SPARSE_REPLICATED(2048, 768, 512, 85, 0, 6);
BM_SPARSE_REPLICATED(2048, 512, 256, 85, 0, 6);
BM_SPARSE_BFLOAT16(2048, 2048, 2048, 0, 0, false, false);
BM_SPARSE_BFLOAT16(2048, 2048, 2048, 1, 0, false, false);
BM_SPARSE_BFLOAT16(2048, 2048, 2048, 85, 0, false, false);
BM_SPARSE_BFLOAT16(2048, 2048, 2048, 99, 0, false, false);
BM_SPARSE_BFLOAT16_FLOAT(2048, 2048, 2048, 85, 0, false, false);
BM_SPARSE_BFLOAT16_FLOAT(2048, 2048, 2048, 99, 0, false, false);
BM_SPARSE_FLOAT_BFLOAT16(2048, 2048, 2048, 85, 0, false, false);
BM_SPARSE_FLOAT_BFLOAT16(2048, 2048, 2048, 99, 0, false, false);
static Graph* MultiSparseMatMul(int m, int n, int d, float sparsity_1,
float sparsity_2, int copies) {
Graph* g = new Graph(OpRegistry::Global());
for (int i = 0; i < copies; ++i) {
SparseMatMulHelper<float, float>(g, d, n, m, sparsity_1, sparsity_2, true,
false);
SparseMatMulHelper<float, float>(g, m, d, n, sparsity_2, 0, false, true);
}
return g;
}
#define BM_SPARSE_MULTI(M, K, N, S1, S2, Copies) \
  static void BM_Sparse_Multi##_##M##_##K##_##N##_##S1##_##S2##_##Copies(    \
      ::testing::benchmark::State& state) {                                  \
auto label = strings::Printf("%d_%d_%d_%d_%0.2f_%0.2f", M, K, N, Copies, \
S1 / 100.0, S2 / 100.0); \
state.SetLabel(label); \
auto g = MultiSparseMatMul(M, N, K, S1 / 100.0, S2 / 100.0, Copies); \
test::Benchmark("cpu", g, false).Run(state); \
state.SetItemsProcessed(state.iterations() * M * K * N * 2 * 2 * Copies); \
} \
BENCHMARK(BM_Sparse_Multi##_##M##_##K##_##N##_##S1##_##S2##_##Copies) \
->UseRealTime();
BM_SPARSE_MULTI(1024, 2140, 4096, 0, 82, 1);
BM_SPARSE_MULTI(1024, 4096, 2048, 83, 83, 1);
BM_SPARSE_MULTI(400, 800, 2560, 85, 85, 1);
BM_SPARSE_MULTI(400, 2560, 1024, 85, 85, 1);
BM_SPARSE_MULTI(400, 1024, 256, 85, 85, 1);
BM_SPARSE_MULTI(400, 256, 1, 85, 85, 1);
BM_SPARSE_MULTI(2048, 1792, 1024, 85, 85, 1);
BM_SPARSE_MULTI(2048, 1024, 768, 85, 85, 1);
BM_SPARSE_MULTI(2048, 768, 512, 85, 85, 1);
BM_SPARSE_MULTI(2048, 512, 256, 85, 85, 1);
BM_SPARSE_MULTI(2048, 1792, 1024, 85, 85, 3);
BM_SPARSE_MULTI(2048, 1024, 768, 85, 85, 3);
BM_SPARSE_MULTI(2048, 768, 512, 85, 85, 3);
BM_SPARSE_MULTI(2048, 512, 256, 85, 85, 3);
BM_SPARSE_MULTI(2048, 1792, 1024, 85, 85, 6);
BM_SPARSE_MULTI(2048, 1024, 768, 85, 85, 6);
BM_SPARSE_MULTI(2048, 768, 512, 85, 85, 6);
BM_SPARSE_MULTI(2048, 512, 256, 85, 85, 6);
}
namespace Eigen {
namespace internal {
class SparseMatmulOpTest : public ::testing::Test {
protected:
SparseMatmulOpTest()
: PacketSize(Eigen::internal::packet_traits<float>::size) {
typedef typename NumTraits<float>::Real RealFloat;
for (int i = 0; i < kMaxPacketSize; ++i) {
data1[i] = internal::random<float>() / RealFloat(PacketSize);
data2[i] = internal::random<float>() / RealFloat(PacketSize);
data3[i] = internal::random<float>() / RealFloat(PacketSize);
}
for (int i = kMaxPacketSize; i < kMaxPacketSize * 2; ++i) {
data3[i] = internal::random<float>() / RealFloat(PacketSize);
}
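    // Truncate each data3 value to an exact bfloat16 (zero the low-order half
    // of its mantissa) and pack the retained 16-bit patterns contiguously into
    // data3_bfloat16, honoring host endianness.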
for (int i = 0; i < kMaxPacketSize * 2; ++i) {
uint16_t* data3_p = reinterpret_cast<uint16_t*>(&data3[i]);
uint16_t* data3_bfloat16_p =
reinterpret_cast<uint16_t*>(data3_bfloat16) + i;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
data3_p[1] = 0;
data3_bfloat16_p[0] = data3_p[0];
#else
data3_p[0] = 0;
data3_bfloat16_p[0] = data3_p[1];
#endif
}
}
bool areApprox(const float* a, const float* b, int size) {
for (int i = 0; i < size; ++i) {
if (a[i] != b[i] && !internal::isApprox(a[i], b[i])) {
auto ma = Map<const Matrix<float, 1, Dynamic> >(a, size);
auto mb = Map<const Matrix<float, 1, Dynamic> >(b, size);
std::cout << "[" << ma << "]"
<< " != [" << mb << "], differences: [" << (mb - ma) << "]\n";
return false;
}
}
return true;
}
#ifdef EIGEN_VECTORIZE_AVX512
static const int kMaxPacketSize = 16;
#elif defined EIGEN_VECTORIZE_AVX || defined EIGEN_VECTORIZE_AVX2
static const int kMaxPacketSize = 8;
#else
static constexpr int kMaxPacketSize = 4;
#endif
typedef typename Eigen::internal::packet_traits<float>::type Packet;
const int PacketSize;
EIGEN_ALIGN_MAX float data1[kMaxPacketSize];
EIGEN_ALIGN_MAX float data2[kMaxPacketSize];
EIGEN_ALIGN_MAX float data3[kMaxPacketSize * 2];
EIGEN_ALIGN_MAX float data3_bfloat16[kMaxPacketSize];
EIGEN_ALIGN_MAX float ref[kMaxPacketSize];
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
TEST_F(SparseMatmulOpTest, BroadcastPacketTest) {
for (int i = 0; i < PacketSize; ++i) ref[i] = data1[0];
internal::pstoreu(data2, internal::pbroadcast_first<Packet>(
internal::ploadu<Packet>(data1)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
if (PacketSize > 1) {
for (int i = 0; i < PacketSize; ++i) ref[i] = data1[1];
internal::pstoreu(data2, internal::pbroadcast_second<Packet>(
internal::ploadu<Packet>(data1)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
if (PacketSize > 2) {
for (int i = 0; i < PacketSize; ++i) ref[i] = data1[2];
internal::pstoreu(data2, internal::pbroadcast_third<Packet>(
internal::ploadu<Packet>(data1)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
if (PacketSize > 3) {
for (int i = 0; i < PacketSize; ++i) ref[i] = data1[3];
internal::pstoreu(data2, internal::pbroadcast_fourth<Packet>(
internal::ploadu<Packet>(data1)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
}
}
}
}
TEST_F(SparseMatmulOpTest, InterleavePacketTest) {
if (PacketSize == 8) {
for (int i = 0; i < PacketSize / 4; ++i) ref[i] = data1[i];
for (int i = PacketSize / 4; i < PacketSize / 2; ++i)
ref[i] = data1[i + PacketSize / 4];
for (int i = PacketSize / 2; i < 3 * PacketSize / 4; ++i)
ref[i] = data1[i - PacketSize / 4];
for (int i = 3 * PacketSize / 4; i < PacketSize; ++i) ref[i] = data1[i];
} else {
for (int i = 0; i < PacketSize; ++i) ref[i] = data1[i];
}
internal::pstoreu(data2, internal::pinterleave4x64<Packet>(
internal::ploadu<Packet>(data1)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
}
TEST_F(SparseMatmulOpTest, Bfloat16ExpandTest) {
if (PacketSize == 8) {
for (int i = 0; i < PacketSize / 2; ++i) {
ref[i] = data3[i];
}
for (int i = 0; i < PacketSize / 2; ++i) {
ref[i + PacketSize / 2] = data3[i + PacketSize];
}
} else {
for (int i = 0; i < PacketSize; ++i) {
ref[i] = data3[i];
}
}
internal::pstoreu(data2, internal::pexpand_bf16_l<Packet>(
internal::ploadu<Packet>(data3_bfloat16)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
if (PacketSize == 8) {
for (int i = 0; i < PacketSize / 2; ++i) {
ref[i] = data3[i + PacketSize / 2];
}
for (int i = 0; i < PacketSize / 2; ++i) {
ref[i + PacketSize / 2] = data3[i + 3 * PacketSize / 2];
}
} else {
for (int i = 0; i < PacketSize; ++i) {
ref[i] = data3[i + PacketSize];
}
}
internal::pstoreu(data2, internal::pexpand_bf16_u<Packet>(
internal::ploadu<Packet>(data3_bfloat16)));
ASSERT_TRUE(areApprox(ref, data2, PacketSize));
}
TEST_F(SparseMatmulOpTest, Bfloat16LoadTest) {
if (PacketSize >= 4) {
for (int i = 0; i < 4; ++i) ref[i] = data3[i];
internal::pstoreu(data2, internal::pload4bf16<Packet>(data3_bfloat16));
ASSERT_TRUE(areApprox(ref, data2, 4));
internal::pstoreu(data2, internal::pload2bf16<Packet>(data3_bfloat16));
ASSERT_TRUE(areApprox(ref, data2, 2));
}
}
}
} |
1,513 | cpp | tensorflow/tensorflow | multinomial_op | tensorflow/core/kernels/multinomial_op.cc | tensorflow/core/kernels/multinomial_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MULTINOMIAL_OP_H_
#define TENSORFLOW_CORE_KERNELS_MULTINOMIAL_OP_H_
namespace tensorflow {
namespace functor {
template <typename Device, typename T, typename OutputType>
struct MultinomialFunctor;
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/multinomial_op.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/stateless_random_ops.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
template <typename Device, typename T, typename OutputType>
struct MultinomialFunctor {
void operator()(OpKernelContext* ctx, const Device& d,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<float>::Flat noises,
typename TTypes<float>::Flat scores,
typename TTypes<float>::Flat scratch, int batch_size,
int num_classes, int num_samples,
const random::PhiloxRandom& gen,
typename TTypes<OutputType>::Matrix output);
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
extern template struct MultinomialFunctor<GPUDevice, Eigen::half, int32>;
extern template struct MultinomialFunctor<GPUDevice, float, int32>;
extern template struct MultinomialFunctor<GPUDevice, double, int32>;
extern template struct MultinomialFunctor<GPUDevice, int32, int32>;
extern template struct MultinomialFunctor<GPUDevice, int64_t, int32>;
extern template struct MultinomialFunctor<GPUDevice, Eigen::half, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, float, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, double, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, int32, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, int64_t, int64_t>;
#endif
template <typename T, typename OutputType>
struct MultinomialFunctor<CPUDevice, T, OutputType> {
void operator()(OpKernelContext* ctx, const CPUDevice& d,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<float>::Flat ,
typename TTypes<float>::Flat ,
typename TTypes<float>::Flat , int batch_size,
int num_classes, int num_samples,
const random::PhiloxRandom& gen,
typename TTypes<OutputType>::Matrix output) {
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
auto DoWork = [ctx, num_samples, num_classes, &gen, &output, &logits](
int64_t start_row, int64_t limit_row) {
random::PhiloxRandom gen_copy = gen;
gen_copy.Skip(start_row * (num_samples + 3) / 4);
random::SimplePhilox simple_philox(&gen_copy);
Tensor cdf_tensor;
OP_REQUIRES_OK(ctx,
ctx->allocate_temp(DT_DOUBLE, TensorShape({num_classes}),
&cdf_tensor));
auto cdf = cdf_tensor.flat<double>();
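      // Per row: shift by the row max for numerical stability, exponentiate,
      // accumulate a running-sum CDF over the finite logits, then draw each
      // sample by binary search (std::upper_bound) on a uniform value in
      // [0, running_total).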
for (int64_t b = start_row; b < limit_row; ++b) {
const auto* logits_row = &logits(b, 0);
T max = std::numeric_limits<T>::lowest();
for (int64_t j = 0; j < num_classes; ++j) {
if (Eigen::numext::isfinite(logits_row[j])) {
max = std::max(max, logits_row[j]);
}
}
const double max_logit = static_cast<double>(max);
cdf = (logits.template chip<0>(b).template cast<double>() - max_logit)
.exp();
double running_total = 0;
for (int64_t j = 0; j < num_classes; ++j) {
if (Eigen::numext::isfinite(logits_row[j])) {
running_total += cdf(j);
}
cdf(j) = running_total;
}
const double* cdf_begin = cdf.data();
const double* cdf_end = cdf.data() + num_classes;
for (int64_t j = 0; j < num_samples; ++j) {
const double to_find = simple_philox.RandDouble() * running_total;
auto found_iter = std::upper_bound(cdf_begin, cdf_end, to_find);
output(b, j) = std::distance(cdf_begin, found_iter);
}
}
};
const int64_t cost =
50 * (num_samples * std::log(num_classes) / std::log(2) + num_classes);
Shard(worker_threads.num_threads, worker_threads.workers, batch_size, cost,
DoWork);
}
};
}
namespace {
template <typename Device, typename T, typename OutputType>
class MultinomialOp : public OpKernel {
public:
explicit MultinomialOp(OpKernelConstruction* context) : OpKernel(context) {}
void DoCompute(OpKernelContext* ctx, const Tensor& logits_t,
const Tensor& num_samples_t, GuardedPhiloxRandom* generator) {
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(logits_t.shape()),
errors::InvalidArgument("logits should be a matrix, got shape ",
logits_t.shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(num_samples_t.shape()),
errors::InvalidArgument("num_samples should be a scalar, got shape ",
num_samples_t.shape().DebugString()));
const int num_samples = num_samples_t.scalar<int>()();
OP_REQUIRES(ctx, num_samples >= 0,
errors::InvalidArgument(
"num_samples should be nonnegative, got ", num_samples));
for (int i = 0; i < 2; i++) {
const int64_t dim = logits_t.dim_size(i);
OP_REQUIRES(ctx, static_cast<int>(dim) == dim,
errors::InvalidArgument(
"logits.shape = ", logits_t.shape().DebugString(),
" too large for int"));
}
const int batch_size = static_cast<int>(logits_t.dim_size(0));
const int num_classes = static_cast<int>(logits_t.dim_size(1));
OP_REQUIRES(ctx, num_classes > 0,
errors::InvalidArgument("num_classes should be positive, got ",
num_classes));
Tensor* samples_t;
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, TensorShape({batch_size, num_samples}),
&samples_t));
if (samples_t->NumElements() > 0) {
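      // noises/scores/scratch are temporaries used only by the GPU functor;
      // the CPU implementation ignores these arguments.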
Tensor noises, scores, scratch;
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(
DT_FLOAT, TensorShape({batch_size, num_samples, num_classes}),
&noises));
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(
DT_FLOAT, TensorShape({batch_size, num_samples, num_classes}),
&scores));
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(DT_FLOAT, TensorShape({batch_size, num_samples}),
&scratch));
}
int num_samples_ceil_4 = (num_samples + 3) / 4 * 4;
if (std::is_same<Device, CPUDevice>::value) num_samples_ceil_4 *= 2;
auto rng =
generator->ReserveRandomOutputs(batch_size * num_samples_ceil_4, 256);
functor::MultinomialFunctor<Device, T, OutputType>()(
ctx, ctx->eigen_device<Device>(), logits_t.matrix<T>(),
noises.flat<float>(), scores.flat<float>(), scratch.flat<float>(),
batch_size, num_classes, num_samples, rng,
samples_t->matrix<OutputType>());
}
}
};
template <typename Device, typename T, typename OutputType>
class StatefulMultinomialOp : public MultinomialOp<Device, T, OutputType> {
public:
explicit StatefulMultinomialOp(OpKernelConstruction* ctx)
: MultinomialOp<Device, T, OutputType>(ctx) {
OP_REQUIRES_OK(ctx, generator_.Init(ctx));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& logits_t = ctx->input(0);
const Tensor& num_samples_t = ctx->input(1);
this->DoCompute(ctx, logits_t, num_samples_t, &generator_);
}
private:
GuardedPhiloxRandom generator_;
};
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatefulMultinomialOp<CPUDevice, TYPE, int32>); \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatefulMultinomialOp<CPUDevice, TYPE, int64>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatefulMultinomialOp<GPUDevice, TYPE, int32>) \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatefulMultinomialOp<GPUDevice, TYPE, int64>)
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#endif
template <typename Device, typename T, typename OutputType>
class StatelessMultinomialOp : public MultinomialOp<Device, T, OutputType> {
public:
explicit StatelessMultinomialOp(OpKernelConstruction* ctx)
: MultinomialOp<Device, T, OutputType>(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& logits_t = ctx->input(0);
const Tensor& num_samples_t = ctx->input(1);
const Tensor& seed_t = ctx->input(2);
OP_REQUIRES(ctx, seed_t.dims() == 1 && seed_t.dim_size(0) == 2,
errors::InvalidArgument("seed must have shape [2], not ",
seed_t.shape().DebugString()));
random::PhiloxRandom::Key key;
random::PhiloxRandom::ResultType counter;
OP_REQUIRES_OK(ctx, GenerateKey(seed_t, &key, &counter));
GuardedPhiloxRandom generator;
generator.Init(counter, key);
this->DoCompute(ctx, logits_t, num_samples_t, &generator);
}
private:
GuardedPhiloxRandom generator_;
};
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatelessMultinomialOp<CPUDevice, TYPE, int32>); \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatelessMultinomialOp<CPUDevice, TYPE, int64>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.HostMemory("seed") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatelessMultinomialOp<GPUDevice, TYPE, int32>) \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.HostMemory("seed") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatelessMultinomialOp<GPUDevice, TYPE, int64>)
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#endif
}
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* Multinomial(int batch_size, int num_classes, int num_samples) {
Graph* g = new Graph(OpRegistry::Global());
Tensor logits_t(DT_FLOAT, TensorShape({batch_size, num_classes}));
Tensor num_samples_t(DT_INT32, TensorShape());
logits_t.flat<float>().setRandom();
num_samples_t.scalar<int32>().setConstant(num_samples);
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("multinomial"), "Multinomial")
.Input(test::graph::Constant(g, logits_t))
.Input(test::graph::Constant(g, num_samples_t))
.Attr("T", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
#define BM_MultinomialDev(DEVICE, B, C, S) \
static void BM_Multinomial_##DEVICE##_##B##_##C##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Multinomial(B, C, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * C * S * \
state.iterations()); \
} \
BENCHMARK(BM_Multinomial_##DEVICE##_##B##_##C##_##S);
#define BM_MultinomialBCS(B, C, S) \
BM_MultinomialDev(cpu, B, C, S); \
BM_MultinomialDev(gpu, B, C, S);
BM_MultinomialBCS(1, 10000, 4);
BM_MultinomialBCS(1, 10000, 128);
BM_MultinomialBCS(1, 10000, 10000);
BM_MultinomialBCS(1, 100000, 4);
BM_MultinomialBCS(32, 10000, 4);
BM_MultinomialBCS(32, 10000, 128);
BM_MultinomialBCS(32, 100000, 4);
BM_MultinomialBCS(128, 100000, 1);
} |
1,514 | cpp | tensorflow/tensorflow | random_op | tensorflow/core/kernels/random_op.cc | tensorflow/core/kernels/random_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_RANDOM_OP_H_
#define TENSORFLOW_CORE_KERNELS_RANDOM_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/lib/random/random_distributions.h"
namespace tensorflow {
class OpKernelContext;
namespace functor {
template <typename Device, class Distribution>
struct FillPhiloxRandom;
typedef Eigen::ThreadPoolDevice CPUDevice;
template <class Distribution>
struct FillPhiloxRandom<CPUDevice, Distribution> {
void operator()(OpKernelContext* ctx, const CPUDevice& d, const uint64* key,
const uint64* counter, random::PhiloxRandom gen,
typename Distribution::ResultElementType* data, int64_t size,
Distribution dist);
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
typedef Eigen::GpuDevice GPUDevice;
template <class Distribution>
struct FillPhiloxRandom<GPUDevice, Distribution> {
void operator()(OpKernelContext* ctx, const GPUDevice& d, const uint64* key,
const uint64* counter, random::PhiloxRandom gen,
typename Distribution::ResultElementType* data, int64_t size,
Distribution dist);
};
#endif
}
}
#endif
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cmath>
#include <memory>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/random_op_cpu.h"
#include "tensorflow/core/lib/hash/crc32c.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
#if EIGEN_COMP_GNUC && __cplusplus > 199711L
#define DISABLE_FLOAT_EQUALITY_WARNING \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop")
#else
#define DISABLE_FLOAT_EQUALITY_WARNING
#define ENABLE_FLOAT_EQUALITY_WARNING
#endif
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
static Status AllocateOutputWithShape(OpKernelContext* ctx, const Tensor& shape,
int index, Tensor** output) {
TensorShape tensor_shape;
TF_RETURN_IF_ERROR(tensor::MakeShape(shape, &tensor_shape));
return ctx->allocate_output(index, tensor_shape, output);
}
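// Reads the requested output shape from input 0, allocates the output and
// fills it with samples drawn from `Distribution` using the op's guarded
// (stateful) Philox generator.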
template <typename Device, class Distribution>
class PhiloxRandomOp : public OpKernel {
public:
typedef typename Distribution::ResultElementType T;
explicit PhiloxRandomOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, generator_.Init(ctx));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape = ctx->input(0);
Tensor* output;
OP_REQUIRES_OK(ctx, AllocateOutputWithShape(ctx, shape, 0, &output));
auto output_flat = output->flat<T>();
functor::FillPhiloxRandom<Device, Distribution>()(
ctx, ctx->eigen_device<Device>(), nullptr, nullptr,
generator_.ReserveRandomOutputs(output_flat.size(), 256),
output_flat.data(), output_flat.size(), Distribution());
}
private:
GuardedPhiloxRandom generator_;
};
template <typename Device, class IntType>
class RandomUniformIntOp : public OpKernel {
public:
explicit RandomUniformIntOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, generator_.Init(ctx));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape = ctx->input(0);
const Tensor& minval = ctx->input(1);
const Tensor& maxval = ctx->input(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(minval.shape()),
errors::InvalidArgument("minval must be 0-D, got shape ",
minval.shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(maxval.shape()),
errors::InvalidArgument("maxval must be 0-D, got shape ",
maxval.shape().DebugString()));
Tensor* output;
OP_REQUIRES_OK(ctx, AllocateOutputWithShape(ctx, shape, 0, &output));
if (output->NumElements() == 0) return;
IntType lo = minval.scalar<IntType>()();
IntType hi = maxval.scalar<IntType>()();
OP_REQUIRES(
ctx, lo < hi,
errors::InvalidArgument("Need minval < maxval, got ", lo, " >= ", hi));
typedef random::UniformDistribution<random::PhiloxRandom, IntType>
Distribution;
Distribution dist(lo, hi);
auto output_flat = output->flat<IntType>();
functor::FillPhiloxRandom<Device, Distribution>()(
ctx, ctx->eigen_device<Device>(), nullptr, nullptr,
generator_.ReserveRandomOutputs(output_flat.size(), 256),
output_flat.data(), output_flat.size(), dist);
}
private:
GuardedPhiloxRandom generator_;
};
template <typename T>
class RandomGammaOp : public OpKernel {
public:
explicit RandomGammaOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, generator_.Init(context));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_t = ctx->input(0);
const Tensor& alpha_t = ctx->input(1);
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(shape_t.shape()) &&
(shape_t.dtype() == DataType::DT_INT32 ||
shape_t.dtype() == DataType::DT_INT64),
errors::InvalidArgument(
"shape must be a vector of {int32,int64}, got shape: ",
shape_t.DebugString()));
TensorShape samples_shape;
if (shape_t.dtype() == DataType::DT_INT32) {
auto vec = shape_t.flat<int32>();
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(),
&samples_shape));
} else if (shape_t.dtype() == DataType::DT_INT64) {
auto vec = shape_t.flat<int64_t>();
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(),
&samples_shape));
}
const int64_t samples_per_alpha = samples_shape.num_elements();
OP_REQUIRES_OK(ctx, samples_shape.AppendShapeWithStatus(alpha_t.shape()));
Tensor* samples_t = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, samples_shape, &samples_t));
if (samples_shape.num_elements() == 0) return;
using random::PhiloxRandom;
typedef random::NormalDistribution<PhiloxRandom, double> Normal;
typedef random::UniformDistribution<PhiloxRandom, double> Uniform;
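// UNIFORM(X) defines X as one double-precision uniform sample, refilling a
// batch of Uniform::kResultElementCount values from the per-output Philox
// stream whenever the previous batch is exhausted.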
#define UNIFORM(X) \
if (uniform_remaining == 0) { \
uniform_remaining = Uniform::kResultElementCount; \
uniform_result = uniform(&gen); \
} \
uniform_remaining--; \
double X = uniform_result[uniform_remaining]
static constexpr int kReservedSamplesPerOutput = 256;
const auto alpha_flat = alpha_t.flat<T>().data();
const int64_t num_alphas = alpha_t.NumElements();
OP_REQUIRES(ctx, num_alphas > 0,
errors::InvalidArgument(
"Input alpha should have non-zero element count, got: ",
num_alphas));
auto samples_flat = samples_t->flat<T>().data();
PhiloxRandom rng = generator_.ReserveRandomOutputs(
samples_per_alpha * num_alphas, kReservedSamplesPerOutput);
auto DoWork = [samples_per_alpha, num_alphas, &rng, samples_flat,
alpha_flat](int64_t start_output, int64_t limit_output) {
using Eigen::numext::exp;
using Eigen::numext::log;
using Eigen::numext::log1p;
using Eigen::numext::pow;
Normal normal;
Uniform uniform;
typename Normal::ResultType norm_result;
typename Uniform::ResultType uniform_result;
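      // Each output element gets its own Philox substream (obtained by
      // skipping ahead kReservedSamplesPerOutput * output_idx), so results do
      // not depend on how the work is sharded. output_idx is advanced inside
      // the inner sample loops below.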
for (int64_t output_idx = start_output; output_idx < limit_output;
) {
int64_t alpha_idx = output_idx / samples_per_alpha;
T* const samples_alpha_offset = samples_flat + alpha_idx;
const double alpha = static_cast<double>(alpha_flat[alpha_idx]);
DISABLE_FLOAT_EQUALITY_WARNING
if (alpha == static_cast<double>(1.0)) {
ENABLE_FLOAT_EQUALITY_WARNING
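        // Gamma(alpha = 1) is the standard exponential distribution, sampled
        // directly by inversion: -log(1 - u).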
for (int64_t sample_idx = output_idx % samples_per_alpha;
sample_idx < samples_per_alpha && output_idx < limit_output;
sample_idx++, output_idx++) {
PhiloxRandom gen = rng;
gen.Skip(kReservedSamplesPerOutput * output_idx);
int16_t uniform_remaining = 0;
UNIFORM(u);
const double res = -log1p(-u);
samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res);
}
} else {
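        // Rejection sampler in the style of Marsaglia & Tsang: with
        // d = alpha - 1/3 and c = 1 / (3 * sqrt(d)), draw x ~ N(0, 1), set
        // v = (1 + c * x)^3 and accept d * v when the squeeze or log test
        // passes. For alpha < 1 the sample is drawn with alpha + 1 (hence
        // d = alpha + 2/3) and scaled by u^(1/alpha).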
const bool alpha_less_than_one = alpha < 1;
const double d = alpha + (alpha_less_than_one ? 2.0 / 3 : -1.0 / 3);
const double c = 1.0 / 3 / sqrt(d);
for (int64_t sample_idx = output_idx % samples_per_alpha;
sample_idx < samples_per_alpha && output_idx < limit_output;
sample_idx++, output_idx++) {
PhiloxRandom gen = rng;
gen.Skip(kReservedSamplesPerOutput * output_idx);
int16_t norm_remaining = 0;
int16_t uniform_remaining = 0;
while (true) {
if (norm_remaining == 0) {
norm_remaining = Normal::kResultElementCount;
norm_result = normal(&gen);
}
norm_remaining--;
const double x = norm_result[norm_remaining];
double v = 1 + c * x;
if (v <= 0) {
continue;
}
v = v * v * v;
UNIFORM(u);
if ((u < 1 - 0.0331 * (x * x) * (x * x)) ||
(log(u) < 0.5 * x * x + d * (1 - v + log(v)))) {
double res = d * v;
if (alpha_less_than_one) {
UNIFORM(b);
res *= pow(b, 1 / alpha);
}
samples_alpha_offset[sample_idx * num_alphas] =
static_cast<T>(res);
break;
}
}
}
}
}
};
#undef UNIFORM
static const int kElementCost = 85 + 2 * Normal::kElementCost +
Uniform::kElementCost +
3 * PhiloxRandom::kElementCost;
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers,
num_alphas * samples_per_alpha, kElementCost, DoWork);
}
private:
GuardedPhiloxRandom generator_;
RandomGammaOp(const RandomGammaOp&) = delete;
void operator=(const RandomGammaOp&) = delete;
};
}
#define REGISTER(TYPE) \
template struct functor::FillPhiloxRandom< \
CPUDevice, random::UniformDistribution<random::PhiloxRandom, TYPE>>; \
template struct functor::FillPhiloxRandom< \
CPUDevice, random::NormalDistribution<random::PhiloxRandom, TYPE>>; \
template struct functor::FillPhiloxRandom< \
CPUDevice, \
random::TruncatedNormalDistribution< \
random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>; \
REGISTER_KERNEL_BUILDER( \
Name("RandomUniform") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<CPUDevice, random::UniformDistribution< \
random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("RandomStandardNormal") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<CPUDevice, \
random::NormalDistribution<random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("TruncatedNormal") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp< \
CPUDevice, \
random::TruncatedNormalDistribution< \
random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("RandomGamma").Device(DEVICE_CPU).TypeConstraint<TYPE>("T"), \
RandomGammaOp<TYPE>)
#define REGISTER_FULL_INT(IntType) \
template struct functor::FillPhiloxRandom< \
CPUDevice, \
random::UniformFullIntDistribution<random::PhiloxRandom, IntType>>
#define REGISTER_INT(IntType) \
REGISTER_FULL_INT(IntType); \
template struct functor::FillPhiloxRandom< \
CPUDevice, random::UniformDistribution<random::PhiloxRandom, IntType>>; \
REGISTER_KERNEL_BUILDER(Name("RandomUniformInt") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.HostMemory("minval") \
.HostMemory("maxval") \
.TypeConstraint<IntType>("Tout"), \
RandomUniformIntOp<CPUDevice, IntType>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
TF_CALL_int32(REGISTER_INT);
TF_CALL_int64(REGISTER_INT);
TF_CALL_uint32(REGISTER_FULL_INT);
TF_CALL_uint64(REGISTER_FULL_INT);
#undef REGISTER
#undef REGISTER_INT
#undef REGISTER_FULL_INT
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER( \
Name("RandomUniform") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<int32>("T") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<GPUDevice, random::UniformDistribution< \
random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("RandomStandardNormal") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<int32>("T") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<GPUDevice, \
random::NormalDistribution<random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("TruncatedNormal") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<int32>("T") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp< \
GPUDevice, \
random::TruncatedNormalDistribution< \
random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>);
#define REGISTER_FULL_INT(IntType) \
template struct functor::FillPhiloxRandom< \
GPUDevice, \
random::UniformFullIntDistribution<random::PhiloxRandom, IntType>>
#define REGISTER_INT(IntType) \
REGISTER_FULL_INT(IntType); \
template struct functor::FillPhiloxRandom< \
GPUDevice, random::UniformDistribution<random::PhiloxRandom, IntType>>; \
REGISTER_KERNEL_BUILDER(Name("RandomUniformInt") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.HostMemory("minval") \
.HostMemory("maxval") \
.TypeConstraint<int32>("T") \
.TypeConstraint<IntType>("Tout"), \
RandomUniformIntOp<GPUDevice, IntType>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
TF_CALL_int32(REGISTER_INT);
TF_CALL_int64(REGISTER_INT);
TF_CALL_uint32(REGISTER_FULL_INT);
TF_CALL_uint64(REGISTER_FULL_INT);
#undef REGISTER
#undef REGISTER_INT
#undef REGISTER_FULL_INT
#endif
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
Tensor VecShape(int64_t v) {
if (v >= std::numeric_limits<int32>::max()) {
Tensor shape(DT_INT64, TensorShape({1}));
shape.vec<int64_t>()(0) = v;
return shape;
} else {
Tensor shape(DT_INT32, TensorShape({1}));
shape.vec<int32>()(0) = v;
return shape;
}
}
Graph* RandomUniform(int64_t n) {
Graph* g = new Graph(OpRegistry::Global());
test::graph::RandomUniform(g, test::graph::Constant(g, VecShape(n)),
DT_FLOAT);
return g;
}
Graph* RandomNormal(int64_t n) {
Graph* g = new Graph(OpRegistry::Global());
test::graph::RandomGaussian(g, test::graph::Constant(g, VecShape(n)),
DT_FLOAT);
return g;
}
Graph* TruncatedNormal(int64_t n) {
Graph* g = new Graph(OpRegistry::Global());
test::graph::TruncatedNormal(g, test::graph::Constant(g, VecShape(n)),
DT_FLOAT);
return g;
}
#define BM_RNG(DEVICE, RNG) \
void BM_##DEVICE##_##RNG(::testing::benchmark::State& state) { \
const int arg = state.range(0); \
\
test::Benchmark(#DEVICE, RNG(arg), false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * arg); \
} \
BENCHMARK(BM_##DEVICE##_##RNG)->Range(1 << 20, 8 << 20);
BM_RNG(cpu, RandomUniform);
BM_RNG(cpu, RandomNormal);
BM_RNG(cpu, TruncatedNormal);
BM_RNG(gpu, RandomUniform);
BM_RNG(gpu, RandomNormal);
BM_RNG(gpu, TruncatedNormal);
Tensor VecAlphas(int64_t n) {
Tensor alphas(DT_DOUBLE, TensorShape({n}));
for (int i = 0; i < n; i++) {
alphas.vec<double>()(i) =
0.25 + MathUtil::IPow(1.1, i % 2 == 0 ? i : n - i);
}
return alphas;
}
void BM_cpu_RandomGamma(::testing::benchmark::State& state) {
const int nsamp = state.range(0);
const int nalpha = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
test::graph::RandomGamma(g, test::graph::Constant(g, VecShape(nsamp)),
test::graph::Constant(g, VecAlphas(nalpha)));
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * nsamp *
nalpha);
}
BENCHMARK(BM_cpu_RandomGamma)->RangePair(1 << 14, 4 << 15, 2, 50);
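// Raw generator micro-benchmarks: PhiloxRandom produces four samples per call,
// std::mt19937 one sample per call.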
void BM_PhiloxRandom(::testing::benchmark::State& state) {
int count = 2 << 20;
random::PhiloxRandom gen(0x12345);
for (auto s : state) {
for (int j = 0; j < count; j += 4) {
auto samples = gen();
tensorflow::testing::DoNotOptimize(samples);
}
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * count);
}
BENCHMARK(BM_PhiloxRandom);
void BM_StdMTRandom(::testing::benchmark::State& state) {
int count = 2 << 20;
std::mt19937 gen(0x12345);
for (auto s : state) {
for (int j = 0; j < count; ++j) {
uint_fast32_t sample = gen();
tensorflow::testing::DoNotOptimize(sample);
}
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * count);
}
BENCHMARK(BM_StdMTRandom);
}
} |
1,515 | cpp | tensorflow/tensorflow | fake_clock_env | tensorflow/core/kernels/batching_util/fake_clock_env.cc | tensorflow/core/util/fake_clock_env_test.cc | #ifndef TENSORFLOW_CORE_UTIL_FAKE_CLOCK_ENV_H_
#define TENSORFLOW_CORE_UTIL_FAKE_CLOCK_ENV_H_
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
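// Test-only Env wrapper whose clock starts at zero and advances only when
// AdvanceByMicroseconds() is called.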
class FakeClockEnv : public EnvWrapper {
public:
explicit FakeClockEnv(Env* wrapped);
~FakeClockEnv() override = default;
void AdvanceByMicroseconds(int64_t micros);
uint64 NowMicros() const override;
private:
mutable mutex mu_;
uint64 current_time_ TF_GUARDED_BY(mu_) = 0;
FakeClockEnv(const FakeClockEnv&) = delete;
void operator=(const FakeClockEnv&) = delete;
};
}
#endif
#include "tensorflow/core/util/fake_clock_env.h"
#include <string>
namespace tensorflow {
FakeClockEnv::FakeClockEnv(Env* wrapped) : EnvWrapper(wrapped) {}
void FakeClockEnv::AdvanceByMicroseconds(int64_t micros) {
{
mutex_lock l(mu_);
current_time_ += micros;
}
}
uint64 FakeClockEnv::NowMicros() const {
{
mutex_lock l(mu_);
return current_time_;
}
}
} | #include "tensorflow/core/util/fake_clock_env.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace {
class FakeClockEnvTest : public ::testing::Test {
protected:
void SetUp() override {
fake_clock_env_ = std::make_unique<FakeClockEnv>(Env::Default());
}
void TearDown() override { fake_clock_env_.reset(); }
std::unique_ptr<FakeClockEnv> fake_clock_env_;
};
TEST_F(FakeClockEnvTest, TimeInitializedToZero) {
EXPECT_EQ(0, fake_clock_env_->NowMicros());
}
TEST_F(FakeClockEnvTest, AdvanceTimeByMicroseconds) {
int current_time = fake_clock_env_->NowMicros();
int64_t duration = 100;
current_time += duration;
fake_clock_env_->AdvanceByMicroseconds(duration);
EXPECT_EQ(current_time, fake_clock_env_->NowMicros());
for (int i = 0; i < 5; ++i) {
fake_clock_env_->AdvanceByMicroseconds(100);
current_time += 100;
}
EXPECT_EQ(current_time, fake_clock_env_->NowMicros());
current_time += duration;
duration = 200;
fake_clock_env_->AdvanceByMicroseconds(duration);
EXPECT_NE(current_time, fake_clock_env_->NowMicros());
}
}
} |
1,516 | cpp | tensorflow/tensorflow | threadsafe_status | tensorflow/core/kernels/batching_util/threadsafe_status.cc | tensorflow/core/kernels/batching_util/threadsafe_status_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_THREADSAFE_STATUS_H_
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
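// Wraps a Status behind a mutex so it can be read and updated from multiple
// threads; Update() ignores OK statuses, so the first recorded error wins.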
class ThreadSafeStatus {
public:
const Status& status() const& TF_LOCKS_EXCLUDED(mutex_);
Status status() && TF_LOCKS_EXCLUDED(mutex_);
void Update(const Status& new_status) TF_LOCKS_EXCLUDED(mutex_);
void Update(Status&& new_status) TF_LOCKS_EXCLUDED(mutex_);
private:
mutable mutex mutex_;
Status status_ TF_GUARDED_BY(mutex_);
};
}
#endif
#include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
const Status& ThreadSafeStatus::status() const& {
tf_shared_lock lock(mutex_);
return status_;
}
Status ThreadSafeStatus::status() && {
tf_shared_lock lock(mutex_);
return std::move(status_);
}
void ThreadSafeStatus::Update(const Status& new_status) {
if (new_status.ok()) {
return;
}
mutex_lock lock(mutex_);
status_.Update(new_status);
}
void ThreadSafeStatus::Update(Status&& new_status) {
if (new_status.ok()) {
return;
}
mutex_lock lock(mutex_);
status_.Update(std::forward<Status>(new_status));
}
} | #include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
TEST(ThreadSafeStatus, DefaultOk) {
ThreadSafeStatus status;
TF_EXPECT_OK(status.status());
}
TEST(ThreadSafeStatus, Update) {
ThreadSafeStatus status;
TF_EXPECT_OK(status.status());
status.Update(errors::FailedPrecondition("original error"));
EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION);
status.Update(absl::OkStatus());
EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION);
status.Update(errors::Internal("new error"));
EXPECT_EQ(status.status().code(), error::FAILED_PRECONDITION);
}
TEST(ThreadSafeStatus, Move) {
ThreadSafeStatus status;
TF_EXPECT_OK(std::move(status).status());
}
}
} |
1,517 | cpp | tensorflow/tensorflow | periodic_function | tensorflow/core/kernels/batching_util/periodic_function.cc | tensorflow/core/kernels/batching_util/periodic_function_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_PERIODIC_FUNCTION_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_PERIODIC_FUNCTION_H_
#include <functional>
#include <memory>
#include <string>
#include "absl/functional/any_invocable.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
namespace internal {
class PeriodicFunctionTestAccess;
}
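// Invokes `function` on a dedicated background thread roughly every
// `interval_micros` microseconds (after an optional startup delay) until the
// object is destroyed.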
class PeriodicFunction {
public:
struct Options {
Options() {}
ThreadOptions thread_options;
string thread_name_prefix = "periodic_function";
Env* env = Env::Default();
int64_t startup_delay_micros = 0;
};
PeriodicFunction(absl::AnyInvocable<void()> function, int64_t interval_micros,
const Options& options = Options());
~PeriodicFunction();
private:
friend class internal::PeriodicFunctionTestAccess;
void NotifyStop();
void RunLoop(int64_t start);
absl::AnyInvocable<void()> function_;
const int64_t interval_micros_;
const Options options_;
Notification stop_thread_;
std::unique_ptr<Thread> thread_ = nullptr;
PeriodicFunction(const PeriodicFunction&) = delete;
void operator=(const PeriodicFunction&) = delete;
};
}
}
#endif
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <algorithm>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace serving {
PeriodicFunction::PeriodicFunction(absl::AnyInvocable<void()> function,
const int64_t interval_micros,
const Options& options)
: function_(std::move(function)),
interval_micros_([interval_micros]() -> int64 {
if (interval_micros < 0) {
const string error = strings::StrCat(
" The value of 'interval_micros' should be >= 0: ",
interval_micros, ". ");
DCHECK(false) << error;
LOG(WARNING) << error << "Resetting it to 0.";
return 0;
}
return interval_micros;
}()),
options_(options) {
thread_.reset(options_.env->StartThread(
options_.thread_options, options_.thread_name_prefix, [this]() {
RunLoop(options_.env->NowMicros());
}));
}
PeriodicFunction::~PeriodicFunction() {
NotifyStop();
thread_.reset();
}
void PeriodicFunction::NotifyStop() {
if (!stop_thread_.HasBeenNotified()) {
stop_thread_.Notify();
}
}
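// Main loop: runs function_, then sleeps for the remainder of the interval; if
// an invocation overruns the interval, the next one starts immediately.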
void PeriodicFunction::RunLoop(const int64_t start) {
{
if (options_.startup_delay_micros > 0) {
const int64_t deadline = start + options_.startup_delay_micros;
options_.env->SleepForMicroseconds(deadline - start);
}
while (!stop_thread_.HasBeenNotified()) {
VLOG(3) << "Running function.";
const int64_t begin = options_.env->NowMicros();
function_();
const int64_t end =
std::max(static_cast<int64_t>(options_.env->NowMicros()), begin);
const int64_t deadline = begin + interval_micros_;
if (deadline > end) {
if (end > begin) {
VLOG(3) << "Reducing interval_micros from " << interval_micros_
<< " to " << (deadline - end);
}
options_.env->SleepForMicroseconds(deadline - end);
} else {
VLOG(3) << "Function took longer than interval_micros, so not sleeping";
}
}
}
}
}
} | #include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <memory>
#include <string>
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
class PeriodicFunctionTestAccess {
public:
explicit PeriodicFunctionTestAccess(PeriodicFunction* periodic_function)
: periodic_function_(periodic_function) {}
void NotifyStop() { periodic_function_->NotifyStop(); }
private:
PeriodicFunction* const periodic_function_;
};
}
namespace {
using test_util::FakeClockEnv;
void StopPeriodicFunction(PeriodicFunction* periodic_function,
FakeClockEnv* fake_clock_env,
const uint64 pf_interval_micros) {
fake_clock_env->BlockUntilThreadsAsleep(1);
internal::PeriodicFunctionTestAccess(periodic_function).NotifyStop();
fake_clock_env->AdvanceByMicroseconds(pf_interval_micros);
}
TEST(PeriodicFunctionTest, ObeyInterval) {
const int64_t kPeriodMicros = 2;
const int kCalls = 10;
int actual_calls = 0;
{
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
for (int i = 0; i < kCalls; ++i) {
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
ASSERT_EQ(actual_calls, kCalls + 1);
}
TEST(PeriodicFunctionTest, ObeyStartupDelay) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
int actual_calls = 0;
{
PeriodicFunction::Options options;
options.startup_delay_micros = kDelayMicros;
FakeClockEnv fake_clock_env(Env::Default());
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
EXPECT_EQ(0, actual_calls);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
StopPeriodicFunction(&periodic_function, &fake_clock_env, kDelayMicros);
}
EXPECT_EQ(1, actual_calls);
}
TEST(PeriodicFunctionTest, StartupDelayRace) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
mutex mu;
int counter = 0;
std::unique_ptr<Notification> listener(new Notification);
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
options.startup_delay_micros = kDelayMicros;
PeriodicFunction periodic_function(
[&mu, &counter, &listener]() {
mutex_lock l(mu);
counter++;
listener->Notify();
},
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(1, counter);
listener.reset(new Notification);
}
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(2, counter);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
TEST(PeriodicFunctionTest, MinInterval) {
PeriodicFunction periodic_function(
[]() { Env::Default()->SleepForMicroseconds(20 * 1000); }, 0);
}
class PeriodicFunctionWithFakeClockEnvTest : public ::testing::Test {
protected:
const int64_t kPeriodMicros = 50;
PeriodicFunctionWithFakeClockEnvTest()
: fake_clock_env_(Env::Default()),
counter_(0),
pf_(
[this]() {
mutex_lock l(counter_mu_);
++counter_;
},
kPeriodMicros, GetPeriodicFunctionOptions()) {}
PeriodicFunction::Options GetPeriodicFunctionOptions() {
PeriodicFunction::Options options;
options.thread_name_prefix = "ignore";
options.env = &fake_clock_env_;
return options;
}
void SetUp() override {
ASSERT_TRUE(AwaitCount(1));
}
void TearDown() override {
StopPeriodicFunction(&pf_, &fake_clock_env_, kPeriodMicros);
}
bool AwaitCount(int expected_counter) {
fake_clock_env_.BlockUntilThreadsAsleep(1);
{
mutex_lock lock(counter_mu_);
return counter_ == expected_counter;
}
}
FakeClockEnv fake_clock_env_;
mutex counter_mu_;
int counter_;
PeriodicFunction pf_;
};
TEST_F(PeriodicFunctionWithFakeClockEnvTest, FasterThanRealTime) {
fake_clock_env_.AdvanceByMicroseconds(kPeriodMicros / 2);
for (int i = 2; i < 7; ++i) {
fake_clock_env_.AdvanceByMicroseconds(
kPeriodMicros);
EXPECT_TRUE(AwaitCount(i));
}
}
TEST_F(PeriodicFunctionWithFakeClockEnvTest, SlowerThanRealTime) {
Env::Default()->SleepForMicroseconds(
125 * 1000);
EXPECT_TRUE(AwaitCount(1));
}
TEST(PeriodicFunctionDeathTest, BadInterval) {
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function([]() {}, -1),
".* should be >= 0");
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function(
[]() {}, -1, PeriodicFunction::Options()),
".* should be >= 0");
}
}
}
} |
1,518 | cpp | tensorflow/tensorflow | batch_scheduler_utils | tensorflow/core/kernels/batching_util/batch_scheduler_utils.cc | tensorflow/core/kernels/batching_util/batch_scheduler_utils_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_SCHEDULER_UTILS_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_SCHEDULER_UTILS_H_
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
int GetNextAllowedBatchSize(int batch_size,
const std::vector<int32>& allowed_batch_sizes,
bool disable_padding);
int GetPrevAllowedBatchSize(int batch_size,
const std::vector<int32>& allowed_batch_sizes,
bool disable_padding);
}
}
#endif
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include <algorithm>
#include <vector>
#include "absl/algorithm/container.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
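// Returns the smallest allowed batch size >= batch_size, or batch_size itself
// when padding is disabled, the list is empty, or batch_size exceeds the
// largest allowed size.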
int GetNextAllowedBatchSize(int batch_size,
const std::vector<int32>& allowed_batch_sizes,
bool disable_padding) {
if (disable_padding || allowed_batch_sizes.empty()) {
return batch_size;
}
DCHECK(absl::c_is_sorted(allowed_batch_sizes));
DCHECK_GT(batch_size, 0);
for (int allowed_size : allowed_batch_sizes) {
if (allowed_size >= batch_size) {
return allowed_size;
}
}
LOG(ERROR) << "Batch size " << batch_size
<< " is greater than largest allowed size; ignoring allowed sizes "
"constraint.";
return batch_size;
}
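// Returns the largest allowed batch size <= batch_size, or batch_size itself
// when padding is disabled, the list is empty, or no allowed size is small
// enough.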
int32 GetPrevAllowedBatchSize(int batch_size,
const std::vector<int32>& allowed_batch_sizes,
bool disable_padding) {
if (disable_padding || allowed_batch_sizes.empty()) {
return batch_size;
}
DCHECK(absl::c_is_sorted(allowed_batch_sizes));
DCHECK_GT(batch_size, 0);
auto result = std::find_if(
allowed_batch_sizes.rbegin(), allowed_batch_sizes.rend(),
[&](int allowed_size) { return allowed_size <= batch_size; });
if (result == allowed_batch_sizes.rend()) {
return batch_size;
}
return *result;
}
}
} | #include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include <gtest/gtest.h>
namespace tensorflow {
namespace serving {
namespace {
TEST(GetNextAllowedBatchSizeTest, PaddingDisallowed) {
EXPECT_EQ(GetNextAllowedBatchSize(3, {2, 4, 8}, true), 3);
}
TEST(GetNextAllowedBatchSizeTest, EmptyAllowedBatchSizes) {
EXPECT_EQ(GetNextAllowedBatchSize(3, {}, false), 3);
}
TEST(GetNextAllowedBatchSizeTest, NextAllowedBatchSizeFound) {
EXPECT_EQ(GetNextAllowedBatchSize(3, {2, 4, 8}, false), 4);
}
TEST(GetNextAllowedBatchSizeTest, AlreadyAllowedBatchSize) {
EXPECT_EQ(GetNextAllowedBatchSize(2, {2, 4, 8}, false), 2);
}
TEST(GetNextAllowedBatchSizeTest, GreaterThanAllowedBatchSize) {
EXPECT_EQ(GetNextAllowedBatchSize(10, {2, 4, 8}, false), 10);
}
TEST(GetPrevAllowedBatchSizeTest, PaddingDisallowed) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {2, 4, 8}, true), 3);
}
TEST(GetPrevAllowedBatchSizeTest, EmptyAllowedBatchSizes) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {}, false), 3);
}
TEST(GetPrevAllowedBatchSizeTest, PrevAllowedBatchSizeFound) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {1, 2, 4, 8}, false), 2);
}
TEST(GetPrevAllowedBatchSizeTest, NoSmallerAllowedBatchSizeFound) {
EXPECT_EQ(GetPrevAllowedBatchSize(3, {4, 8}, false), 3);
}
TEST(GetPrevAllowedBatchSizeTest, AlreadyAllowedBatchSize) {
EXPECT_EQ(GetPrevAllowedBatchSize(2, {1, 2, 4, 8}, false), 2);
}
TEST(GetPrevAllowedBatchSizeTest, GreaterThanMaxAllowedBatchSize) {
EXPECT_EQ(GetPrevAllowedBatchSize(10, {2, 4, 8}, false), 8);
}
}
}
} |
1,519 | cpp | tensorflow/tensorflow | bounded_executor | tensorflow/core/kernels/batching_util/bounded_executor.cc | tensorflow/core/kernels/batching_util/bounded_executor_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BOUNDED_EXECUTOR_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BOUNDED_EXECUTOR_H_
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
namespace tensorflow {
namespace serving {
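// Fixed-size thread pool implementing ThreadPoolInterface: Schedule() enqueues
// work, and at most Options::num_threads tasks run concurrently.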
class BoundedExecutor : public thread::ThreadPoolInterface {
public:
struct Options {
Env* env = Env::Default();
ThreadOptions thread_options;
std::string thread_name;
int num_threads = -1;
};
static StatusOr<std::unique_ptr<BoundedExecutor>> Create(
const Options& options);
~BoundedExecutor() override;
void Schedule(std::function<void()> func) override;
int NumThreads() const override;
int CurrentThreadId() const override;
private:
explicit BoundedExecutor(const Options& options);
void InitWorker();
void Run();
const Options& options_;
mutex work_queue_mu_;
std::deque<std::function<void()>> work_queue_ TF_GUARDED_BY(work_queue_mu_);
condition_variable work_queue_cv_ TF_GUARDED_BY(work_queue_mu_);
std::vector<std::unique_ptr<Thread>> threads_;
BoundedExecutor(const BoundedExecutor&) = delete;
void operator=(const BoundedExecutor&) = delete;
};
}
}
#endif
#include "tensorflow/core/kernels/batching_util/bounded_executor.h"
#include <algorithm>
#include <atomic>
#include "absl/functional/bind_front.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace serving {
StatusOr<std::unique_ptr<BoundedExecutor>> BoundedExecutor::Create(
const Options& options) {
if (options.env == nullptr) {
return errors::InvalidArgument("options.env must not be nullptr");
}
if (options.num_threads <= 0) {
return errors::InvalidArgument("options.num_threads must be positive");
}
return absl::WrapUnique(new BoundedExecutor(options));
}
BoundedExecutor::BoundedExecutor(const Options& options) : options_(options) {
InitWorker();
}
void BoundedExecutor::InitWorker() {
for (int i = 0; i < options_.num_threads; i++) {
std::unique_ptr<Thread> thread = absl::WrapUnique(
options_.env->StartThread(options_.thread_options, options_.thread_name,
[this]() { this->Run(); }));
threads_.push_back(std::move(thread));
}
}
BoundedExecutor::~BoundedExecutor() {
{
mutex_lock l(work_queue_mu_);
for (int i = 0; i < NumThreads(); i++) {
work_queue_.push_back(nullptr);
work_queue_cv_.notify_one();
}
}
threads_.clear();
}
void BoundedExecutor::Schedule(std::function<void()> func) {
DCHECK(func != nullptr) << "func is nullptr";
mutex_lock l(work_queue_mu_);
work_queue_.push_back(std::move(func));
work_queue_cv_.notify_one();
}
int BoundedExecutor::NumThreads() const { return options_.num_threads; }
int BoundedExecutor::CurrentThreadId() const { return -1; }
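// Worker loop: blocks until work is queued, runs it, and exits once it
// dequeues the nullptr sentinel pushed by the destructor.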
void BoundedExecutor::Run() {
while (true) {
std::function<void()> func = nullptr;
{
mutex_lock l(work_queue_mu_);
while (work_queue_.empty()) {
work_queue_cv_.wait(l);
}
func = std::move(work_queue_.front());
work_queue_.pop_front();
}
if (func != nullptr) {
func();
} else {
break;
}
}
}
}
} | #include "tensorflow/core/kernels/batching_util/bounded_executor.h"
#include "absl/functional/bind_front.h"
#include "absl/time/time.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace serving {
namespace {
class TaskTracker {
public:
std::function<void()> MakeTask(int task_id, absl::Duration sleep_duration) {
return absl::bind_front(&TaskTracker::Run, this, task_id, sleep_duration);
}
void Run(int task_id, absl::Duration sleep_duration) {
LOG(INFO) << "Entering task " << task_id;
{
mutex_lock l(mutex_);
++task_count_;
++running_count_;
if (running_count_ > max_running_count_) {
max_running_count_ = running_count_;
}
}
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(sleep_duration));
{
mutex_lock l(mutex_);
--running_count_;
}
LOG(INFO) << "Task " << task_id << " exiting.";
}
int task_count() {
mutex_lock l(mutex_);
return task_count_;
}
int running_count() {
mutex_lock l(mutex_);
return running_count_;
}
int max_running_count() {
mutex_lock l(mutex_);
return max_running_count_;
}
private:
mutex mutex_;
int task_count_ = 0;
int running_count_ = 0;
int max_running_count_ = 0;
};
TEST(BoundedExecutorTest, InvalidEmptyEnv) {
BoundedExecutor::Options options;
options.num_threads = 2;
options.env = nullptr;
EXPECT_THAT(BoundedExecutor::Create(options),
::tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT, "options.env must not be nullptr"));
}
TEST(BoundedExecutorTest, InvalidNumThreads) {
{
BoundedExecutor::Options options;
options.num_threads = 0;
EXPECT_THAT(
BoundedExecutor::Create(options),
::tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT, "options.num_threads must be positive"));
}
{
BoundedExecutor::Options options;
options.num_threads = -1;
EXPECT_THAT(
BoundedExecutor::Create(options),
::tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT, "options.num_threads must be positive"));
}
}
TEST(BoundedExecutorTest, AddRunsFunctionsEventually) {
BoundedExecutor::Options options;
options.num_threads = 2;
TF_ASSERT_OK_AND_ASSIGN(auto executor, BoundedExecutor::Create(options));
Notification done0;
executor->Schedule([&done0] { done0.Notify(); });
Notification done1;
executor->Schedule([&done1] { done1.Notify(); });
done0.WaitForNotification();
done1.WaitForNotification();
executor.reset();
}
TEST(BoundedExecutorTest, MaxInflightLimit) {
BoundedExecutor::Options options;
options.num_threads = 5;
TF_ASSERT_OK_AND_ASSIGN(auto executor, BoundedExecutor::Create(options));
const int num_tasks = 100;
TaskTracker task_tracker;
for (int i = 0; i < num_tasks; i++) {
executor->Schedule(task_tracker.MakeTask(i, absl::Seconds(1)));
}
executor.reset();
EXPECT_EQ(task_tracker.task_count(), num_tasks);
EXPECT_EQ(task_tracker.max_running_count(), options.num_threads);
EXPECT_EQ(task_tracker.running_count(), 0);
}
}
}
} |
1,520 | cpp | tensorflow/tensorflow | batch_scheduler | tensorflow/core/kernels/batching_util/batch_scheduler.cc | tensorflow/core/kernels/batching_util/batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_SCHEDULER_H_
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
const absl::string_view kLowPriorityPaddingWithMaxBatchSizeAttrValue =
"low_priority_padding_with_max_batch_size";
const absl::string_view kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue =
"low_priority_padding_with_next_allowed_batch_size";
const absl::string_view kPriorityIsolationAttrValue = "priority_isolation";
enum class MixedPriorityBatchingPolicy {
kLowPriorityPaddingWithMaxBatchSize,
kLowPriorityPaddingWithNextAllowedBatchSize,
kPriorityIsolation
};
absl::StatusOr<MixedPriorityBatchingPolicy> GetMixedPriorityBatchingPolicy(
absl::string_view attr_value);
class BatchTask {
public:
virtual ~BatchTask() = default;
virtual size_t size() const = 0;
virtual tsl::criticality::Criticality criticality() const {
return tsl::criticality::Criticality::kCritical;
}
};
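// Mutex-guarded FIFO of tasks paired with their enqueue timestamps; size()
// reports the summed task sizes, not the task count.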
template <typename TaskType>
class TaskQueue {
public:
TaskQueue() = default;
struct TaskWrapper {
std::unique_ptr<TaskType> task;
uint64 start_time_micros;
TaskWrapper(std::unique_ptr<TaskType> task, uint64 start_time_micros)
: task(std::move(task)), start_time_micros(start_time_micros) {}
};
void AddTask(std::unique_ptr<TaskType> task, uint64 start_time_micros);
std::unique_ptr<TaskType> RemoveTask();
std::vector<std::unique_ptr<TaskType>> RemoveTask(int size);
std::optional<uint64> EarliestTaskStartTime() const;
bool empty() const;
int num_tasks() const;
int size() const;
private:
mutable mutex mu_;
std::deque<TaskWrapper> tasks_ TF_GUARDED_BY(mu_);
int size_ TF_GUARDED_BY(mu_) = 0;
std::atomic<bool> empty_ TF_GUARDED_BY(mu_){true};
TaskQueue(const TaskQueue&) = delete;
void operator=(const TaskQueue&) = delete;
};
template <typename TaskType>
void TaskQueue<TaskType>::AddTask(std::unique_ptr<TaskType> task,
uint64 start_time_micros) {
{
mutex_lock l(mu_);
size_ += task->size();
tasks_.emplace_back(std::move(task), start_time_micros);
empty_.store(false);
}
}
template <typename TaskType>
std::unique_ptr<TaskType> TaskQueue<TaskType>::RemoveTask() {
{
mutex_lock l(mu_);
if (tasks_.empty()) {
return nullptr;
}
std::unique_ptr<TaskType> task = std::move(tasks_.front().task);
size_ -= task->size();
tasks_.pop_front();
if (tasks_.empty()) {
empty_.store(true);
}
return task;
}
}
template <typename TaskType>
std::vector<std::unique_ptr<TaskType>> TaskQueue<TaskType>::RemoveTask(
int size) {
{
mutex_lock l(mu_);
if (tasks_.empty()) {
return {};
}
int size_lower_bound = size_ - size;
std::vector<std::unique_ptr<TaskType>> remove_tasks;
while (!tasks_.empty() &&
size_ - static_cast<int>(tasks_.front().task->size()) >=
size_lower_bound) {
size_ -= static_cast<int>(tasks_.front().task->size());
remove_tasks.push_back(std::move(tasks_.front().task));
tasks_.pop_front();
if (tasks_.empty()) {
empty_.store(true);
}
}
return remove_tasks;
}
}
template <typename TaskType>
bool TaskQueue<TaskType>::empty() const {
{
mutex_lock l(mu_);
return empty_.load();
}
}
template <typename TaskType>
std::optional<uint64> TaskQueue<TaskType>::EarliestTaskStartTime() const {
{
mutex_lock l(mu_);
if (tasks_.empty()) {
return std::nullopt;
}
return tasks_.front().start_time_micros;
}
}
template <typename TaskType>
int TaskQueue<TaskType>::num_tasks() const {
{
mutex_lock l(mu_);
return tasks_.size();
}
}
template <typename TaskType>
int TaskQueue<TaskType>::size() const {
{
mutex_lock l(mu_);
return size_;
}
}
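// A group of tasks to be processed together. Tasks may be added until Close()
// is called, and the destructor blocks until the batch has been closed.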
template <typename TaskType>
class Batch {
public:
Batch();
explicit Batch(uint64 traceme_context_id);
virtual ~Batch();
void AddTask(std::unique_ptr<TaskType> task);
std::unique_ptr<TaskType> RemoveTask();
std::vector<std::unique_ptr<TaskType>> RemoveAllTasks();
int num_tasks() const;
bool empty() const;
const TaskType& task(int i) const;
TaskType* mutable_task(int i);
size_t size() const;
bool IsClosed() const;
void WaitUntilClosed() const;
void Close();
uint64 traceme_context_id() const;
private:
mutable mutex mu_;
std::vector<std::unique_ptr<TaskType>> tasks_ TF_GUARDED_BY(mu_);
size_t size_ TF_GUARDED_BY(mu_) = 0;
std::atomic<bool> empty_ TF_GUARDED_BY(mu_){true};
Notification closed_;
const uint64 traceme_context_id_;
Batch(const Batch&) = delete;
void operator=(const Batch&) = delete;
};
template <typename TaskType>
class BatchScheduler {
public:
virtual ~BatchScheduler() = default;
virtual Status Schedule(std::unique_ptr<TaskType>* task) = 0;
virtual size_t NumEnqueuedTasks() const = 0;
virtual size_t SchedulingCapacity() const = 0;
virtual size_t max_task_size() const = 0;
};
template <typename TaskType>
Batch<TaskType>::Batch() : Batch(0) {}
template <typename TaskType>
Batch<TaskType>::Batch(uint64 traceme_context_id)
: traceme_context_id_(traceme_context_id) {}
template <typename TaskType>
Batch<TaskType>::~Batch() {
WaitUntilClosed();
}
template <typename TaskType>
void Batch<TaskType>::AddTask(std::unique_ptr<TaskType> task) {
DCHECK(!IsClosed());
{
mutex_lock l(mu_);
size_ += task->size();
tasks_.push_back(std::move(task));
empty_.store(false);
}
}
template <typename TaskType>
std::vector<std::unique_ptr<TaskType>> Batch<TaskType>::RemoveAllTasks() {
DCHECK(IsClosed());
{
mutex_lock l(mu_);
size_ = 0;
empty_.store(true);
std::vector<std::unique_ptr<TaskType>> tasks_to_return;
tasks_to_return.swap(tasks_);
return std::move(tasks_to_return);
}
}
template <typename TaskType>
std::unique_ptr<TaskType> Batch<TaskType>::RemoveTask() {
{
mutex_lock l(mu_);
if (tasks_.empty()) {
return nullptr;
}
std::unique_ptr<TaskType> task = std::move(tasks_.back());
size_ -= task->size();
tasks_.pop_back();
if (tasks_.empty()) {
empty_.store(true);
}
return task;
}
}
template <typename TaskType>
int Batch<TaskType>::num_tasks() const {
{
mutex_lock l(mu_);
return tasks_.size();
}
}
template <typename TaskType>
bool Batch<TaskType>::empty() const TF_NO_THREAD_SAFETY_ANALYSIS {
tsl::profiler::TraceMe tracer("BatchTask::empty");
return empty_.load();
}
template <typename TaskType>
const TaskType& Batch<TaskType>::task(int i) const {
DCHECK_GE(i, 0);
{
mutex_lock l(mu_);
DCHECK_LT(i, tasks_.size());
return *tasks_[i].get();
}
}
template <typename TaskType>
TaskType* Batch<TaskType>::mutable_task(int i) {
DCHECK_GE(i, 0);
{
mutex_lock l(mu_);
DCHECK_LT(i, tasks_.size());
return tasks_[i].get();
}
}
template <typename TaskType>
size_t Batch<TaskType>::size() const {
{
mutex_lock l(mu_);
return size_;
}
}
template <typename TaskType>
bool Batch<TaskType>::IsClosed() const {
return const_cast<Notification*>(&closed_)->HasBeenNotified();
}
template <typename TaskType>
void Batch<TaskType>::WaitUntilClosed() const {
const_cast<Notification*>(&closed_)->WaitForNotification();
}
template <typename TaskType>
void Batch<TaskType>::Close() {
closed_.Notify();
}
template <typename TaskType>
uint64 Batch<TaskType>::traceme_context_id() const {
return traceme_context_id_;
}
}
}
#endif
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace serving {
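// Maps the batching-policy attribute string to the corresponding enum value;
// unknown strings produce an InvalidArgument error.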
absl::StatusOr<MixedPriorityBatchingPolicy> GetMixedPriorityBatchingPolicy(
absl::string_view attr_value) {
if (attr_value == kLowPriorityPaddingWithMaxBatchSizeAttrValue) {
return MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize;
} else if (attr_value ==
kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue) {
return MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize;
} else if (attr_value == kPriorityIsolationAttrValue) {
return MixedPriorityBatchingPolicy::kPriorityIsolation;
}
return absl::InvalidArgumentError(absl::StrFormat(
"Unknown mixed priority batching policy: %s", attr_value));
}
}
} | #include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Pointee;
using ::testing::Property;
TEST(MixedPriorityBatchingPolicyTest, InvalidAttrValueError) {
EXPECT_THAT(
GetMixedPriorityBatchingPolicy("invalid_attr_value"),
testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"Unknown mixed priority batching policy: invalid_attr_value")));
}
using MixedPriorityBatchingPolicyParameterizedTest = ::testing::TestWithParam<
std::tuple<std::string, MixedPriorityBatchingPolicy>>;
TEST_P(MixedPriorityBatchingPolicyParameterizedTest,
GetMixedPriorityBatchingPolicySuccess) {
auto [attr_name, policy] = GetParam();
EXPECT_THAT(GetMixedPriorityBatchingPolicy(attr_name),
testing::IsOkAndHolds(Eq(policy)));
}
INSTANTIATE_TEST_SUITE_P(
Parameter, MixedPriorityBatchingPolicyParameterizedTest,
::testing::Values(
std::make_tuple(
kLowPriorityPaddingWithMaxBatchSizeAttrValue,
MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithMaxBatchSize),
std::make_tuple(
kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
MixedPriorityBatchingPolicy::
kLowPriorityPaddingWithNextAllowedBatchSize),
std::make_tuple(
kPriorityIsolationAttrValue,
MixedPriorityBatchingPolicy::kPriorityIsolation)));
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
private:
const size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
TEST(TaskCriticalityTest, CriticalityDefaultsToCritical) {
FakeTask fake_task(0);
EXPECT_EQ(fake_task.criticality(), tsl::criticality::Criticality::kCritical);
}
TEST(TaskQueueTest, EmptyTaskQueue) {
TaskQueue<FakeTask> task_queue;
EXPECT_TRUE(task_queue.empty());
EXPECT_EQ(0, task_queue.num_tasks());
EXPECT_EQ(0, task_queue.size());
}
TEST(TaskQueueTest, AddTaskToTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
}
TEST(TaskQueueTest, AddTasksToTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
}
TEST(TaskQueueTest, RemoveTaskFromTaskQueueWithSingleTask) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(),
Pointee(Property(&FakeTask::size, Eq(1))));
EXPECT_TRUE(task_queue.empty());
EXPECT_EQ(0, task_queue.num_tasks());
EXPECT_EQ(0, task_queue.size());
}
TEST(TaskQueueTest, RemoveTaskFromTaskQueueWithMultipleTasks) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(2), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(2, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(1), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(),
Pointee(Property(&FakeTask::size, Eq(2))));
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
}
TEST(TaskQueueTest, RemoveTasksFromTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(3),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2)))));
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
}
TEST(TaskQueueTest, RemoveTasksFewerThanArgFromTaskQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(5),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2)))));
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
}
TEST(TaskQueueTest, RemoveAllTasksWhenArgGreaterThanTaskSize) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(1, task_queue.num_tasks());
EXPECT_EQ(1, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(2, task_queue.num_tasks());
EXPECT_EQ(3, task_queue.size());
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
EXPECT_FALSE(task_queue.empty());
EXPECT_EQ(3, task_queue.num_tasks());
EXPECT_EQ(6, task_queue.size());
EXPECT_THAT(task_queue.RemoveTask(8),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2))),
Pointee(Property(&FakeTask::size, Eq(3)))));
EXPECT_TRUE(task_queue.empty());
EXPECT_EQ(0, task_queue.num_tasks());
EXPECT_EQ(0, task_queue.size());
}
TEST(TaskQueueTest, EarliestStartTimeWithEmptyQueue) {
TaskQueue<FakeTask> task_queue;
EXPECT_FALSE(task_queue.EarliestTaskStartTime().has_value());
}
TEST(TaskQueueTest, EarliestStartTimeWithMultipleTasksInQueue) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
std::optional<uint64_t> result = task_queue.EarliestTaskStartTime();
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, 1);
}
TEST(TaskQueueTest, EarliestStartTimeAfterTaskRemoval) {
TaskQueue<FakeTask> task_queue;
task_queue.AddTask(std::make_unique<FakeTask>(1), 1);
task_queue.AddTask(std::make_unique<FakeTask>(2), 2);
task_queue.AddTask(std::make_unique<FakeTask>(3), 3);
std::optional<uint64_t> result = task_queue.EarliestTaskStartTime();
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, 1);
EXPECT_THAT(task_queue.RemoveTask(3),
ElementsAre(Pointee(Property(&FakeTask::size, Eq(1))),
Pointee(Property(&FakeTask::size, Eq(2)))));
result = task_queue.EarliestTaskStartTime();
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, 3);
}
TEST(BatchTest, Basic) {
Batch<FakeTask> batch;
EXPECT_EQ(0, batch.num_tasks());
EXPECT_TRUE(batch.empty());
EXPECT_EQ(0, batch.size());
EXPECT_FALSE(batch.IsClosed());
auto task0 = new FakeTask(3);
batch.AddTask(std::unique_ptr<FakeTask>(task0));
EXPECT_EQ(1, batch.num_tasks());
EXPECT_FALSE(batch.empty());
EXPECT_EQ(task0->size(), batch.size());
EXPECT_EQ(task0->size(), batch.task(0).size());
EXPECT_FALSE(batch.IsClosed());
auto task1 = new FakeTask(7);
batch.AddTask(std::unique_ptr<FakeTask>(task1));
EXPECT_EQ(2, batch.num_tasks());
EXPECT_FALSE(batch.empty());
EXPECT_EQ(task0->size() + task1->size(), batch.size());
EXPECT_EQ(task1->size(), batch.task(1).size());
EXPECT_EQ(task1->size(), batch.mutable_task(1)->size());
EXPECT_FALSE(batch.IsClosed());
batch.Close();
EXPECT_TRUE(batch.IsClosed());
EXPECT_EQ(2, batch.num_tasks());
EXPECT_FALSE(batch.empty());
EXPECT_EQ(task0->size() + task1->size(), batch.size());
EXPECT_EQ(task0->size(), batch.task(0).size());
EXPECT_EQ(task1->size(), batch.task(1).size());
EXPECT_EQ(7, batch.RemoveTask()->size());
EXPECT_EQ(3, batch.size());
EXPECT_EQ(3, batch.RemoveTask()->size());
EXPECT_EQ(0, batch.size());
EXPECT_TRUE(batch.empty());
}
TEST(BatchTest, WaitUntilClosed) {
Batch<FakeTask> batch;
batch.AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
EXPECT_FALSE(batch.IsClosed());
std::unique_ptr<Thread> close_thread(
Env::Default()->StartThread(ThreadOptions(), "test", [&batch]() {
Env::Default()->SleepForMicroseconds(100);
batch.Close();
}));
batch.WaitUntilClosed();
EXPECT_TRUE(batch.IsClosed());
}
TEST(BatchTest, DeletionBlocksUntilClosed) {
Batch<FakeTask>* batch = new Batch<FakeTask>;
batch->AddTask(std::unique_ptr<FakeTask>(new FakeTask(3)));
EXPECT_FALSE(batch->IsClosed());
Notification do_delete, deleted;
std::unique_ptr<Thread> delete_thread(Env::Default()->StartThread(
ThreadOptions(), "test", [&batch, &do_delete, &deleted]() {
do_delete.WaitForNotification();
delete batch;
deleted.Notify();
}));
do_delete.Notify();
Env::Default()->SleepForMicroseconds(10 * 1000 );
EXPECT_FALSE(deleted.HasBeenNotified());
batch->Close();
deleted.WaitForNotification();
}
TEST(BatchTest, RemoveAllTasks) {
Batch<FakeTask> batch;
auto task0 = new FakeTask(3);
batch.AddTask(std::unique_ptr<FakeTask>(task0));
auto task1 = new FakeTask(7);
batch.AddTask(std::unique_ptr<FakeTask>(task1));
batch.Close();
EXPECT_TRUE(batch.IsClosed());
std::vector<std::unique_ptr<FakeTask>> tasks_in_batch =
batch.RemoveAllTasks();
EXPECT_EQ(2, tasks_in_batch.size());
EXPECT_TRUE(batch.empty());
EXPECT_EQ(task0, tasks_in_batch[0].get());
EXPECT_EQ(task1, tasks_in_batch[1].get());
EXPECT_THAT(batch.RemoveAllTasks(), ::testing::IsEmpty());
EXPECT_THAT(batch.RemoveAllTasks(), ::testing::IsEmpty());
}
}
}
} |
1,521 | cpp | tensorflow/tensorflow | batch_resource_base | tensorflow/core/kernels/batching_util/batch_resource_base.cc | tensorflow/core/kernels/batching_util/batch_resource_base_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_RESOURCE_BASE_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_RESOURCE_BASE_H_
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_join.h"
#include "absl/synchronization/blocking_counter.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/request_cost.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
struct BatchResourceOptions {
int32_t num_batch_threads;
int32_t max_batch_size;
int32_t batch_timeout_micros;
int32_t max_enqueued_batches;
std::vector<int32_t> allowed_batch_sizes;
int32_t low_priority_max_batch_size;
int32_t low_priority_batch_timeout_micros;
int32_t low_priority_max_enqueued_batches;
std::vector<int32_t> low_priority_allowed_batch_sizes;
MixedPriorityBatchingPolicy mixed_priority_batching_policy;
};
class BatchResourceBase : public ResourceBase {
public:
typedef std::vector<std::vector<Tensor>> TensorMatrix;
struct BatchTask : public tensorflow::serving::BatchTask {
BatchTask() : criticality_val(tsl::criticality::GetCriticality()){};
int64_t guid;
Context propagated_context;
std::vector<Tensor> inputs;
std::vector<Tensor> captured_inputs;
OpKernelContext* context;
AsyncOpKernel::DoneCallback done_callback;
int split_index = 0;
std::shared_ptr<TensorMatrix> output;
std::shared_ptr<ThreadSafeStatus> status;
bool is_partial = false;
uint64 start_time;
size_t size() const override { return inputs[0].shape().dim_size(0); }
std::unique_ptr<BatchTask> CreateSplitTask(
int split_index, AsyncOpKernel::DoneCallback done_callback);
RequestCost* request_cost = nullptr;
tsl::criticality::Criticality criticality() const override {
return criticality_val;
};
int forced_warmup_batch_size = 0;
protected:
virtual std::unique_ptr<BatchTask> CreateDerivedTask() {
return std::make_unique<BatchTask>();
}
private:
::tsl::criticality::Criticality criticality_val;
};
using BatcherT = SharedBatchScheduler<BatchResourceBase::BatchTask>;
using AdaptiveBatcherT =
AdaptiveSharedBatchScheduler<BatchResourceBase::BatchTask>;
using BatcherQueueT = BatchScheduler<BatchResourceBase::BatchTask>;
using BatchT = Batch<BatchResourceBase::BatchTask>;
BatchResourceBase(bool has_process_batch_function,
std::shared_ptr<BatcherT> batcher,
const BatcherT::QueueOptions& batcher_queue_options,
std::vector<int32> allowed_batch_sizes)
: has_process_batch_function_(has_process_batch_function),
batcher_(std::move(batcher)),
batcher_queue_options_(batcher_queue_options),
allowed_batch_sizes_(std::move(allowed_batch_sizes)),
allowed_batch_sizes_str_(absl::StrJoin(allowed_batch_sizes_, ",")) {}
BatchResourceBase(bool has_process_batch_function,
std::shared_ptr<AdaptiveBatcherT> batcher,
const AdaptiveBatcherT::QueueOptions& batcher_queue_options,
std::vector<int32> allowed_batch_sizes)
: has_process_batch_function_(has_process_batch_function),
adaptive_batcher_(std::move(batcher)),
adaptive_batcher_queue_options_(batcher_queue_options),
allowed_batch_sizes_(std::move(allowed_batch_sizes)),
allowed_batch_sizes_str_(absl::StrJoin(allowed_batch_sizes_, ",")) {}
void set_session_metadata(tensorflow::SessionMetadata session_metadata) {
session_metadata_ = std::move(session_metadata);
}
const SessionMetadata& session_metadata() const { return session_metadata_; }
using CreateBatchTaskFn =
std::function<StatusOr<std::unique_ptr<BatchTask>>()>;
Status RegisterWarmupInputs(int64_t guid, OpKernelContext* context,
const string& batcher_queue_name,
const CreateBatchTaskFn& create_batch_task_fn,
AsyncOpKernel::DoneCallback done);
Status RegisterInput(int64_t guid, OpKernelContext* context,
const string& batcher_queue_name,
const CreateBatchTaskFn& create_batch_task_fn,
AsyncOpKernel::DoneCallback done_callback,
int forced_warmup_batch_size = 0);
static BatcherT::QueueOptions GetBatcherQueueOptions(
int32_t num_batch_threads, int32_t max_batch_size,
int32_t batch_timeout_micros, int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
bool enable_large_batch_splitting, bool disable_padding);
static BatcherT::QueueOptions GetBatcherQueueOptions(
int32_t num_batch_threads, int32_t max_batch_size,
int32_t batch_timeout_micros, int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
bool enable_large_batch_splitting, bool disable_padding,
int32_t low_priority_max_batch_size,
int32_t low_priority_batch_timeout_micros,
int32_t low_priority_max_enqueued_batches,
const std::vector<int32>& low_priority_allowed_batch_sizes,
MixedPriorityBatchingPolicy mixed_priority_batching_policy);
static AdaptiveBatcherT::QueueOptions GetAdaptiveBatcherQueueOptions(
int32_t max_batch_size, int32_t batch_timeout_micros,
int32_t max_enqueued_batches, bool enable_large_batch_splitting,
const std::vector<int32>& allowed_batch_sizes, bool disable_padding);
static Status SplitInputTask(
std::unique_ptr<BatchTask>* input_task_ptr, int open_batch_remaining_slot,
int max_batch_size,
std::vector<std::unique_ptr<BatchTask>>* output_tasks);
static void SplitBatchCostsAndRecordMetrics(
const std::string& model_name, const std::string& op_name,
const std::vector<std::unique_ptr<CostMeasurement>>&
batch_cost_measurements,
int64_t processed_size, BatchT& batch);
private:
virtual void ProcessFuncBatchImpl(
const BatchResourceBase::BatchTask& last_task,
absl::Span<const Tensor> inputs, std::vector<Tensor>* combined_outputs,
std::function<void(const Status&)> done) const = 0;
static Status ValidateBatch(const BatchT& batch);
bool IsLowPriorityBatch(const BatchT& batch) const;
int RoundToLowestAllowedBatchSize(int batch_size,
bool is_low_priority_batch = false) const;
void CleanUpFunctionHelper(BatchTask& task, const Status& status) const;
Status ConcatInputTensors(
const BatchT& batch,
const std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks,
OpKernelContext* context,
std::vector<Tensor>* concatenated_tensors) const;
Status SplitOutputTensors(
const std::vector<Tensor>& combined_outputs, BatchT* batch,
std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks) const;
void ProcessFuncBatch(
std::unique_ptr<BatchT> batch,
std::vector<std::unique_ptr<BatchTask>> unbatched_tasks = {}) const;
void ProcessBatch(std::unique_ptr<BatchT> batch) const;
void ProcessBatchCallBack(
std::unique_ptr<Batch<BatchTask>> batch,
std::vector<std::unique_ptr<BatchTask>> unbatched_tasks);
static Status EmitIndexTensor(OpKernelContext* context, const BatchT& batch,
int output_index);
Status LookupOrCreateBatcherQueue(const string& queue_name,
BatcherQueueT** queue);
SessionMetadata session_metadata_;
absl::Mutex outstanding_batch_mu_;
int num_outstanding_batched_items_ TF_GUARDED_BY(outstanding_batch_mu_) = 0;
const bool has_process_batch_function_;
std::shared_ptr<BatcherT> batcher_;
BatcherT::QueueOptions batcher_queue_options_;
std::shared_ptr<AdaptiveBatcherT> adaptive_batcher_;
AdaptiveBatcherT::QueueOptions adaptive_batcher_queue_options_;
mutable mutex batcher_queues_mu_;
std::map<string, std::unique_ptr<BatcherQueueT>> batcher_queues_
TF_GUARDED_BY(batcher_queues_mu_);
std::vector<int32> allowed_batch_sizes_;
string allowed_batch_sizes_str_;
};
}
}
#endif
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/cost_constants.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/cost_util.h"
#include "tensorflow/core/common_runtime/request_cost.h"
#include "tensorflow/core/common_runtime/request_cost_accessor.h"
#include "tensorflow/core/common_runtime/request_cost_accessor_registry.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include "tensorflow/core/kernels/batching_util/concat_split_util.h"
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "tensorflow/core/kernels/batching_util/warmup.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/percentile_sampler.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/util/incremental_barrier.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace serving {
namespace {
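// Monitoring helpers: each Record* function below writes one observation to a
// lazily created metric cell keyed by model name and op name.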
void RecordPaddingSize(int32_t padding_size, const string& model_name,
int32_t execution_batch_size, const string& op_name) {
static auto* cell = tensorflow::monitoring::PercentileSampler<3>::New(
{"/tensorflow/serving/batching/padding_size",
"Tracks the padding size distribution on batches by model_name (if "
"available).",
"model_name", "execution_batch_size", "op_name"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, tensorflow::monitoring::UnitOfMeasure::kNumber);
cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name)
->Add(static_cast<double>(padding_size));
}
void RecordPaddingSizeV2(int32_t padding_size, const string& model_name,
int32_t execution_batch_size, const string& op_name) {
std::vector<double> bucket_limits;
bucket_limits.push_back(-2.0 / 3.0);
double bound = 2.0 / 3.0;
double growth_factor = 2;
for (int i = 0; i < 16; i++) {
bucket_limits.push_back(bound);
bound *= growth_factor;
}
static auto* cell = tensorflow::monitoring::Sampler<3>::New(
{"/tensorflow/serving/batching/padding_size_v2",
"Tracks the padding size distribution on batches by model_name (if "
"available).",
"model_name", "execution_batch_size", "op_name"},
monitoring::Buckets::Explicit(bucket_limits));
cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name)
->Add(static_cast<double>(padding_size));
}
void RecordInputBatchSize(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New(
{"/tensorflow/serving/batching/input_batch_size",
"Tracks the batch size distribution on the inputs by model_name (if "
"available).",
"model_name", "op_name"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, tensorflow::monitoring::UnitOfMeasure::kNumber);
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordInputBatchSizeV2(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::Sampler<2>::New(
{"/tensorflow/serving/batching/input_batch_size_v2",
"Tracks the batch size distribution on the inputs by model_name (if "
"available).",
"model_name", "op_name"},
monitoring::Buckets::Exponential(2.0 / 3.0, 2, 15));
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordBatchSize(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::Sampler<2>::New(
{"/tensorflow/serving/batching/batch_size",
"Tracks the batch size distribution on the batch result by model_name "
"(if available).",
"model_name", "op_name"},
monitoring::Buckets::Exponential(1, 1.5, 20));
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordProcessedBatchSize(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New(
{"/tensorflow/serving/batching/processed_batch_size",
"Tracks the batch size distribution on processing by model_name (if "
"available).",
"model_name", "op_name"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, tensorflow::monitoring::UnitOfMeasure::kNumber);
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordProcessedBatchSizeV2(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Counter<3>::New(
"/tensorflow/serving/batching/processed_batch_size_v2",
"Tracks the batch size on processing by model_name and op name (if "
"available).",
"model_name", "op_name", "batch_size");
cell->GetCell(model_name, op_name, std::to_string(batch_size))
->IncrementBy(1);
}
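// Unlike the percentile-sampler variant above, the V2 metric is a plain
// counter keyed by the exact batch size label, so it records a full count per
// batch size rather than percentile estimates.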
void RecordBatchDelayUs(int64_t batch_delay_us, const string& model_name,
const string& op_name, int32_t batch_size) {
static auto* cell = monitoring::PercentileSampler<3>::New(
{"/tensorflow/serving/batching/batch_delay_us",
"Tracks the batching delay (in microseconds) for inputs by model_name "
"(if available).",
"model_name", "op_name", "processed_batch_size"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, monitoring::UnitOfMeasure::kTime);
cell->GetCell(model_name, op_name, std::to_string(batch_size))
->Add(static_cast<double>(batch_delay_us));
}
void RecordBatchDelayUsV2(int64_t batch_delay_us, const string& model_name,
const string& op_name, int32_t batch_size) {
static auto* cell = tensorflow::monitoring::Sampler<3>::New(
{"/tensorflow/serving/batching/batch_delay_us_v2",
"Tracks the batching delay (in microseconds) for inputs by model_name "
"(if available).",
"model_name", "op_name", "processed_batch_size"},
monitoring::Buckets::Exponential(1, 2, 27));
cell->GetCell(model_name, op_name, std::to_string(batch_size))
->Add(static_cast<double>(batch_delay_us));
}
void RecordBatchParamBatchTimeoutMicros(int64_t batch_timeout_micros,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/serving/batching/batch_timeout_micros",
"Tracks how long a request can wait before being processed by a batch.",
"model_name", "op_name");
cell->GetCell(model_name, op_name)->Set(batch_timeout_micros);
}
void RecordBatchParamMaxBatchSize(int64_t max_batch_size,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/serving/batching/max_batch_size",
"Tracks the maximum size of a batch.", "model_name", "op_name");
cell->GetCell(model_name, op_name)->Set(max_batch_size);
}
void RecordBatchParamMaxEnqueuedBatches(int64_t max_enqueued_batches,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/serving/batching/max_enqueued_batches",
"Tracks the maximum number of enqueued batches.", "model_name",
"op_name");
cell->GetCell(model_name, op_name)->Set(max_enqueued_batches);
}
void RecordBatchParamAllowedBatchSizes(const string& allowed_batch_sizes,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<string, 2>::New(
"/tensorflow/serving/batching/allowed_batch_sizes",
"Tracks the sizes that are allowed to form a batch.", "model_name",
"op_name");
cell->GetCell(model_name, op_name)->Set(allowed_batch_sizes);
}
void RecordBatchCosts(const std::string& model_name,
const int64_t processed_size,
const absl::string_view cost_type,
const absl::Duration total_cost) {
static auto* cell = tensorflow::monitoring::Sampler<3>::New(
{"/tensorflow/serving/batching/costs",
"Tracks the batch costs (in microseconds) by model name and processed "
"size.",
"model_name", "processed_size", "cost_type"},
monitoring::Buckets::Exponential(1, 2, 27));
cell->GetCell(model_name, std::to_string(processed_size),
std::string(cost_type))
->Add(absl::ToDoubleMicroseconds(total_cost));
}
const string& GetModelName(OpKernelContext* ctx) {
static string* kModelNameUnset = new string("model_name_unset");
if (!ctx->session_metadata()) return *kModelNameUnset;
if (ctx->session_metadata()->name().empty()) return *kModelNameUnset;
return ctx->session_metadata()->name();
}
int GetTotalTaskSize(
const std::vector<std::unique_ptr<BatchResourceBase::BatchTask>>& tasks) {
int tasks_size = 0;
for (const auto& task : tasks) {
tasks_size += task->size();
}
return tasks_size;
}
}
std::unique_ptr<BatchResourceBase::BatchTask>
BatchResourceBase::BatchTask::CreateSplitTask(
int split_index, AsyncOpKernel::DoneCallback done_callback) {
std::unique_ptr<BatchTask> task = CreateDerivedTask();
task->guid = this->guid;
task->propagated_context = Context(ContextKind::kThread);
task->inputs.reserve(this->inputs.size());
task->captured_inputs = this->captured_inputs;
task->context = this->context;
task->done_callback = done_callback;
task->split_index = split_index;
task->output = this->output;
task->status = this->status;
task->is_partial = true;
task->start_time = this->start_time;
task->request_cost = this->request_cost;
task->forced_warmup_batch_size = this->forced_warmup_batch_size;
return task;
}
using ::tensorflow::concat_split_util::Concat;
using ::tensorflow::concat_split_util::Split;
using TensorMatrix = std::vector<std::vector<Tensor>>;
string GetTensorNamesAndShapesString(const OpKernelContext* context,
const OpInputList& tensors) {
std::stringstream out;
int i = 0;
for (const Tensor& tensor : tensors) {
out << " - " << context->op_kernel().requested_input(i++) << " has shape "
<< tensor.shape().DebugString() << "\n";
}
return out.str();
}
Status BatchResourceBase::RegisterWarmupInputs(
int64_t guid, OpKernelContext* context, const string& batcher_queue_name,
const CreateBatchTaskFn& create_batch_task_fn,
AsyncOpKernel::DoneCallback done) {
auto shared_status = std::make_shared<ThreadSafeStatus>();
auto create_batch_task_fn_share_status = [&create_batch_task_fn,
&shared_status]() {
auto batch_task = create_batch_task_fn();
if (!batch_task.ok()) {
return batch_task;
}
(*batch_task)->status = shared_status;
return batch_task;
};
auto warmup_counter =
std::make_shared<absl::BlockingCounter>(allowed_batch_sizes_.size());
for (int i = 0; i < allowed_batch_sizes_.size(); ++i) {
Status status = RegisterInput(
guid, context, batcher_queue_name, create_batch_task_fn_share_status,
[warmup_counter = warmup_counter.get()]() {
warmup_counter->DecrementCount();
},
allowed_batch_sizes_[i]);
if (!status.ok()) return status;
}
return RegisterInput(
guid, context, batcher_queue_name, create_batch_task_fn_share_status,
[warmup_counter, context, shared_status, done = std::move(done)]() {
warmup_counter->Wait();
context->SetStatus(shared_status->status());
done();
});
}
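// RegisterWarmupInputs (above) enqueues one warmup task per allowed batch
// size, each forced to that size, plus the real input; the final task's
// callback blocks on the warmup counter, so `done` runs (and the shared
// status is reported) only after every warmup batch has been processed.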
Status BatchResourceBase::RegisterInput(
int64_t guid, OpKernelContext* context, const string& batcher_queue_name,
const CreateBatchTaskFn& create_batch_task_fn,
AsyncOpKernel::DoneCallback done_callback, int forced_warmup_batch_size) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<BatchTask> batch_components,
create_batch_task_fn());
batch_components->start_time = EnvTime::NowNanos();
batch_components->guid = guid;
batch_components->propagated_context = Context(ContextKind::kThread);
OpInputList tensors;
TF_RETURN_IF_ERROR(context->input_list("in_tensors", &tensors));
batch_components->inputs.reserve(tensor | #include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/cost_constants.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/request_cost.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include "tsl/platform/criticality.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(BatchTaskCriticalityTest, CriticalityDefaultsToCritical) {
BatchResourceBase::BatchTask batch_task;
EXPECT_EQ(batch_task.criticality(), tsl::criticality::Criticality::kCritical);
}
#if defined(PLATFORM_GOOGLE)
TEST(BatchTaskCriticalityTest, CriticalitySuccessfullyPropagated) {
std::vector<BatchResourceBase::BatchTask> batch_tasks;
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCritical);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCritical);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddablePlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddablePlus);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
batch_tasks.push_back(BatchResourceBase::BatchTask());
EXPECT_EQ(batch_tasks[0].criticality(),
tsl::criticality::Criticality::kCriticalPlus);
EXPECT_EQ(batch_tasks[1].criticality(),
tsl::criticality::Criticality::kCritical);
EXPECT_EQ(batch_tasks[2].criticality(),
tsl::criticality::Criticality::kSheddablePlus);
EXPECT_EQ(batch_tasks[3].criticality(),
tsl::criticality::Criticality::kSheddable);
EXPECT_EQ(batch_tasks[4].criticality(),
tsl::criticality::Criticality::kCritical);
}
#endif
class TestTpuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Milliseconds(100); }
absl::string_view GetCostType() const override { return "test_tpu"; }
};
REGISTER_COST_MEASUREMENT("test_tpu", TestTpuCostMeasurement);
class TestGcuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Milliseconds(200); }
absl::string_view GetCostType() const override { return "test_gcu"; }
};
REGISTER_COST_MEASUREMENT("test_gcu", TestGcuCostMeasurement);
std::unique_ptr<BatchResourceBase::BatchTask> MakeBatchTask(
const int64_t task_size, RequestCost* request_cost) {
auto task = std::make_unique<BatchResourceBase::BatchTask>();
task->inputs.push_back(Tensor(DT_DOUBLE, TensorShape({task_size, 1})));
task->request_cost = request_cost;
return task;
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoCostMeasurement) {
BatchResourceBase::BatchT batch;
RequestCost cost;
batch.AddTask(MakeBatchTask(1, &cost));
batch.Close();
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 16,
batch);
EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty());
EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
16, 1, 15,
::testing::IsEmpty())));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroCost) {
BatchResourceBase::BatchT batch;
RequestCost cost;
batch.AddTask(MakeBatchTask(1, &cost));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("no_op", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 16,
batch);
EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty());
EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
16, 1, 15,
::testing::IsEmpty())));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroBatchSize) {
BatchResourceBase::BatchT batch;
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 0,
batch);
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoRequestCost) {
BatchResourceBase::BatchT batch;
batch.AddTask(MakeBatchTask(1, nullptr));
batch.AddTask(MakeBatchTask(9, nullptr));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 16,
batch);
EXPECT_EQ(batch.task(0).request_cost, nullptr);
EXPECT_EQ(batch.task(1).request_cost, nullptr);
}
TEST(SplitBatchCostsAndRecordMetricsTest, SplitSingleCostType) {
BatchResourceBase::BatchT batch;
RequestCost cost1, cost2;
batch.AddTask(MakeBatchTask(1, &cost1));
batch.AddTask(MakeBatchTask(9, &cost2));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 20,
batch);
EXPECT_THAT(
batch.task(0).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)),
Pair("test_tpu_no_smear", absl::Milliseconds(5))));
EXPECT_THAT(
batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 1, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
EXPECT_THAT(
batch.task(1).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)),
Pair("test_tpu_no_smear", absl::Milliseconds(45))));
EXPECT_THAT(
batch.task(1).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 9, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
}
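// The expectations above and below are consistent with the following split
// rule (inferred from the test values; a sketch, not an authoritative spec):
//   <cost>_with_smear per task = total_cost * task_size / sum_of_task_sizes
//   <cost>_no_smear per task   = total_cost * task_size / processed_size
// e.g. test_tpu = 100ms, task sizes {1, 9}, processed_size = 20 gives
// 10ms/5ms for task 0 and 90ms/45ms for task 1. The recorded batch metrics
// appear to be (processed_size, task_input_size, padding_size,
// per-cost-type totals), e.g. (20, 1, 10, {test_tpu: 100ms}).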
TEST(SplitBatchCostsAndRecordMetricsTest, SplitMultiCostTypes) {
BatchResourceBase::BatchT batch;
RequestCost cost1, cost2;
batch.AddTask(MakeBatchTask(1, &cost1));
batch.AddTask(MakeBatchTask(9, &cost2));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_gcu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 20,
batch);
EXPECT_THAT(
batch.task(0).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)),
Pair("test_tpu_no_smear", absl::Milliseconds(5)),
Pair("test_gcu_with_smear", absl::Milliseconds(20)),
Pair("test_gcu_no_smear", absl::Milliseconds(10))));
EXPECT_THAT(
batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 1, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)),
Pair("test_gcu", absl::Milliseconds(200))))));
EXPECT_THAT(
batch.task(1).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)),
Pair("test_tpu_no_smear", absl::Milliseconds(45)),
Pair("test_gcu_with_smear", absl::Milliseconds(180)),
Pair("test_gcu_no_smear", absl::Milliseconds(90))));
EXPECT_THAT(
batch.task(1).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 9, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)),
Pair("test_gcu", absl::Milliseconds(200))))));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SplitOnlyNonZeroCostTypes) {
BatchResourceBase::BatchT batch;
RequestCost cost1, cost2;
batch.AddTask(MakeBatchTask(1, &cost1));
batch.AddTask(MakeBatchTask(9, &cost2));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("no_op", context));
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 20,
batch);
EXPECT_THAT(
batch.task(0).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)),
Pair("test_tpu_no_smear", absl::Milliseconds(5))));
EXPECT_THAT(
batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 1, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
EXPECT_THAT(
batch.task(1).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)),
Pair("test_tpu_no_smear", absl::Milliseconds(45))));
EXPECT_THAT(
batch.task(1).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 9, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
}
TEST(SplitBatchCostsAndRecordMetricsTest, UpdatesGlobalBatchStats) {
class FakeTpuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Hours(555); }
absl::string_view GetCostType() const override { return kTpuCostName; }
};
  CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
std::make_unique<FakeTpuCostMeasurement>(context));
BatchResourceBase::BatchT batch;
  batch.AddTask(MakeBatchTask(1, nullptr));
batch.Close();
const char kModelName[] = __FILE__;
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
kModelName, "op_name",
batch_cost_measurements, 17, batch);
EXPECT_EQ(GlobalBatchStats()
                .model(kModelName, "op_name")
.batch_size(17)
.tpu_cost()
.mean(),
absl::Hours(555));
}
}
}
} |
1,522 | cpp | tensorflow/tensorflow | input_split_metadata | tensorflow/core/kernels/batching_util/input_split_metadata.cc | tensorflow/core/kernels/batching_util/input_split_metadata_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_INPUT_SPLIT_METADATA_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_INPUT_SPLIT_METADATA_H_
#include <algorithm>
#include "absl/container/fixed_array.h"
namespace tensorflow {
namespace serving {
namespace internal {
class InputSplitMetadata {
public:
InputSplitMetadata(int input_task_size, int open_batch_remaining_slot,
int batch_size_limit);
const absl::FixedArray<int>& task_sizes() const;
std::string DebugString() const;
private:
absl::FixedArray<int> generate_task_sizes(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) const;
const absl::FixedArray<int> task_sizes_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include <algorithm>
#include "absl/container/fixed_array.h"
#include "absl/strings/str_join.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
int compute_task_size_from_open_batch(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) {
return (open_batch_remaining_slot > 0)
? (input_task_size + batch_size_limit - open_batch_remaining_slot)
: input_task_size;
}
int compute_head_task_size(int input_task_size, int open_batch_remaining_slot,
int batch_size_limit) {
if (open_batch_remaining_slot == 0) {
return std::min(input_task_size, batch_size_limit);
}
return std::min(open_batch_remaining_slot, input_task_size);
}
int compute_tail_task_size(int task_size_from_open_batch, int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) {
int tail_task_size;
if (input_task_size <= open_batch_remaining_slot) {
tail_task_size = input_task_size;
} else {
tail_task_size = task_size_from_open_batch % batch_size_limit;
if (tail_task_size == 0) {
tail_task_size = batch_size_limit;
}
}
return tail_task_size;
}
int compute_num_batches(int task_size_from_open_batch, int batch_size_limit) {
return (task_size_from_open_batch + batch_size_limit - 1) / batch_size_limit;
}
}
InputSplitMetadata::InputSplitMetadata(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit)
: task_sizes_(generate_task_sizes(
input_task_size, open_batch_remaining_slot, batch_size_limit)) {}
const absl::FixedArray<int>& InputSplitMetadata::task_sizes() const {
return task_sizes_;
}
std::string InputSplitMetadata::DebugString() const {
return absl::StrJoin(task_sizes_, ", ");
}
absl::FixedArray<int> InputSplitMetadata::generate_task_sizes(
int input_task_size, int open_batch_remaining_slot,
int batch_size_limit) const {
const int task_size_from_open_batch = compute_task_size_from_open_batch(
input_task_size, open_batch_remaining_slot, batch_size_limit);
const int num_batches =
compute_num_batches(task_size_from_open_batch, batch_size_limit);
absl::FixedArray<int> task_sizes(num_batches, batch_size_limit);
task_sizes.front() = compute_head_task_size(
input_task_size, open_batch_remaining_slot, batch_size_limit);
task_sizes.back() =
compute_tail_task_size(task_size_from_open_batch, input_task_size,
open_batch_remaining_slot, batch_size_limit);
return task_sizes;
}
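// Worked example (also covered by the Basic test below): input_task_size = 10,
// open_batch_remaining_slot = 3, batch_size_limit = 4 gives
// task_size_from_open_batch = 11, so num_batches = 3 and task_sizes = {3, 4, 3}:
// 3 units close out the open batch and the remaining 7 split into 4 + 3.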
}
}
} | #include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
TEST(InputSplitUtilTest, Basic) {
  // Tuple fields: input_size, open_batch_remaining_slot, batch_size_limit,
  // expected_num_batches, (unused here), expected_head_batch_task_size,
  // expected_tail_batch_task_size.
  for (const auto& batch_task_param :
       {std::tuple<int, int, int, int, int, int, int>{5, 1, 1, 5, 4, 1, 1},
        {10, 3, 4, 3, 2, 3, 3},
        {20, 5, 6, 4, 3, 5, 3},
        {30, 0, 11, 3, 3, 11, 8},
        {5, 6, 8, 1, 0, 5, 5}}) {
const int input_size = std::get<0>(batch_task_param);
const int open_batch_remaining_slot = std::get<1>(batch_task_param);
const int batch_size_limit = std::get<2>(batch_task_param);
const int expected_num_batches = std::get<3>(batch_task_param);
const int expected_head_batch_task_size = std::get<5>(batch_task_param);
const int expected_tail_batch_task_size = std::get<6>(batch_task_param);
ASSERT_LE(open_batch_remaining_slot, batch_size_limit);
InputSplitMetadata input_split_metadata(
input_size, open_batch_remaining_slot, batch_size_limit);
EXPECT_EQ(input_split_metadata.task_sizes().size(), expected_num_batches);
absl::FixedArray<int> expected_task_sizes(expected_num_batches);
for (int i = 0; i < expected_num_batches; i++) {
if (i == 0) {
expected_task_sizes[i] = expected_head_batch_task_size;
} else if (i == expected_num_batches - 1) {
expected_task_sizes[i] = expected_tail_batch_task_size;
} else {
expected_task_sizes[i] = batch_size_limit;
}
}
EXPECT_THAT(input_split_metadata.task_sizes(),
::testing::ElementsAreArray(expected_task_sizes));
EXPECT_EQ(input_split_metadata.DebugString(),
absl::StrJoin(expected_task_sizes, ", "));
}
}
}
}
}
} |
1,523 | cpp | tensorflow/tensorflow | mkl_conv_ops | tensorflow/core/kernels/mkl/mkl_conv_ops.cc | tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_MKL_MKL_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_MKL_CONV_OPS_H_
#ifdef INTEL_MKL
#include <limits>
#include <memory>
#include <vector>
#include "dnnl.hpp"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/kernel_shape_util.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/kernels/conv_grad_ops.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/onednn_env_vars.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
using dnnl::convolution_forward;
using dnnl::prop_kind;
using dnnl::stream;
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
using ConvFwdDesc = dnnl::convolution_forward::desc;
#endif
using ConvFwdPd = dnnl::convolution_forward::primitive_desc;
class MklDnnConvUtil {
protected:
OpKernelContext* context_;
std::vector<int32> strides_;
std::vector<int32> dilations_;
Padding padding_;
TensorFormat data_format_;
public:
MklDnnConvUtil(OpKernelContext* context, const std::vector<int32>& strides,
Padding pad, TensorFormat fm,
const std::vector<int32>& dilations, bool is_depthwise = false)
: context_(context),
strides_(strides),
dilations_(dilations),
padding_(pad),
data_format_(fm) {}
virtual ~MklDnnConvUtil() { context_ = nullptr; }
virtual inline void GetStridesInMklOrder(memory::dims* strides) {
DCHECK(strides);
if (strides_.size() == 4) {
int stride_rows = GetTensorDim(strides_, data_format_, 'H');
int stride_cols = GetTensorDim(strides_, data_format_, 'W');
*strides = {stride_rows, stride_cols};
} else if (strides_.size() == 5) {
int stride_planes = GetTensorDim(strides_, data_format_, '0');
int stride_rows = GetTensorDim(strides_, data_format_, '1');
int stride_cols = GetTensorDim(strides_, data_format_, '2');
*strides = {stride_planes, stride_rows, stride_cols};
}
}
virtual inline void GetDilationsInMklOrder(memory::dims* dilations) {
DCHECK(dilations);
if (dilations_.size() == 4) {
int dilations_rows = GetTensorDim(dilations_, data_format_, 'H');
int dilations_cols = GetTensorDim(dilations_, data_format_, 'W');
*dilations = {dilations_rows, dilations_cols};
} else if (dilations_.size() == 5) {
int dilations_planes = GetTensorDim(dilations_, data_format_, '0');
int dilations_rows = GetTensorDim(dilations_, data_format_, '1');
int dilations_cols = GetTensorDim(dilations_, data_format_, '2');
*dilations = {dilations_planes, dilations_rows, dilations_cols};
}
}
virtual inline void GetInputSizeInMklOrder(const TensorShape& input_shape,
memory::dims* input_dims) {
#define CHECK_BOUNDS(val, err_msg) \
do { \
OP_REQUIRES(context_, \
FastBoundsCheck(val, std::numeric_limits<int>::max()), \
errors::InvalidArgument(err_msg)); \
} while (0)
DCHECK(input_dims);
int64 input_depth_raw = GetTensorDim(input_shape, data_format_, 'C');
int input_depth = static_cast<int>(input_depth_raw);
int64 input_batch_raw = GetTensorDim(input_shape, data_format_, 'N');
CHECK_BOUNDS(input_batch_raw, "Input batch too large");
int input_batch = static_cast<int>(input_batch_raw);
if (strides_.size() == 4) {
int64 input_rows_raw = GetTensorDim(input_shape, data_format_, 'H');
CHECK_BOUNDS(input_rows_raw, "Input rows too large");
int input_rows = static_cast<int>(input_rows_raw);
int64 input_cols_raw = GetTensorDim(input_shape, data_format_, 'W');
CHECK_BOUNDS(input_cols_raw, "Input cols too large");
int input_cols = static_cast<int>(input_cols_raw);
std::vector<memory::dim> input_sizes(4, -1);
input_sizes[MklDnnDims::Dim_N] = input_batch;
input_sizes[MklDnnDims::Dim_C] = input_depth;
input_sizes[MklDnnDims::Dim_H] = input_rows;
input_sizes[MklDnnDims::Dim_W] = input_cols;
*input_dims = input_sizes;
} else if (strides_.size() == 5) {
int64 input_planes_raw = GetTensorDim(input_shape, data_format_, '0');
CHECK_BOUNDS(input_planes_raw, "Input depth too large");
int input_planes = static_cast<int>(input_planes_raw);
int64 input_rows_raw = GetTensorDim(input_shape, data_format_, '1');
CHECK_BOUNDS(input_rows_raw, "Input rows too large");
int input_rows = static_cast<int>(input_rows_raw);
int64 input_cols_raw = GetTensorDim(input_shape, data_format_, '2');
CHECK_BOUNDS(input_cols_raw, "Input cols too large");
int input_cols = static_cast<int>(input_cols_raw);
std::vector<memory::dim> input_sizes(5, -1);
input_sizes[MklDnnDims3D::Dim3d_N] = input_batch;
input_sizes[MklDnnDims3D::Dim3d_C] = input_depth;
input_sizes[MklDnnDims3D::Dim3d_D] = input_planes;
input_sizes[MklDnnDims3D::Dim3d_H] = input_rows;
input_sizes[MklDnnDims3D::Dim3d_W] = input_cols;
*input_dims = input_sizes;
}
#undef CHECK_BOUNDS
}
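  // Note: the dims built above follow the MklDnnDims / MklDnnDims3D
  // enumeration order, i.e. (assuming those enums run N, C, [D,] H, W) the
  // oneDNN canonical NCHW / NCDHW layout, independent of the TF data_format_.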
virtual inline void GetFilterSizeInMklOrder(const TensorShape& input_shape,
const TensorShape& filter_shape,
memory::dims* filter_dims,
bool* is_grouped_convolution,
bool is_depthwise) {
DCHECK(filter_dims);
OP_REQUIRES(context_, filter_shape.dims() == strides_.size(),
errors::InvalidArgument((strides_.size() == 4)
? "filter must be 4-dimensional: "
: "filter must be 5-dimensional: ",
filter_shape.DebugString()));
for (int i = 0; i < ((strides_.size() == 4) ? 3 : 5); i++) {
OP_REQUIRES(context_,
FastBoundsCheck(filter_shape.dim_size(i),
std::numeric_limits<int>::max()),
errors::InvalidArgument("filter too large"));
}
int input_depth = GetTensorDim(input_shape, data_format_, 'C');
if (strides_.size() == 4) {
int filter_rows =
static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_H));
int filter_cols =
static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_W));
int filter_in_depth =
static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_I));
int filter_out_depth =
static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_O));
OP_REQUIRES(context_, input_depth % filter_in_depth == 0,
errors::InvalidArgument(
"input depth must be evenly divisible by filter depth: ",
input_depth, " vs ", filter_in_depth));
*is_grouped_convolution = filter_in_depth != input_depth;
int group_count = input_depth / filter_in_depth;
OP_REQUIRES(context_, group_count > 0,
errors::InvalidArgument(
"grouped convolution must have at least one group: ",
group_count, " groups"));
if (is_depthwise) {
std::vector<memory::dim> filter_sizes(5, -1);
filter_sizes[MKL_GROUP_FILTER_DIM_G] = filter_in_depth;
filter_sizes[MKL_GROUP_FILTER_DIM_O] = filter_out_depth;
filter_sizes[MKL_GROUP_FILTER_DIM_I] = 1;
filter_sizes[MKL_GROUP_FILTER_DIM_H] = filter_rows;
filter_sizes[MKL_GROUP_FILTER_DIM_W] = filter_cols;
*filter_dims = filter_sizes;
} else if (*is_grouped_convolution) {
std::vector<memory::dim> filter_sizes(5, -1);
filter_sizes[MKL_GROUP_FILTER_DIM_G] = group_count;
filter_sizes[MKL_GROUP_FILTER_DIM_O] = filter_out_depth / group_count;
filter_sizes[MKL_GROUP_FILTER_DIM_I] = filter_in_depth;
filter_sizes[MKL_GROUP_FILTER_DIM_H] = filter_rows;
filter_sizes[MKL_GROUP_FILTER_DIM_W] = filter_cols;
*filter_dims = filter_sizes;
} else {
std::vector<memory::dim> filter_sizes(4, -1);
filter_sizes[MklDnnDims::Dim_O] = filter_out_depth;
filter_sizes[MklDnnDims::Dim_I] = filter_in_depth;
filter_sizes[MklDnnDims::Dim_H] = filter_rows;
filter_sizes[MklDnnDims::Dim_W] = filter_cols;
*filter_dims = filter_sizes;
}
} else {
OP_REQUIRES(context_, input_depth == filter_shape.dim_size(3),
errors::InvalidArgument(
"input and filter must have the same depth: ",
input_depth, " vs ", filter_shape.dim_size(3)));
int filter_planes =
static_cast<int>(filter_shape.dim_size(TF_3DFILTER_DIM_P));
int filter_rows =
static_cast<int>(filter_shape.dim_size(TF_3DFILTER_DIM_H));
int filter_cols =
static_cast<int>(filter_shape.dim_size(TF_3DFILTER_DIM_W));
int filter_in_depth =
static_cast<int>(filter_shape.dim_size(TF_3DFILTER_DIM_I));
int filter_out_depth =
static_cast<int>(filter_shape.dim_size(TF_3DFILTER_DIM_O));
std::vector<memory::dim> filter_sizes(5, -1);
filter_sizes[MklDnnDims3D::Dim3d_O] = filter_out_depth;
filter_sizes[MklDnnDims3D::Dim3d_I] = filter_in_depth;
filter_sizes[MklDnnDims3D::Dim3d_D] = filter_planes;
filter_sizes[MklDnnDims3D::Dim3d_H] = filter_rows;
filter_sizes[MklDnnDims3D::Dim3d_W] = filter_cols;
*filter_dims = filter_sizes;
}
}
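  // Sketch of the 2-D filter layouts produced above, for an HWIO TF filter
  // [fh, fw, fi, fo] and input depth C (group_count = C / fi):
  //   depthwise:           [fi, fo, 1, fh, fw]                    (G, O, I, H, W)
  //   grouped (fi != C):   [group_count, fo / group_count, fi, fh, fw]
  //   regular (fi == C):   [fo, fi, fh, fw]                       (O, I, H, W)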
virtual inline void GetFilterSizeInMklOrder(size_t src_index,
size_t filter_index,
memory::dims* filter_dims,
bool* is_grouped_convolution,
bool is_depthwise) {
DCHECK(filter_dims);
GetFilterSizeInMklOrder(GetTfShape(context_, src_index),
GetTfShape(context_, filter_index), filter_dims,
is_grouped_convolution, is_depthwise);
}
virtual inline void GetBiasSizeInMklOrder(size_t bias_index,
memory::dims* bias_dims) {
const Tensor& bias = MklGetInput(context_, bias_index);
if (bias.dims() > 1) {
if (strides_.size() == 4) {
OP_REQUIRES(
context_, bias.dims() <= 4,
errors::InvalidArgument("For NHWC format, bias should have "
"4 or less dimensions",
bias.shape().DebugString()));
} else if (strides_.size() == 5) {
OP_REQUIRES(
context_, bias.dims() <= 5,
errors::InvalidArgument("For NDHWC format, bias should have "
"5 or less dimensions",
bias.shape().DebugString()));
}
for (int i = 0; i < bias.dims() - 1; i++) {
OP_REQUIRES(
context_, bias.dim_size(i) == 1,
errors::InvalidArgument("For bias_dims > 1, all except the last "
"dimension (channel) must be 1: ",
bias.shape().DebugString()));
}
*bias_dims = {static_cast<int>(bias.dim_size(bias.dims() - 1))};
} else {
*bias_dims = {static_cast<int>(bias.dim_size(0))};
}
}
virtual inline void GetOutputAndPadSizeInMklOrder(
const TensorShape& input_shape, const TensorShape& filter_shape,
const memory::dims& strides, const memory::dims& dilations,
memory::dims* output_dims_tf_order, memory::dims* output_dims_mkl_order,
memory::dims* pad_l, memory::dims* pad_r, bool is_grouped_convolution,
bool pad_enabled = false, bool is_depthwise = false) {
DCHECK(output_dims_tf_order);
DCHECK(output_dims_mkl_order);
DCHECK(pad_l);
DCHECK(pad_r);
bool is_conv2d = (strides_.size() == 4);
int input_planes, input_rows, input_cols;
if (is_conv2d) {
input_rows = GetTensorDim(input_shape, data_format_, 'H');
input_cols = GetTensorDim(input_shape, data_format_, 'W');
} else {
input_planes = GetTensorDim(input_shape, data_format_, '0');
input_rows = GetTensorDim(input_shape, data_format_, '1');
input_cols = GetTensorDim(input_shape, data_format_, '2');
}
int filter_planes, filter_rows, filter_cols;
if (is_conv2d) {
filter_rows = filter_shape.dim_size(TF_2DFILTER_DIM_H);
filter_cols = filter_shape.dim_size(TF_2DFILTER_DIM_W);
} else {
filter_planes = filter_shape.dim_size(TF_3DFILTER_DIM_P);
filter_rows = filter_shape.dim_size(TF_3DFILTER_DIM_H);
filter_cols = filter_shape.dim_size(TF_3DFILTER_DIM_W);
}
int stride_planes, stride_rows, stride_cols;
int dilation_planes, dilation_rows, dilation_cols;
if (is_conv2d) {
stride_rows = strides[0];
stride_cols = strides[1];
dilation_rows = dilations[0];
dilation_cols = dilations[1];
} else {
stride_planes = strides[0];
stride_rows = strides[1];
stride_cols = strides[2];
dilation_planes = dilations[0];
dilation_rows = dilations[1];
dilation_cols = dilations[2];
}
int out_batch = GetTensorDim(input_shape, data_format_, 'N');
int out_depth;
if (is_depthwise) {
out_depth = (filter_shape.dim_size(TF_2DFILTER_DIM_I) *
filter_shape.dim_size(TF_2DFILTER_DIM_O));
} else if (is_grouped_convolution) {
out_depth = filter_shape.dim_size(TF_2DFILTER_DIM_O);
} else {
out_depth = filter_shape.dim_size(
is_conv2d ? static_cast<int>(TF_2DFILTER_DIM_O)
: static_cast<int>(TF_3DFILTER_DIM_O));
}
int64 out_rows = 0, out_cols = 0, out_planes = 0;
int64 pad_top = 0, pad_bottom = 0, pad_left = 0, pad_right = 0;
int64 pad_front, pad_back;
if (is_conv2d) {
Padding padding_type;
if (pad_enabled) {
padding_type = Padding::EXPLICIT;
pad_top = static_cast<int64_t>((*pad_l)[0]);
pad_left = static_cast<int64_t>((*pad_l)[1]);
pad_bottom = static_cast<int64_t>((*pad_r)[0]);
pad_right = static_cast<int64_t>((*pad_r)[1]);
} else {
padding_type = padding_;
}
OP_REQUIRES_OK(context_,
GetWindowedOutputSizeVerbose(
input_rows, filter_rows, dilation_rows, stride_rows,
padding_type, &out_rows, &pad_top, &pad_bottom));
OP_REQUIRES_OK(context_,
GetWindowedOutputSizeVerbose(
input_cols, filter_cols, dilation_cols, stride_cols,
padding_type, &out_cols, &pad_left, &pad_right));
} else {
Padding padding_type;
if (pad_enabled) {
padding_type = Padding::EXPLICIT;
pad_front = static_cast<int64>((*pad_l)[0]);
pad_top = static_cast<int64>((*pad_l)[1]);
pad_left = static_cast<int64>((*pad_l)[2]);
pad_back = static_cast<int64>((*pad_r)[0]);
pad_bottom = static_cast<int64>((*pad_r)[1]);
pad_right = static_cast<int64>((*pad_r)[2]);
} else {
padding_type = padding_;
}
OP_REQUIRES_OK(context_, GetWindowedOutputSizeVerbose(
input_planes, filter_planes, dilation_planes,
stride_planes, padding_type, &out_planes,
&pad_front, &pad_back));
OP_REQUIRES_OK(context_,
GetWindowedOutputSizeVerbose(
input_rows, filter_rows, dilation_rows, stride_rows,
padding_type, &out_rows, &pad_top, &pad_bottom));
OP_REQUIRES_OK(context_,
GetWindowedOutputSizeVerbose(
input_cols, filter_cols, dilation_cols, stride_cols,
padding_type, &out_cols, &pad_left, &pad_right));
}
if (is_conv2d) {
if (!pad_enabled) {
*pad_l = {static_cast<int>(pad_top), static_cast<int>(pad_left)};
*pad_r = {static_cast<int>(pad_bottom), static_cast<int>(pad_right)};
}
} else {
if (!pad_enabled) {
*pad_l = {static_cast<int>(pad_front), static_cast<int>(pad_top),
static_cast<int>(pad_left)};
*pad_r = {static_cast<int>(pad_back), static_cast<int>(pad_bottom),
static_cast<int>(pad_right)};
}
}
TensorShape out_shape;
if (is_conv2d) {
OP_REQUIRES_OK(
context_, ShapeFromFormatWithStatus(data_format_, out_batch, out_rows,
out_cols, out_depth, &out_shape));
} else {
OP_REQUIRES_OK(context_, ShapeFromFormatWithStatus(
data_format_, out_batch,
{{out_planes, out_rows, out_cols}},
out_depth, &out_shape));
}
*output_dims_tf_order = TFShapeToMklDnnDims(out_shape);
if (is_grouped_convolution) {
int out_depth = GetTensorDim(out_shape, data_format_, 'C');
int input_depth = GetTensorDim(input_shape, data_format_, 'C');
int filter_in_depth =
static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_I));
int num_groups = input_depth / filter_in_depth;
OP_REQUIRES(
context_, out_depth % num_groups == 0 && out_depth >= num_groups,
errors::InvalidArgument(
"output depth must be evenly divisible by number of groups: ",
out_depth, " vs ", num_groups));
}
if (is_conv2d) {
std::vector<memory::dim> output_sizes(4, -1);
output_sizes[MklDnnDims::Dim_N] = out_batch;
output_sizes[MklDnnDims::Dim_C] = out_depth;
output_sizes[MklDnnDims::Dim_H] = static_cast<int>(out_rows);
output_sizes[MklDnnDims::Dim_W] = static_cast<int>(out_cols);
*output_dims_mkl_order = output_sizes;
} else {
std::vector<memory::dim> output_sizes(5, -1);
output_sizes[MklDnnDims3D::Dim3d_N] = out_batch;
output_sizes[MklDnnDims3D::Dim3d_C] = out_depth;
output_sizes[MklDnnDims3D::Dim3d_D] = static_cast<int>(out_planes);
output_sizes[MklDnnDims3D::Dim3d_H] = static_cast<int>(out_rows);
output_sizes[MklDnnDims3D::Dim3d_W] = static_cast<int>(out_cols);
*output_dims_mkl_order = output_sizes;
}
}
inline void GetOutputAndPadSizeInMklOrder(
size_t src_index, size_t filter_index, const memory::dims& strides,
const memory::dims& dilations, memory::dims* output_dims_tf_order,
memory::dims* output_dims_mkl_order, memory::dims* pad_l,
memory::dims* pad_r, bool is_grouped_convolution, bool is_depthwise) {
DCHECK(output_dims_tf_order);
DCHECK(output_dims_mkl_order);
DCHECK(pad_l);
DCHECK(pad_r);
auto input_tf_shape = GetTfShape(context_, src_index);
auto filter_tf_shape = GetTfShape(context_, filter_index);
if (strides_.size() == 4) {
OP_REQUIRES(context_, input_tf_shape.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input_tf_shape.DebugString()));
OP_REQUIRES(context_, filter_tf_shape.dims() == 4,
errors::InvalidArgument("filter must be 4-dimensional",
filter_tf_shape.DebugString()));
} else {
OP_REQUIRES(context_, input_tf_shape.dims() == 5,
errors::InvalidArgument("input must be 5-dimensional",
input_tf_shape.DebugString()));
OP_REQUIRES(context_, filter_tf_shape.dims() == 5,
errors::InvalidArgument("filter must be 5-dimensional",
filter_tf_shape.DebugString()));
}
GetOutputAndPadSizeInMklOrder(input_tf_shape, filter_tf_shape, strides,
dilations, output_dims_tf_order,
output_dims_mkl_order, pad_l, pad_r,
is_grouped_convolution, is_depthwise);
}
inline void GetConvFwdSizesInMklOrder(
const TensorShape& input_shape, const TensorShape& filter_shape,
memory::dims* input_dims, memory::dims* filter_dims,
memory::dims* strides, memory::dims* dilations,
memory::dims* output_dims_tf_order, memory::dims* output_dims_mkl_order,
memory::dims* pad_l, memory::dims* pad_r, bool* is_grouped_convolution,
bool pad_enabled = false, bool is_depthwise = false) {
DCHECK(input_dims);
DCHECK(filter_dims);
DCHECK(strides);
DCHECK(dilations);
DCHECK(output_dims_tf_order);
DCHECK(output_dims_mkl_order);
DCHECK(pad_l);
DCHECK(pad_r);
GetInputSizeInMklOrder(input_shape, input_dims);
if (!context_->status().ok()) return;
GetFilterSizeInMklOrder(input_shape, filter_shape, filter_dims,
is_grouped_convolution, is_depthwise);
if (!context_->status().ok()) return;
GetStridesInMklOrder(strides);
GetDilationsInMklOrder(dilations);
GetOutputAndPadSizeInMklOrder(
input_shape, filter_shape, *strides, *dilations, output_dims_tf_order,
output_dims_mkl_order, pad_l, pad_r, *is_grouped_convolution,
pad_enabled, is_depthwise);
if (!context_->status().ok()) return;
}
};
template <typename Device, class T, bool is_depthwise>
class MklConvBackpropCommonOp : public OpKernel {
public:
~MklConvBackpropCommonOp() {}
explicit MklConvBackpropCommonOp(OpKernelConstruction* context)
: OpKernel(context) {
string data_format_str;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format_str));
OP_REQUIRES(context, FormatFromString(data_format_str, &data_format_),
errors::InvalidArgument("Invalid data format"));
OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_));
int stride_n = GetTensorDim(strides_, data_format_, 'N');
int stride_c = GetTensorDim(strides_, data_format_, 'C');
OP_REQUIRES(
context, (stride_n == 1 && stride_c == 1),
errors::InvalidArgument("Current implementation does not yet support "
"strides in the batch and depth dimensions."));
if (!is_depthwise) {
OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_));
if (strides_.size() == 4) {
OP_REQUIRES(
context, dilations_.size() == 4,
errors::InvalidArgument("Sliding window dilations field must "
"specify 4 dimensions"));
int dilation_n = GetTensorDim(dilations_, data_format_, 'N');
int dilation_c = GetTensorDim(dilations_, data_format_, 'C');
int dilation_h = GetTensorDim(dilations_, data_format_, 'H');
int dilation_w = GetTensorDim(dilations_, data_format_, 'W');
OP_REQUIRES(context, (dilation_n == 1 && dilation_c == 1),
errors::InvalidArgument(
"Current implementation does not yet support "
"dilations in the batch and depth dimensions."));
OP_REQUIRES(
context, dilation_h > 0 && dilation_w > 0,
errors::InvalidArgument("Dilated rates should be larger than 0."));
}
} else {
dilations_ = {1, 1, 1, 1};
}
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
}
protected:
std::vector<int32> dilations_;
std::vector<int32> strides_;
Padding padding_;
TensorFormat data_format_;
};
template <typename Device, typename T>
class MklDummyOp : public OpKernel {
public:
~MklDummyOp() {}
explicit MklDummyOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
TF_CHECK_OK(
        errors::Unimplemented("This is a dummy op. "
"It should not have been invoked."));
}
};
}
#endif
#endif
#ifdef INTEL_MKL
#include "tensorflow/core/kernels/mkl/mkl_conv_ops.h"
#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include "absl/strings/str_join.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/mkl/mkl_qu | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
struct Conv2DDimensions {
Conv2DDimensions(int n, int h, int w, int c, int fc, int fh, int fw)
: input_batches(n),
input_height(h),
input_width(w),
input_depth(c),
filter_count(fc),
filter_height(fh),
filter_width(fw) {}
int input_batches;
int input_height;
int input_width;
int input_depth;
int filter_count;
int filter_height;
int filter_width;
};
static Tensor GetRandomTensor(const TensorShape& shape) {
Tensor tensor(DT_FLOAT, TensorShape(shape));
tensor.flat<float>() = tensor.flat<float>().setRandom();
return tensor;
}
static Tensor GetRandomInputTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.input_batches, dims.input_height,
dims.input_width, dims.input_depth});
}
static Tensor GetRandomFilterTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.filter_height, dims.filter_width,
dims.input_depth, dims.filter_count});
}
static Tensor GetRandomOutputTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.input_batches, dims.input_height,
dims.input_width, dims.filter_count});
}
static Tensor GetInputSizesTensor(const Conv2DDimensions& dims) {
return test::AsTensor<int32>({dims.input_batches, dims.input_height,
dims.input_width, dims.input_depth});
}
static Tensor GetFilterSizesTensor(const Conv2DDimensions& dims) {
return test::AsTensor<int32>({dims.filter_height, dims.filter_width,
dims.input_depth, dims.filter_count});
}
static Graph* DefaultConv2D(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* conv2d;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d"), "Conv2D")
.Input(input)
.Input(filter)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d));
return graph;
}
static Graph* MklConv2D(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d;
TF_CHECK_OK(NodeBuilder(graph->NewName("mkl_conv_2d"), "_MklConv2D")
.Input(input)
.Input(filter)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d));
return graph;
}
static Graph* DefaultConv2DBwdInput(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_sizes_t = GetInputSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input_sizes =
test::graph::Constant(graph, input_sizes_t, "input_sizes");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* conv2d_bwd_input;
TF_CHECK_OK(
NodeBuilder(graph->NewName("conv_2d_bwd_input"), "Conv2DBackpropInput")
.Input(input_sizes)
.Input(filter)
.Input(out_backprop)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d_bwd_input));
return graph;
}
static Graph* MklConv2DBwdInput(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_sizes_t = GetInputSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input_sizes =
test::graph::Constant(graph, input_sizes_t, "input_sizes");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d_bwd_input;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_input"),
"_MklConv2DBackpropInput")
.Input(input_sizes)
.Input(filter)
.Input(out_backprop)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d_bwd_input));
return graph;
}
static Graph* DefaultConv2DBwdFilter(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_sizes_t = GetFilterSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter_sizes =
test::graph::Constant(graph, filter_sizes_t, "filter_sizes");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* conv2d_bwd_filter;
TF_CHECK_OK(
NodeBuilder(graph->NewName("conv_2d_bwd_filter"), "Conv2DBackpropFilter")
.Input(input)
.Input(filter_sizes)
.Input(out_backprop)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d_bwd_filter));
return graph;
}
static Graph* MklConv2DBwdFilter(const Conv2DDimensions& dims) {
Graph* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_sizes_t = GetFilterSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter_sizes =
test::graph::Constant(graph, filter_sizes_t, "filter_sizes");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d_bwd_filter;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_filter"),
"_MklConv2DBackpropFilter")
.Input(input)
.Input(filter_sizes)
.Input(out_backprop)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d_bwd_filter));
return graph;
}
#define BM_CONCAT(a, b) a##b
#define BM_NAME(p, type, N, H, W, C, FC, FH, FW) \
BM_CONCAT(BM_##p##_##type##_in_##N##_##H##_##W##_##C, _f_##FC##_##FH##_##FW)
#define BM_Conv2DT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (N) * (H) * (W) * (FC); \
int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \
\
Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \
test::Benchmark(#type, BM_CONCAT(kind, Conv2D)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2D(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
#define BM_Conv2DBwdInputT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (N) * (H) * (W) * (C); \
int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \
\
Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \
test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdInput)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2DBwdInput(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DBwdInputT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DBwdInputT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
#define BM_Conv2DBwdFilterT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (FH) * (FW) * (C) * (FC); \
int64 flops_per_iter = num_computed_elements * ((N) * (H) * (W)); \
\
Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \
test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdFilter)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2DBwdFilter(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DBwdFilterT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DBwdFilterT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
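// For example, the first instantiation below,
//   BM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
// registers two benchmarks, BM_Conv2D_Default_cpu_in_32_28_28_96_f_128_3_3 and
// BM_Conv2D_Mkl_cpu_in_32_28_28_96_f_128_3_3, each reporting items processed
// as N*H*W*FC * (C*FH*FW) FLOPs per iteration.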
BM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2D(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2D(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2D(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2D(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2D(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2D(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2DBwdInput(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2DBwdInput(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2DBwdInput(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2DBwdInput(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2DBwdFilter(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2DBwdFilter(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2DBwdFilter(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2DBwdFilter(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
} |
1,524 | cpp | tensorflow/tensorflow | crop_and_resize_op | tensorflow/core/kernels/image/crop_and_resize_op.cc | tensorflow/core/kernels/image/crop_and_resize_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_CROP_AND_RESIZE_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_CROP_AND_RESIZE_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct CropAndResize {
bool operator()(const OpKernelContext* context,
typename TTypes<T, 4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_ind,
const std::string& method_name, float extrapolation_value,
typename TTypes<float, 4>::Tensor crops);
};
template <typename Device, typename T>
struct CropAndResizeBackpropImage {
bool operator()(const OpKernelContext* context,
typename TTypes<float, 4>::ConstTensor grads,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_ind,
typename TTypes<T, 4>::Tensor grads_image,
const std::string& method_name);
};
template <typename Device, typename T>
struct CropAndResizeBackpropBoxes {
bool operator()(const Device& d, typename TTypes<float, 4>::ConstTensor grads,
typename TTypes<T, 4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_ind,
typename TTypes<float, 2>::Tensor grads_boxes);
};
template <typename Device>
struct CheckValidBoxIndexHelper {
void operator()(const Device& d,
typename TTypes<int32, 1>::ConstTensor box_index, int batch,
typename TTypes<bool, 0>::Tensor isvalid) {
isvalid.device(d) = ((box_index >= 0) && (box_index < batch)).all();
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/crop_and_resize_op.h"
#include <functional>
#include <string>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/work_sharder.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/platform/stream_executor.h"
#endif
#if GOOGLE_CUDA
#include "xla/stream_executor/cuda/cuda_activation.h"
using stream_executor::cuda::ScopedActivateExecutorContext;
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
using stream_executor::rocm::ScopedActivateExecutorContext;
#endif
namespace tensorflow {
namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
using Callback = std::function<void()>;
static inline Status ParseAndCheckBoxSizes(const Tensor& boxes,
const Tensor& box_index,
int* num_boxes) {
if (boxes.NumElements() == 0 && box_index.NumElements() == 0) {
*num_boxes = 0;
return absl::OkStatus();
}
if (boxes.dims() != 2) {
return errors::InvalidArgument("boxes must be 2-D",
boxes.shape().DebugString());
}
*num_boxes = boxes.dim_size(0);
if (boxes.dim_size(1) != 4) {
return errors::InvalidArgument("boxes must have 4 columns");
}
if (box_index.dims() != 1) {
return errors::InvalidArgument("box_index must be 1-D",
box_index.shape().DebugString());
}
if (box_index.dim_size(0) != *num_boxes) {
return errors::InvalidArgument("box_index has incompatible shape");
}
return absl::OkStatus();
}
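// Runs `compute` and then `done` only if every entry of `box_index` lies in
// [0, batch_size); otherwise the context is flagged with an OutOfRange error.
// The CPU specialization below performs the check synchronously.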
template <typename Device>
inline void RunIfBoxIndexIsValid(
OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index,
int batch_size, const Callback& compute, const Callback& done);
template <>
inline void RunIfBoxIndexIsValid<CPUDevice>(
OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index,
int batch_size, const Callback& compute, const Callback& done) {
const int num_boxes = box_index.dimension(0);
for (int b = 0; b < num_boxes; ++b) {
OP_REQUIRES_ASYNC(
context, FastBoundsCheck(box_index(b), batch_size),
errors::OutOfRange("box_index has values outside [0, batch_size)"),
done);
}
if (compute) {
compute();
}
if (done) {
done();
}
}
}
template <typename Device, typename T>
class CropAndResizeOp : public AsyncOpKernel {
public:
explicit CropAndResizeOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("method", &method_));
OP_REQUIRES(context, method_ == "bilinear" || method_ == "nearest",
errors::InvalidArgument(
"method must be 'bilinear' or 'nearest'", method_));
OP_REQUIRES_OK(context, context->GetAttr("extrapolation_value",
&extrapolation_value_));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
const Tensor& image = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const Tensor& crop_size = context->input(3);
OP_REQUIRES_ASYNC(context, image.dims() == 4,
errors::InvalidArgument("input image must be 4-D",
image.shape().DebugString()),
done);
const int batch_size = image.dim_size(0);
const int image_height = image.dim_size(1);
const int image_width = image.dim_size(2);
const int depth = image.dim_size(3);
OP_REQUIRES_ASYNC(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"), done);
OP_REQUIRES_ASYNC(
context, boxes.dims() == 2,
absl::InvalidArgumentError(absl::StrCat("boxes must be 2-D, got: ",
boxes.shape().DebugString())),
done);
OP_REQUIRES_ASYNC(
context, TensorShapeUtils::IsVector(box_index.shape()),
errors::InvalidArgument("box_indices must be rank 1 but is shape ",
box_index.shape().DebugString()),
done);
int num_boxes = 0;
OP_REQUIRES_OK_ASYNC(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done);
OP_REQUIRES_ASYNC(context, crop_size.dims() == 1,
errors::InvalidArgument("crop_size must be 1-D",
crop_size.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(
context, crop_size.dim_size(0) == 2,
errors::InvalidArgument("crop_size must have two elements",
crop_size.shape().DebugString()),
done);
auto crop_size_vec = crop_size.vec<int32>();
const int crop_height = internal::SubtleMustCopy(crop_size_vec(0));
const int crop_width = internal::SubtleMustCopy(crop_size_vec(1));
OP_REQUIRES_ASYNC(
context, crop_height > 0 && crop_width > 0,
errors::InvalidArgument("crop dimensions must be positive"), done);
TensorShape shape;
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(num_boxes), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_height), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_width), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done);
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output),
done);
auto compute_callback = [this, context, output]() {
const Tensor& image = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const bool status = functor::CropAndResize<Device, T>()(
context, image.tensor<T, 4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), method_, extrapolation_value_,
output->tensor<float, 4>());
if (!status) {
context->SetStatus(
errors::Internal("Failed to launch CropAndResizeKernel."));
}
};
RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(),
batch_size, std::move(compute_callback),
std::move(done));
}
private:
float extrapolation_value_;
string method_;
};
namespace functor {
template <typename T>
struct CropAndResize<CPUDevice, T> {
bool operator()(OpKernelContext* context,
typename TTypes<T, 4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
const string& method_name, float extrapolation_value,
typename TTypes<float, 4>::Tensor crops) {
const int batch_size = image.dimension(0);
const int image_height = image.dimension(1);
const int image_width = image.dimension(2);
const int num_boxes = crops.dimension(0);
const int crop_height = crops.dimension(1);
const int crop_width = crops.dimension(2);
const int depth = crops.dimension(3);
const Eigen::Tensor<bool, 0, Eigen::RowMajor> only_finite_elements =
boxes.isfinite().all();
if (!only_finite_elements()) {
context->SetStatus(errors::InvalidArgument(
"Boxes contains at least one element that is not finite"));
return false;
}
auto CropAndResizePerBox = [&](int64_t start_box, int64_t limit_box) {
for (int b = start_box; b < limit_box; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32_t b_in = box_index(b);
if (!FastBoundsCheck(b_in, batch_size)) {
continue;
}
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
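        // The box is given in normalized [y1, x1, y2, x2] coordinates; map each
        // output row/column back into source-image space, sampling the box
        // center when the crop extent is 1.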
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1) {
for (int x = 0; x < crop_width; ++x) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
}
continue;
}
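          // Bilinear sampling blends the four surrounding source pixels using
          // the fractional offsets y_lerp/x_lerp; the "nearest" branch below
          // simply copies the closest source pixel.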
if (method_name == "bilinear") {
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
continue;
}
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d) {
const float top_left(static_cast<float>(
image(b_in, top_y_index, left_x_index, d)));
const float top_right(static_cast<float>(
image(b_in, top_y_index, right_x_index, d)));
const float bottom_left(static_cast<float>(
image(b_in, bottom_y_index, left_x_index, d)));
const float bottom_right(static_cast<float>(
image(b_in, bottom_y_index, right_x_index, d)));
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
crops(b, y, x, d) = top + (bottom - top) * y_lerp;
}
}
} else {
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = extrapolation_value;
}
continue;
}
const int closest_x_index = roundf(in_x);
const int closest_y_index = roundf(in_y);
for (int d = 0; d < depth; ++d) {
crops(b, y, x, d) = static_cast<float>(
image(b_in, closest_y_index, closest_x_index, d));
}
}
}
}
}
};
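    // Approximate per-pixel cost, used to shard the per-box loop across the
    // CPU worker threads.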
double cost_per_pixel =
depth * (Eigen::TensorOpCost::AddCost<float>() * 6 +
Eigen::TensorOpCost::MulCost<float>() * 3 +
Eigen::TensorOpCost::CastCost<T, float>() * 4) +
(Eigen::TensorOpCost::AddCost<float>() * 2 +
Eigen::TensorOpCost::AddCost<float>() * 3);
if (method_name == "nearest") {
cost_per_pixel = depth * Eigen::TensorOpCost::CastCost<T, float>() +
Eigen::TensorOpCost::AddCost<float>() * 4 +
Eigen::TensorOpCost::MulCost<float>() * 4;
}
const double cost_per_box = crop_height * crop_width * cost_per_pixel;
const DeviceBase::CpuWorkerThreads& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, num_boxes,
cost_per_box, CropAndResizePerBox);
return true;
}
};
}
template <typename Device, typename T>
class CropAndResizeGradImageOp : public AsyncOpKernel {
public:
explicit CropAndResizeGradImageOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("method", &method_));
OP_REQUIRES(context, method_ == "bilinear" || method_ == "nearest",
errors::InvalidArgument(
"method must be 'bilinear' or 'nearest'", method_));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
const Tensor& grads = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const Tensor& image_size = context->input(3);
OP_REQUIRES_ASYNC(context, grads.dims() == 4,
errors::InvalidArgument("grads image must be 4-D",
grads.shape().DebugString()),
done);
const int crop_height = grads.dim_size(1);
const int crop_width = grads.dim_size(2);
OP_REQUIRES_ASYNC(
context, crop_height > 0 && crop_width > 0,
errors::InvalidArgument("grads dimensions must be positive"), done);
int num_boxes = 0;
OP_REQUIRES_OK_ASYNC(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done);
OP_REQUIRES_ASYNC(
context, grads.dim_size(0) == num_boxes,
errors::InvalidArgument("boxes and grads have incompatible shape"),
done);
OP_REQUIRES_ASYNC(context, image_size.dims() == 1,
errors::InvalidArgument("image_size must be 1-D",
image_size.shape().DebugString()),
done);
OP_REQUIRES_ASYNC(context, image_size.dim_size(0) == 4,
errors::InvalidArgument("image_size must have 4 elements",
image_size.shape().DebugString()),
done);
auto image_size_vec = image_size.vec<int32>();
const int batch_size = internal::SubtleMustCopy(image_size_vec(0));
const int image_height = internal::SubtleMustCopy(image_size_vec(1));
const int image_width = internal::SubtleMustCopy(image_size_vec(2));
const int depth = internal::SubtleMustCopy(image_size_vec(3));
OP_REQUIRES_ASYNC(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"), done);
OP_REQUIRES_ASYNC(
context, grads.dim_size(3) == depth,
errors::InvalidArgument("image_size and grads are incompatible"), done);
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES_ASYNC(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"Deterministic GPU implementation of CropAndResizeBackpropImage"
" not available."),
done);
}
TensorShape shape;
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(batch_size), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_height), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_width), done);
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done);
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output),
done);
auto compute_callback = [this, context, output]() {
const Tensor& grads = context->input(0);
const Tensor& boxes = context->input(1);
const Tensor& box_index = context->input(2);
const bool status = functor::CropAndResizeBackpropImage<Device, T>()(
context, grads.tensor<float, 4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), output->tensor<T, 4>(), method_);
if (!status) {
context->SetStatus(errors::Internal(
"Failed to launch CropAndResizeBackpropImage kernel."));
}
};
RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(),
batch_size, std::move(compute_callback),
std::move(done));
}
private:
string method_;
};
namespace functor {
template <typename T>
struct CropAndResizeBackpropImage<CPUDevice, T> {
bool operator()(const OpKernelContext* context,
typename TTypes<float, 4>::ConstTensor grads,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
typename TTypes<T, 4>::Tensor grads_image,
const string& method_name) {
const int batch_size = grads_image.dimension(0);
const int image_height = grads_image.dimension(1);
const int image_width = grads_image.dimension(2);
const int num_boxes = grads.dimension(0);
const int crop_height = grads.dimension(1);
const int crop_width = grads.dimension(2);
const int depth = grads.dimension(3);
grads_image.setZero();
auto CropAndResizeBackImgPerBox = [&](int64_t start_box,
int64_t limit_box) {
for (int b = start_box; b < limit_box; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32_t b_in = box_index(b);
if (!FastBoundsCheck(b_in, batch_size)) {
continue;
}
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1) {
continue;
}
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x) {
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1) {
continue;
}
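          // Distribute the incoming gradient back to the source pixels that
          // produced this output value: for bilinear, the four neighbors get
          // the same lerp weights as in the forward pass; for nearest, the
          // whole gradient goes to the closest pixel.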
if (method_name == "bilinear") {
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d) {
const float dtop = (1 - y_lerp) * grads(b, y, x, d);
grads_image(b_in, top_y_index, left_x_index, d) +=
static_cast<T>((1 - x_lerp) * dtop);
grads_image(b_in, top_y_index, right_x_index, d) +=
static_cast<T>(x_lerp * dtop);
const float dbottom = y_lerp * grads(b, y, x, d);
grads_image(b_in, bottom_y_index, left_x_index, d) +=
static_cast<T>((1 - x_lerp) * dbottom);
grads_image(b_in, bottom_y_index, right_x_index, d) +=
static_cast<T>(x_lerp * dbottom);
}
} else {
for (int d = 0; d < depth; ++d) {
int closest_x_index = roundf(in_x);
int closest_y_index = roundf(in_y);
grads_image(b_in, closest_y_index, closest_x_index, d) +=
static_cast<T>(grads(b, y, x, d));
}
}
}
}
}
};
const double cost_per_pixel =
(method_name == "bilinear"
? depth * (Eigen::TensorOpCost::AddCost<float>() * 7 +
Eigen::TensorOpCost::MulCost<float>() * 6 +
Eigen::TensorOpCost::CastCost<T, float>() * 4) +
Eigen::TensorOpCost::AddCost<float>() * 4
: depth * (Eigen::TensorOpCost::AddCost<float>() +
Eigen::TensorOpCost::CastCost<T, float>()) +
Eigen::TensorOpCost::AddCost<float>() * 3);
const double cost_per_box = crop_height * crop_width * cost_per_pixel;
const DeviceBase::CpuWorkerThreads& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
int max_threads = OpDeterminismRequired() ? 1 : worker_threads.num_threads;
Shard(max_threads, worker_threads.workers, num_boxes, cost_per_box,
CropAndResizeBackImgPerBox);
return true;
}
};
}
template <typename Device, typename T>
class CropAndResizeGradBoxesOp : public AsyncOpKernel {
public:
explicit CropAndResizeGradBoxesOp(OpKernelConstruction* context)
: AsyncOpKernel(context) {
string method;
OP_REQUIRES_OK(context, context->GetAttr("method", &method));
OP_REQUIRES(context, method == "bilinear",
errors::InvalidArgument("method must be 'bilinear'", method));
}
void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
const Tensor& grads = context->input(0);
const Tensor& boxes = context->input(2);
const Tensor& box_index = context->input(3);
const Tensor& image = context->input(1);
OP_REQUIRES_ASYNC(context, grads.dims() == 4,
errors::InvalidArgument("grads image must be 4-D",
grads.shape().DebugString()),
done);
const int crop_height = grads.dim_size(1);
const int crop_width = grads.dim_size(2);
const int depth = grads.dim_size(3);
OP_REQUIRES_ASYNC(
context, crop_height > 0 && crop_width > 0,
errors::InvalidArgument("grads dimensions must be positive"), done);
OP_REQUIRES_ASYNC(context, image.dims() == 4,
errors::InvalidArgument("input image must be 4-D",
image.shape().DebugString()),
done);
const int batch_size = image.dim_size(0);
const int image_height = image.dim_size(1);
const int image_width = image.dim_size(2);
OP_REQUIRES_ASYNC(
context, image_height > 0 && image_width > 0,
errors::InvalidArgument("image dimensions must be positive"), done);
OP_REQUIRES_ASYNC(context, image.dim_size(3) == depth,
errors::InvalidArgument("image, grads depth differ"),
done);
int num_boxes = 0;
OP_REQUIRES_OK_ASYNC(
context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done);
OP_REQUIRES_ASYNC(
context, grads.dim_size(0) == num_boxes,
errors::InvalidArgument("boxes and grads have incompatible shape"),
done);
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES_ASYNC(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"Deterministic GPU implementation of CropAndResizeBackpropBoxes"
" not available."),
done);
}
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(
context,
context->allocate_output(0, TensorShape({num_boxes, 4}), &output),
done);
auto compute_callback = [context, output]() {
const Tensor& grads = context->input(0);
const Tensor& image = context->input(1);
const Tensor& boxes = context->input(2);
const Tensor& box_index = context->input(3);
const bool status = functor::CropAndResizeBackpropBoxes<Device, T>()(
context->eigen_device<Device>(), grads.tensor<float, 4>(),
image.tensor<T, 4>(), boxes.tensor<float, 2>(),
box_index.tensor<int32, 1>(), output->tensor<float, 2>());
if (!status) {
context->SetStatus(errors::Internal(
"Failed to launch CropAndResizeBackpropBoxes kernel."));
}
};
RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(),
batch_size, std::move(compute_callback),
std::move(done));
}
};
namespace functor {
template <typename T>
struct CropAndResizeBackpropBoxes<CPUDevice, T> {
bool operator()(const CPUDevice& d,
typename TTypes<float, 4>::ConstTensor grads,
typename TTypes<T, 4>::ConstTensor image,
typename TTypes<float, 2>::ConstTensor boxes,
typename TTypes<int32, 1>::ConstTensor box_index,
typename TTypes<float, 2>::Tensor grads_boxes) {
const int batch_size = image.dimension(0);
const int image_height = image.dimension(1);
const int image_width = image.dimension(2);
const int num_boxes = grads.dimension(0);
const int crop_height = grads.dimension(1);
const int crop_width = grads.dimension(2);
const int depth = grads.dimension(3);
grads_boxes.setZero();
for (int b = 0; b < num_boxes; ++b) {
const float y1 = boxes(b, 0);
const float x1 = boxes(b, 1);
const float y2 = boxes(b, 2);
const float x2 = boxes(b, 3);
const int32_t b_in = box_index(b);
if (!FastBoundsCheck(b_in, batch_size)) {
continue;
}
const float height_ratio =
(crop_height > 1)
? static_cast<float>(image_height - 1) / (crop_height - 1)
: 0;
const float width_ratio =
(crop_width > 1)
? static_cast<float>(image_width - 1) / (crop_width - 1)
: 0;
const float height_scale =
(crop_height > 1) ? (y2 - y1) * height_ratio : 0;
const float width_scale = (crop_width > 1) ? (x2 - x1) * width_ratio : 0;
for (int y = 0; y < crop_height; ++y) {
const float in_y = (crop_height > 1)
? y1 * (image_ | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class CropAndResizeOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(float extrapolation_value, const string& method) {
TF_EXPECT_OK(NodeDefBuilder("crop_and_resize_op", "CropAndResize")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Attr("extrapolation_value", extrapolation_value)
.Attr("method", method)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
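// Instantiates a simple 2x2 -> 1x1 crop test for each supported image type,
// covering both the bilinear and nearest sampling methods.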
#define REGISTER_TEST(T) \
TEST_F(CropAndResizeOpTest, TestCropAndResize##T) { \
MakeOp<T>(0, "bilinear"); \
AddInputFromArray<T>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); \
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); \
AddInputFromArray<int32>(TensorShape({1}), {0}); \
AddInputFromArray<int32>(TensorShape({2}), {1, 1}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); \
test::FillValues<float>(&expected, {2.5}); \
test::ExpectTensorEqual<float>(expected, *GetOutput(0)); \
} \
\
TEST_F(CropAndResizeOpTest, TestCropAndResize##T##nearest) { \
MakeOp<T>(0, "nearest"); \
AddInputFromArray<T>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); \
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); \
AddInputFromArray<int32>(TensorShape({1}), {0}); \
AddInputFromArray<int32>(TensorShape({2}), {1, 1}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); \
test::FillValues<float>(&expected, {4.0}); \
test::ExpectTensorEqual<float>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Uint8) {
MakeOp<uint8>(0, "bilinear");
AddInputFromArray<uint8>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {2.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Uint8NearestNeighbor) {
MakeOp<uint8>(0, "nearest");
AddInputFromArray<uint8>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Flipped) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {2.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1FlippedNearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2,
2, 2.5, 3,
3, 3.5, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3NearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3Flipped) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{4, 3.5, 3,
3, 2.5, 2,
2, 1.5, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3FlippedNearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{4, 4, 3,
4, 4, 3,
2, 2, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 1, 1, 0, 0, 0.5, 0.5});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9,
1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2NearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 1, 1, 0, 0, 0.5, 0.5});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9,
1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2Flipped) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {1, 1, 0, 0, 0.5, 0.5, 0, 0});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{9, 7,
3, 1,
5, 4,
2, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2FlippedNearestNeighbor) {
MakeOp<float>(0, "nearest");
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({2, 4}), {1, 1, 0, 0, 0.5, 0.5, 0, 0});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&expected,
{9, 7,
3, 1,
5, 4,
2, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3Extrapolated) {
const float v = -1;
MakeOp<float>(v, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {-1, -1, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{v, v, v,
v, 1, 2,
v, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3NoCrop) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<int32>(TensorShape({0}), {});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({0, 3, 3, 1}));
test::FillValues<float>(&expected, {});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CropAndResizeOpTest, TestInvalidInputShape) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "input image must be 4-D")) << s;
}
TEST_F(CropAndResizeOpTest, TestInvalidBoxIndexShape) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "box_index has incompatible shape"))
<< s;
}
TEST_F(CropAndResizeOpTest, TestInvalidBoxIndex) {
MakeOp<float>(0, "bilinear");
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<int32>(TensorShape({1}), {1});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(),
"box_index has values outside [0, batch_size)"))
<< s;
}
TEST_F(CropAndResizeOpTest, TestWithSharding) {
MakeOp<float>(0, "bilinear");
const int kLength = 999;
const int kHalf = (kLength + 1) / 2;
AddInput<float>(TensorShape({1, kLength, kLength, 1}),
[=](int i) -> float { return i % kLength; });
AddInputFromArray<float>(TensorShape({2, 4}),
{0, 0, 0.5, 0.5, 0.5, 0.5, 1, 1});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<int32>(TensorShape({2}), {kHalf, kHalf});
TF_ASSERT_OK(RunOpKernel());
Tensor result1(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1}));
test::FillFn<float>(&result1, [=](int i) -> float { return i % kHalf; });
Tensor result2(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1}));
test::FillFn<float>(&result2,
[=](int i) -> float { return i % kHalf + kHalf - 1; });
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, kHalf, kHalf, 1}));
TF_ASSERT_OK(tensor::Concat({result1, result2}, &expected));
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
} |
1,525 | cpp | tensorflow/tensorflow | colorspace_op | tensorflow/core/kernels/image/colorspace_op.cc | tensorflow/core/kernels/image/colorspace_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_COLORSPACE_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_COLORSPACE_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct RGBToHSV {
void operator()(const Device &d,
typename TTypes<T, 2>::ConstTensor input_data,
typename TTypes<T, 1>::Tensor range,
typename TTypes<T, 2>::Tensor output_data) {
auto H = output_data.template chip<1>(0);
auto S = output_data.template chip<1>(1);
auto V = output_data.template chip<1>(2);
auto R = input_data.template chip<1>(0);
auto G = input_data.template chip<1>(1);
auto B = input_data.template chip<1>(2);
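    // V is the per-pixel maximum over channels, S = (V - min) / V, and H is
    // derived from whichever channel attains the maximum; the trailing selects
    // zero out H for gray pixels and wrap negative hues into [0, 1).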
Eigen::IndexList<Eigen::type2index<1> > channel_axis;
V.device(d) = input_data.maximum(channel_axis);
range.device(d) = V - input_data.minimum(channel_axis);
S.device(d) = (V > T(0)).select(range / V, V.constant(T(0)));
auto norm = range.inverse() * (T(1) / T(6));
H.device(d) = (R == V).select(
norm * (G - B), (G == V).select(norm * (B - R) + T(2) / T(6),
norm * (R - G) + T(4) / T(6)));
H.device(d) = (range > T(0)).select(H, H.constant(T(0)));
H.device(d) = (H < T(0)).select(H + T(1), H);
}
};
template <typename Device, typename T>
struct HSVToRGB {
void operator()(const Device &d,
typename TTypes<T, 2>::ConstTensor input_data,
typename TTypes<T, 2>::Tensor output_data) {
auto H = input_data.template chip<1>(0);
auto S = input_data.template chip<1>(1);
auto V = input_data.template chip<1>(2);
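    // Reconstruct RGB from piecewise-linear ramps of the scaled hue (dh = 6H),
    // then mix each ramp with white according to the saturation and scale by V.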
auto dh = H * T(6);
auto dr = ((dh - T(3)).abs() - T(1)).cwiseMax(T(0)).cwiseMin(T(1));
auto dg = (-(dh - T(2)).abs() + T(2)).cwiseMax(T(0)).cwiseMin(T(1));
auto db = (-(dh - T(4)).abs() + T(2)).cwiseMax(T(0)).cwiseMin(T(1));
auto one_s = -S + T(1);
auto R = output_data.template chip<1>(0);
auto G = output_data.template chip<1>(1);
auto B = output_data.template chip<1>(2);
R.device(d) = (one_s + S * dr) * V;
G.device(d) = (one_s + S * dg) * V;
B.device(d) = (one_s + S * db) * V;
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/colorspace_op.h"
#include <algorithm>
#include <cmath>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class RGBToHSVOp : public OpKernel {
public:
explicit RGBToHSVOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input.shape().DebugString()));
auto channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, channels == 3,
errors::FailedPrecondition(
"input must have 3 channels but input only has ", channels,
" channels."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
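    // View the input as a (num_pixels, 3) matrix: all leading dimensions are
    // collapsed and the trailing channel dimension is kept.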
typename TTypes<T, 2>::ConstTensor input_data = input.flat_inner_dims<T>();
typename TTypes<T, 2>::Tensor output_data = output->flat_inner_dims<T>();
Tensor trange;
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({input_data.dimension(0)}),
&trange));
typename TTypes<T, 1>::Tensor range(trange.tensor<T, 1>());
functor::RGBToHSV<Device, T>()(context->eigen_device<Device>(), input_data,
range, output_data);
}
};
template <typename Device, typename T>
class HSVToRGBOp : public OpKernel {
public:
explicit HSVToRGBOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input.shape().DebugString()));
auto channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, channels == 3,
errors::FailedPrecondition(
"input must have 3 channels but input only has ", channels,
" channels."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
typename TTypes<T, 2>::ConstTensor input_data = input.flat_inner_dims<T>();
typename TTypes<T, 2>::Tensor output_data = output->flat_inner_dims<T>();
functor::HSVToRGB<Device, T>()(context->eigen_device<Device>(), input_data,
output_data);
}
};
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("RGBToHSV").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
RGBToHSVOp<CPUDevice, T>); \
template class RGBToHSVOp<CPUDevice, T>; \
REGISTER_KERNEL_BUILDER( \
Name("HSVToRGB").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
HSVToRGBOp<CPUDevice, T>); \
template class HSVToRGBOp<CPUDevice, T>;
TF_CALL_float(REGISTER_CPU);
TF_CALL_double(REGISTER_CPU);
TF_CALL_half(REGISTER_CPU);
TF_CALL_bfloat16(REGISTER_CPU);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU(T) \
template <> \
void RGBToHSV<GPUDevice, T>::operator()( \
const GPUDevice& d, TTypes<T, 2>::ConstTensor input_data, \
TTypes<T, 1>::Tensor range, TTypes<T, 2>::Tensor output_data); \
extern template struct RGBToHSV<GPUDevice, T>; \
template <> \
void HSVToRGB<GPUDevice, T>::operator()( \
const GPUDevice& d, TTypes<T, 2>::ConstTensor input_data, \
TTypes<T, 2>::Tensor output_data); \
extern template struct HSVToRGB<GPUDevice, T>;
TF_CALL_float(DECLARE_GPU);
TF_CALL_double(DECLARE_GPU);
}
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("RGBToHSV").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
RGBToHSVOp<GPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("HSVToRGB").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
HSVToRGBOp<GPUDevice, T>);
TF_CALL_float(REGISTER_GPU);
TF_CALL_double(REGISTER_GPU);
#endif
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
template <typename T>
class RGBToHSVOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_EXPECT_OK(NodeDefBuilder("rgb_to_hsv_op", "RGBToHSV")
.Input(FakeInput(data_type))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void CheckBlack(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, 0.0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckGray(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.5, .5, .5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, .5});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckWhite(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {1, 1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, 1.0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckRedMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.8f, .4f, .2f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * .2 / .6;
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckGreenMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.2f, .8f, .4f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (2.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckBlueMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.4f, .2f, .8f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (4.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckNegativeDifference(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0, .1f, .2f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (4.0 + (-.1 / .2));
T expected_s = .2 / .2;
T expected_v = .2 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
};
template <typename T>
class HSVToRGBOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_EXPECT_OK(NodeDefBuilder("hsv_to_rgb_op", "HSVToRGB")
.Input(FakeInput(data_type))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void CheckBlack(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0, 0, 0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckGray(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, .5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.5, .5, .5});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckWhite(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, 1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {1, 1, 1});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckRedMax(DataType data_type) {
T expected_h = 1. / 6. * .2 / .6;
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.8, .4, .2});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckGreenMax(DataType data_type) {
T expected_h = 1. / 6. * (2.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.2, .8, .4});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckBlueMax(DataType data_type) {
T expected_h = 1. / 6. * (4.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.0;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.4, .2, .8});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckNegativeDifference(DataType data_type) {
T expected_h = 1. / 6. * (4.0 + (-.1 / .2));
T expected_s = .2 / .2;
T expected_v = .2 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0, .1f, .2f});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
};
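// Expands to one TEST_F per color-conversion check, so every case is exercised
// for the given fixture and data type.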
#define TEST_COLORSPACE(test, dt) \
TEST_F(test, CheckBlack) { \
MakeOp(dt); \
CheckBlack(dt); \
} \
TEST_F(test, CheckGray) { \
MakeOp(dt); \
CheckGray(dt); \
} \
TEST_F(test, CheckWhite) { \
MakeOp(dt); \
CheckWhite(dt); \
} \
TEST_F(test, CheckRedMax) { \
MakeOp(dt); \
CheckRedMax(dt); \
} \
TEST_F(test, CheckGreenMax) { \
MakeOp(dt); \
CheckGreenMax(dt); \
} \
TEST_F(test, CheckBlueMax) { \
MakeOp(dt); \
CheckBlueMax(dt); \
} \
TEST_F(test, CheckNegativeDifference) { \
MakeOp(dt); \
CheckNegativeDifference(dt); \
}
typedef RGBToHSVOpTest<float> rgb_to_hsv_float;
typedef RGBToHSVOpTest<double> rgb_to_hsv_double;
TEST_COLORSPACE(rgb_to_hsv_float, DT_FLOAT);
TEST_COLORSPACE(rgb_to_hsv_double, DT_DOUBLE);
typedef HSVToRGBOpTest<float> hsv_to_rgb_float;
typedef HSVToRGBOpTest<double> hsv_to_rgb_double;
TEST_COLORSPACE(hsv_to_rgb_float, DT_FLOAT);
TEST_COLORSPACE(hsv_to_rgb_double, DT_DOUBLE);
} |
1,526 | cpp | tensorflow/tensorflow | sampling_kernels | tensorflow/core/kernels/image/sampling_kernels.cc | tensorflow/core/kernels/image/sampling_kernels_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_SAMPLING_KERNELS_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_SAMPLING_KERNELS_H_
#include <cmath>
#include "tensorflow/core/lib/core/stringpiece.h"
namespace tensorflow {
namespace functor {
enum SamplingKernelType {
Lanczos1Kernel,
Lanczos3Kernel,
Lanczos5Kernel,
GaussianKernel,
BoxKernel,
TriangleKernel,
KeysCubicKernel,
MitchellCubicKernel,
SamplingKernelTypeEnd
};
SamplingKernelType SamplingKernelTypeFromString(const StringPiece str);
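// Lanczos kernel: sinc(x) * sinc(x / radius) for |x| <= radius, written out as
// radius * sin(pi*x) * sin(pi*x / radius) / (pi^2 * x^2), with a short-circuit
// near x == 0 to avoid dividing by zero.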
struct LanczosKernelFunc {
explicit LanczosKernelFunc(float _radius) : radius(_radius) {}
float operator()(float x) const {
constexpr float kPI = 3.14159265359;
x = std::abs(x);
if (x > radius) return 0.0;
if (x <= 1e-3) {
return 1.0;
}
return radius * std::sin(kPI * x) * std::sin(kPI * x / radius) /
(kPI * kPI * x * x);
}
float Radius() const { return radius; }
const float radius;
};
struct GaussianKernelFunc {
static constexpr float kRadiusMultiplier = 3.0f;
explicit GaussianKernelFunc(float _radius = 1.5f)
: radius(_radius), sigma(_radius / kRadiusMultiplier) {}
float operator()(float x) const {
x = std::abs(x);
if (x >= radius) return 0.0;
return std::exp(-x * x / (2.0 * sigma * sigma));
}
float Radius() const { return radius; }
const float radius;
const float sigma;
};
struct BoxKernelFunc {
float operator()(float x) const {
x = std::abs(x);
return x < 0.5f ? 1. : x == 0.5f ? 0.5f : 0.0f;
}
float Radius() const { return 1.f; }
};
struct TriangleKernelFunc {
float operator()(float x) const {
x = std::abs(x);
return x < 1.0f ? 1.0f - x : 0.0f;
}
float Radius() const { return 1.f; }
};
struct KeysCubicKernelFunc {
float operator()(float x) const {
x = std::abs(x);
if (x >= 2.0f) {
return 0.0f;
} else if (x >= 1.0f) {
return ((-0.5f * x + 2.5f) * x - 4.0f) * x + 2.0f;
} else {
return ((1.5f * x - 2.5f) * x) * x + 1.0f;
}
}
float Radius() const { return 2.f; }
};
struct MitchellCubicKernelFunc {
float operator()(float x) const {
x = std::abs(x);
if (x >= 2.0f) {
return 0.0f;
} else if (x >= 1.0f) {
return (((-7.0f / 18.0f) * x + 2.0f) * x - 10.0f / 3.0f) * x +
16.0f / 9.0f;
} else {
return (((7.0f / 6.0f) * x - 2.0f) * x) * x + 8.0f / 9.0f;
}
}
float Radius() const { return 2.f; }
};
inline LanczosKernelFunc CreateLanczos1Kernel() {
return LanczosKernelFunc(1.0);
}
inline LanczosKernelFunc CreateLanczos3Kernel() {
return LanczosKernelFunc(3.0);
}
inline LanczosKernelFunc CreateLanczos5Kernel() {
return LanczosKernelFunc(5.0);
}
inline GaussianKernelFunc CreateGaussianKernel() {
return GaussianKernelFunc(1.5);
}
inline BoxKernelFunc CreateBoxKernel() { return BoxKernelFunc(); }
inline TriangleKernelFunc CreateTriangleKernel() {
return TriangleKernelFunc();
}
inline KeysCubicKernelFunc CreateKeysCubicKernel() {
return KeysCubicKernelFunc();
}
inline MitchellCubicKernelFunc CreateMitchellCubicKernel() {
return MitchellCubicKernelFunc();
}
}
}
#endif
#include "tensorflow/core/kernels/image/sampling_kernels.h"
#include <string>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace functor {
SamplingKernelType SamplingKernelTypeFromString(const StringPiece str) {
const string lower_case = absl::AsciiStrToLower(str);
if (lower_case == "lanczos1") return Lanczos1Kernel;
if (lower_case == "lanczos3") return Lanczos3Kernel;
if (lower_case == "lanczos5") return Lanczos5Kernel;
if (lower_case == "gaussian") return GaussianKernel;
if (lower_case == "box") return BoxKernel;
if (lower_case == "triangle") return TriangleKernel;
if (lower_case == "keyscubic") return KeysCubicKernel;
if (lower_case == "mitchellcubic") return MitchellCubicKernel;
return SamplingKernelTypeEnd;
}
}
} | #include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace functor {
namespace {
class KernelsTest : public ::testing::Test {
protected:
template <typename KernelType>
void TestKernelValues(const KernelType& kernel, const std::vector<float>& x,
const std::vector<float>& expected) const {
ASSERT_EQ(x.size(), expected.size());
for (int i = 0; i < x.size(); ++i) {
constexpr float kTolerance = 1e-3;
EXPECT_NEAR(kernel(x[i]), expected[i], kTolerance);
EXPECT_NEAR(kernel(-x[i]), expected[i], kTolerance);
}
}
};
TEST_F(KernelsTest, TestKernelValues) {
TestKernelValues(CreateLanczos1Kernel(), {0.0f, 0.5f, 1.0f, 1.5},
{1.0f, 0.4052f, 0.0f, 0.0f});
TestKernelValues(CreateLanczos3Kernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5f, 3.5},
{1.0f, 0.6079f, 0.0f, -0.1351f, 0.0243f, 0.0f});
TestKernelValues(
CreateLanczos5Kernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5},
{1.0f, 0.6262f, 0.0f, -0.1822f, 0.0810569f, -0.0334f, 0.0077f, 0.0f});
TestKernelValues(CreateGaussianKernel(), {0.0f, 0.5f, 1.0f, 1.5},
{1.0f, 0.6065f, 0.1353f, 0.0f});
TestKernelValues(CreateBoxKernel(), {0.0f, 0.25f, 0.5f, 1.0f},
{1.0f, 1.0f, 0.5f, 0.0f});
TestKernelValues(CreateTriangleKernel(), {0.0f, 0.5f, 1.0f},
{1.0f, 0.5f, 0.0f});
TestKernelValues(CreateKeysCubicKernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5},
{1.0f, 0.5625f, 0.0f, -0.0625f, 0.0f});
TestKernelValues(CreateMitchellCubicKernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5},
{0.8889f, 0.5347f, 0.0556f, -0.0347f, 0.0f});
}
TEST(SamplingKernelTypeFromStringTest, Works) {
EXPECT_EQ(SamplingKernelTypeFromString("lanczos1"), Lanczos1Kernel);
EXPECT_EQ(SamplingKernelTypeFromString("lanczos3"), Lanczos3Kernel);
EXPECT_EQ(SamplingKernelTypeFromString("lanczos5"), Lanczos5Kernel);
EXPECT_EQ(SamplingKernelTypeFromString("gaussian"), GaussianKernel);
EXPECT_EQ(SamplingKernelTypeFromString("box"), BoxKernel);
EXPECT_EQ(SamplingKernelTypeFromString("triangle"), TriangleKernel);
EXPECT_EQ(SamplingKernelTypeFromString("mitchellcubic"), MitchellCubicKernel);
EXPECT_EQ(SamplingKernelTypeFromString("keyscubic"), KeysCubicKernel);
EXPECT_EQ(SamplingKernelTypeFromString("not a kernel"),
SamplingKernelTypeEnd);
}
}
}
} |
1,527 | cpp | tensorflow/tensorflow | non_max_suppression_op | tensorflow/core/kernels/image/non_max_suppression_op.cc | tensorflow/core/kernels/image/non_max_suppression_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_NON_MAX_SUPPRESSION_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_NON_MAX_SUPPRESSION_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
extern const int kNmsBoxesPerTread;
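// GPU build only: selects boxes to keep from the already score-sorted
// `d_sorted_boxes_float_ptr`, writing the kept indices to `d_selected_indices`
// and the count to `h_num_boxes_to_keep`.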
Status NmsGpu(const float* d_sorted_boxes_float_ptr, const int num_boxes,
const float iou_threshold, int* d_selected_indices,
int* h_num_boxes_to_keep, OpKernelContext* context,
const int max_boxes, bool flip_boxes = false);
#endif
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/non_max_suppression_op.h"
#include <cmath>
#include <functional>
#include <limits>
#include <queue>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
static inline void CheckScoreSizes(OpKernelContext* context, int num_boxes,
const Tensor& scores) {
OP_REQUIRES(context, scores.dims() == 1,
errors::InvalidArgument(
"scores must be 1-D", scores.shape().DebugString(),
" (Shape must be rank 1 but is rank ", scores.dims(), ")"));
OP_REQUIRES(
context, scores.dim_size(0) == num_boxes,
errors::InvalidArgument("scores has incompatible shape (Dimensions must "
"be equal, but are ",
num_boxes, " and ", scores.dim_size(0), ")"));
}
static inline void ParseAndCheckOverlapSizes(OpKernelContext* context,
const Tensor& overlaps,
int* num_boxes) {
OP_REQUIRES(context, overlaps.dims() == 2,
errors::InvalidArgument("overlaps must be 2-D",
overlaps.shape().DebugString()));
*num_boxes = overlaps.dim_size(0);
OP_REQUIRES(context, overlaps.dim_size(1) == *num_boxes,
errors::InvalidArgument("overlaps must be square",
overlaps.shape().DebugString()));
}
static inline void ParseAndCheckBoxSizes(OpKernelContext* context,
const Tensor& boxes, int* num_boxes) {
OP_REQUIRES(context, boxes.dims() == 2,
errors::InvalidArgument(
"boxes must be 2-D", boxes.shape().DebugString(),
" (Shape must be rank 2 but is rank ", boxes.dims(), ")"));
*num_boxes = boxes.dim_size(0);
OP_REQUIRES(context, boxes.dim_size(1) == 4,
errors::InvalidArgument("boxes must have 4 columns (Dimension "
"must be 4 but is ",
boxes.dim_size(1), ")"));
}
static inline void CheckCombinedNMSScoreSizes(OpKernelContext* context,
int num_boxes,
const Tensor& scores) {
OP_REQUIRES(context, scores.dims() == 3,
errors::InvalidArgument("scores must be 3-D",
scores.shape().DebugString()));
OP_REQUIRES(context, scores.dim_size(1) == num_boxes,
errors::InvalidArgument("scores has incompatible shape"));
}
static inline void ParseAndCheckCombinedNMSBoxSizes(OpKernelContext* context,
const Tensor& boxes,
int* num_boxes,
const int num_classes) {
OP_REQUIRES(context, boxes.dims() == 4,
errors::InvalidArgument("boxes must be 4-D",
boxes.shape().DebugString()));
bool box_check = boxes.dim_size(2) == 1 || boxes.dim_size(2) == num_classes;
OP_REQUIRES(context, box_check,
errors::InvalidArgument(
"third dimension of boxes must be either 1 or num classes"));
*num_boxes = boxes.dim_size(1);
OP_REQUIRES(context, boxes.dim_size(3) == 4,
errors::InvalidArgument("boxes must have 4 columns"));
}
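// Computes the intersection-over-union (IOU) of boxes i and j, where each box
// is stored as four coordinates (y1, x1, y2, x2). The min/max normalization
// below makes the result independent of corner ordering, so flipped boxes are
// handled, and degenerate boxes (zero or negative area) yield IOU 0.
// Worked example: i = [0, 0, 1, 1] and j = [0, 0.5, 1, 1.5] each have area 1,
// the intersection is 0.5, so IOU = 0.5 / (1 + 1 - 0.5) = 1/3.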
template <typename T>
static inline float IOU(typename TTypes<T, 2>::ConstTensor boxes, int i,
int j) {
const float ymin_i = Eigen::numext::mini<float>(
static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2)));
const float xmin_i = Eigen::numext::mini<float>(
static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3)));
const float ymax_i = Eigen::numext::maxi<float>(
static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2)));
const float xmax_i = Eigen::numext::maxi<float>(
static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3)));
const float ymin_j = Eigen::numext::mini<float>(
static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2)));
const float xmin_j = Eigen::numext::mini<float>(
static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3)));
const float ymax_j = Eigen::numext::maxi<float>(
static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2)));
const float xmax_j = Eigen::numext::maxi<float>(
static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3)));
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
if (area_i <= 0 || area_j <= 0) {
return 0.0;
}
const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
const float intersection_area =
Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
static inline float IOU(const float* boxes, int i, int j) {
const float ymin_i = Eigen::numext::mini<float>(boxes[i], boxes[i + 2]);
const float xmin_i = Eigen::numext::mini<float>(boxes[i + 1], boxes[i + 3]);
const float ymax_i = Eigen::numext::maxi<float>(boxes[i], boxes[i + 2]);
const float xmax_i = Eigen::numext::maxi<float>(boxes[i + 1], boxes[i + 3]);
const float ymin_j = Eigen::numext::mini<float>(boxes[j], boxes[j + 2]);
const float xmin_j = Eigen::numext::mini<float>(boxes[j + 1], boxes[j + 3]);
const float ymax_j = Eigen::numext::maxi<float>(boxes[j], boxes[j + 2]);
const float xmax_j = Eigen::numext::maxi<float>(boxes[j + 1], boxes[j + 3]);
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
if (area_i <= 0 || area_j <= 0) {
return 0.0;
}
const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
const float intersection_area =
Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
template <typename T>
static inline T Overlap(typename TTypes<T, 2>::ConstTensor overlaps, int i,
int j) {
return overlaps(i, j);
}
template <typename T>
static inline std::function<float(int, int)> CreateIOUSimilarityFn(
const Tensor& boxes) {
typename TTypes<T, 2>::ConstTensor boxes_data = boxes.tensor<T, 2>();
return std::bind(&IOU<T>, boxes_data, std::placeholders::_1,
std::placeholders::_2);
}
template <typename T>
static inline std::function<T(int, int)> CreateOverlapSimilarityFn(
const Tensor& overlaps) {
typename TTypes<T, 2>::ConstTensor overlaps_data =
overlaps.tensor<float, 2>();
return std::bind(&Overlap<T>, overlaps_data, std::placeholders::_1,
std::placeholders::_2);
}
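// Greedy non-max suppression over a single batch of boxes. Candidates whose
// score exceeds score_threshold go into a max-heap ordered by score (ties
// broken toward the lower box index). Each popped candidate is compared
// against the boxes selected since it was last enqueued: with hard NMS
// (soft_nms_sigma == 0) it is dropped as soon as its similarity to a selected
// box exceeds similarity_threshold, while with soft-NMS its score is decayed
// by a Gaussian weight and it is re-enqueued if the decayed score still beats
// score_threshold. Selection stops after max_output_size boxes or when the
// queue empties; outputs are optionally padded to max_output_size.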
template <typename T>
void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores,
int num_boxes, const Tensor& max_output_size,
const T similarity_threshold,
const T score_threshold, const T soft_nms_sigma,
const std::function<float(int, int)>& similarity_fn,
bool return_scores_tensor = false,
bool pad_to_max_output_size = false,
int* ptr_num_valid_outputs = nullptr) {
const int output_size = max_output_size.scalar<int>()();
OP_REQUIRES(context, output_size >= 0,
errors::InvalidArgument("output size must be non-negative"));
std::vector<T> scores_data(num_boxes);
std::copy_n(scores.flat<T>().data(), num_boxes, scores_data.begin());
struct Candidate {
int box_index;
T score;
int suppress_begin_index;
};
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return ((bs_i.score == bs_j.score) && (bs_i.box_index > bs_j.box_index)) ||
bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
for (int i = 0; i < scores_data.size(); ++i) {
if (scores_data[i] > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, scores_data[i], 0}));
}
}
T scale = static_cast<T>(0.0);
bool is_soft_nms = soft_nms_sigma > static_cast<T>(0.0);
if (is_soft_nms) {
scale = static_cast<T>(-0.5) / soft_nms_sigma;
}
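// suppress_weight is the score decay applied for each overlapping,
// already-selected box: weight = exp(-0.5 * sim^2 / soft_nms_sigma) when
// soft-NMS is enabled, and a hard 0/1 cut at similarity_threshold otherwise.
// Example: with soft_nms_sigma = 0.5 (so scale = -1) and sim = 0.5 the weight
// is exp(-0.25) ~= 0.78, i.e. the candidate keeps about 78% of its score.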
auto suppress_weight = [similarity_threshold, scale,
is_soft_nms](const T sim) {
const T weight = Eigen::numext::exp<T>(scale * sim * sim);
return is_soft_nms || sim <= similarity_threshold ? weight
: static_cast<T>(0.0);
};
std::vector<int> selected;
std::vector<T> selected_scores;
float similarity;
T original_score;
Candidate next_candidate;
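// Main selection loop: a candidate whose score is unchanged after the
// suppression check is selected immediately; a candidate whose score was
// decayed (soft-NMS) but still exceeds score_threshold is pushed back into the
// queue with suppress_begin_index advanced, so only boxes selected after this
// point are re-checked the next time it is popped.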
while (selected.size() < output_size && !candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
original_score = next_candidate.score;
candidate_priority_queue.pop();
bool should_hard_suppress = false;
for (int j = static_cast<int>(selected.size()) - 1;
j >= next_candidate.suppress_begin_index; --j) {
similarity = similarity_fn(next_candidate.box_index, selected[j]);
next_candidate.score *= suppress_weight(static_cast<T>(similarity));
if (!is_soft_nms && static_cast<T>(similarity) > similarity_threshold) {
should_hard_suppress = true;
break;
}
if (next_candidate.score <= score_threshold) break;
}
next_candidate.suppress_begin_index = selected.size();
if (!should_hard_suppress) {
if (next_candidate.score == original_score) {
selected.push_back(next_candidate.box_index);
selected_scores.push_back(next_candidate.score);
continue;
}
if (next_candidate.score > score_threshold) {
candidate_priority_queue.push(next_candidate);
}
}
}
int num_valid_outputs = selected.size();
if (pad_to_max_output_size) {
selected.resize(output_size, 0);
selected_scores.resize(output_size, static_cast<T>(0));
}
if (ptr_num_valid_outputs) {
*ptr_num_valid_outputs = num_valid_outputs;
}
Tensor* output_indices = nullptr;
TensorShape output_shape({static_cast<int>(selected.size())});
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape, &output_indices));
TTypes<int, 1>::Tensor output_indices_data = output_indices->tensor<int, 1>();
std::copy_n(selected.begin(), selected.size(), output_indices_data.data());
if (return_scores_tensor) {
Tensor* output_scores = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(1, output_shape, &output_scores));
typename TTypes<T, 1>::Tensor output_scores_data =
output_scores->tensor<T, 1>();
std::copy_n(selected_scores.begin(), selected_scores.size(),
output_scores_data.data());
}
}
struct ResultCandidate {
int box_index;
float score;
int class_idx;
float box_coord[4];
};
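// Runs hard NMS for one (batch, class) pair of the combined op: boxes scoring
// above score_threshold for this class are ranked by score and greedily kept
// while their IOU with every previously kept box stays at or below
// iou_threshold; the survivors (at most size_per_class) are written into
// result_candidate_vec together with their class index and coordinates.
// When q == 1 all classes share the same box coordinates; when q == num_classes
// each class indexes its own box slice.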
void DoNMSPerClass(int batch_idx, int class_idx, const float* boxes_data,
const float* scores_data, int num_boxes, int q,
int num_classes, const int size_per_class,
const float score_threshold, const float iou_threshold,
std::vector<ResultCandidate>& result_candidate_vec) {
struct Candidate {
int box_index;
float score;
};
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::vector<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
float temp_score;
for (int i = 0; i < num_boxes; ++i) {
temp_score = scores_data[i * num_classes + class_idx];
if (temp_score > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, temp_score}));
}
}
std::vector<int> selected;
Candidate next_candidate;
int candidate_box_data_idx, selected_box_data_idx, class_box_idx;
class_box_idx = (q > 1) ? class_idx : 0;
float iou;
while (selected.size() < size_per_class &&
!candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
candidate_priority_queue.pop();
candidate_box_data_idx = (next_candidate.box_index * q + class_box_idx) * 4;
bool should_select = true;
for (int j = selected.size() - 1; j >= 0; --j) {
selected_box_data_idx = (selected[j] * q + class_box_idx) * 4;
iou = IOU(boxes_data, candidate_box_data_idx, selected_box_data_idx);
if (iou > iou_threshold) {
should_select = false;
break;
}
}
if (should_select) {
result_candidate_vec[selected.size() + size_per_class * class_idx] = {
next_candidate.box_index,
next_candidate.score,
class_idx,
{boxes_data[candidate_box_data_idx],
boxes_data[candidate_box_data_idx + 1],
boxes_data[candidate_box_data_idx + 2],
boxes_data[candidate_box_data_idx + 3]}};
selected.push_back(next_candidate.box_index);
}
}
}
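// Merges the per-class NMS results of one batch element: candidates are sorted
// by descending score across classes, at most max_detections of them (bounded
// by total_size_per_batch, or by per_batch_size when pad_per_class is set) are
// emitted, box coordinates are optionally clipped to [0, 1], and the output
// vectors are zero-padded up to per_batch_size.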
void SelectResultPerBatch(std::vector<float>& nmsed_boxes,
std::vector<float>& nmsed_scores,
std::vector<float>& nmsed_classes,
std::vector<ResultCandidate>& result_candidate_vec,
std::vector<int>& final_valid_detections,
const int batch_idx, int total_size_per_batch,
bool pad_per_class, int max_size_per_batch,
bool clip_boxes, int per_batch_size) {
auto rc_cmp = [](const ResultCandidate rc_i, const ResultCandidate rc_j) {
return rc_i.score > rc_j.score;
};
std::sort(result_candidate_vec.begin(), result_candidate_vec.end(), rc_cmp);
int max_detections = 0;
int result_candidate_size =
std::count_if(result_candidate_vec.begin(), result_candidate_vec.end(),
[](ResultCandidate rc) { return rc.box_index > -1; });
if (!pad_per_class) {
max_detections = std::min(result_candidate_size, total_size_per_batch);
} else {
max_detections = std::min(per_batch_size, result_candidate_size);
}
final_valid_detections[batch_idx] = max_detections;
int curr_total_size = max_detections;
int result_idx = 0;
while (curr_total_size > 0 && result_idx < result_candidate_vec.size()) {
ResultCandidate next_candidate = result_candidate_vec[result_idx++];
if (clip_boxes) {
const float box_min = 0.0;
const float box_max = 1.0;
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[0], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[1], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[2], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[3], box_max), box_min));
} else {
nmsed_boxes.push_back(next_candidate.box_coord[0]);
nmsed_boxes.push_back(next_candidate.box_coord[1]);
nmsed_boxes.push_back(next_candidate.box_coord[2]);
nmsed_boxes.push_back(next_candidate.box_coord[3]);
}
nmsed_scores.push_back(next_candidate.score);
nmsed_classes.push_back(next_candidate.class_idx);
curr_total_size--;
}
nmsed_boxes.resize(per_batch_size * 4, 0);
nmsed_scores.resize(per_batch_size, 0);
nmsed_classes.resize(per_batch_size, 0);
}
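// CPU implementation of CombinedNonMaxSuppression, sharded with Eigen's
// parallelFor in three stages: (1) per-(batch, class) hard NMS into
// result_candidate_vec, (2) per-batch merge/sort/clip/pad of the per-class
// results, and (3) a flat copy of the padded boxes, scores and classes into
// the four outputs (nmsed_boxes, nmsed_scores, nmsed_classes,
// valid_detections). The TensorOpCost values are rough per-shard cost
// estimates used by parallelFor to choose a grain size.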
void BatchedNonMaxSuppressionOp(
OpKernelContext* context, const Tensor& inp_boxes, const Tensor& inp_scores,
int num_boxes, const int max_size_per_class, const int total_size_per_batch,
const float score_threshold, const float iou_threshold,
bool pad_per_class = false, bool clip_boxes = true) {
const int num_batches = inp_boxes.dim_size(0);
int num_classes = inp_scores.dim_size(2);
int q = inp_boxes.dim_size(2);
const float* scores_data =
const_cast<float*>(inp_scores.flat<float>().data());
const float* boxes_data = const_cast<float*>(inp_boxes.flat<float>().data());
int boxes_per_batch = num_boxes * q * 4;
int scores_per_batch = num_boxes * num_classes;
const int size_per_class = std::min(max_size_per_class, num_boxes);
std::vector<std::vector<ResultCandidate>> result_candidate_vec(
num_batches,
std::vector<ResultCandidate>(size_per_class * num_classes,
{-1, -1.0, -1, {0.0, 0.0, 0.0, 0.0}}));
std::vector<std::vector<float>> nmsed_boxes(num_batches);
std::vector<std::vector<float>> nmsed_scores(num_batches);
std::vector<std::vector<float>> nmsed_classes(num_batches);
std::vector<int> final_valid_detections(num_batches);
auto shard_nms = [&](int begin, int end) {
for (int idx = begin; idx < end; ++idx) {
int batch_idx = idx / num_classes;
int class_idx = idx % num_classes;
DoNMSPerClass(batch_idx, class_idx,
boxes_data + boxes_per_batch * batch_idx,
scores_data + scores_per_batch * batch_idx, num_boxes, q,
num_classes, size_per_class, score_threshold, iou_threshold,
result_candidate_vec[batch_idx]);
}
};
int length = num_batches * num_classes;
int input_bytes = num_boxes * 10 * sizeof(float);
int output_bytes = num_boxes * 10 * sizeof(float);
int compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 14 +
Eigen::TensorOpCost::MulCost<int>() * num_boxes * 9 +
Eigen::TensorOpCost::MulCost<float>() * num_boxes * 9 +
Eigen::TensorOpCost::AddCost<float>() * num_boxes * 8;
const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
const CPUDevice& d = context->eigen_device<CPUDevice>();
d.parallelFor(length, cost, shard_nms);
int per_batch_size = total_size_per_batch;
int max_total_size = static_cast<int>(
std::min(static_cast<int64_t>(std::numeric_limits<int>::max()),
static_cast<int64_t>(max_size_per_class) * num_classes));
if (pad_per_class) {
per_batch_size = std::min(total_size_per_batch, max_total_size);
}
Tensor* valid_detections_t = nullptr;
TensorShape valid_detections_shape({num_batches});
OP_REQUIRES_OK(context, context->allocate_output(3, valid_detections_shape,
&valid_detections_t));
auto valid_detections_flat = valid_detections_t->template flat<int>();
auto shard_result = [&](int begin, int end) {
for (int batch_idx = begin; batch_idx < end; ++batch_idx) {
SelectResultPerBatch(
nmsed_boxes[batch_idx], nmsed_scores[batch_idx],
nmsed_classes[batch_idx], result_candidate_vec[batch_idx],
final_valid_detections, batch_idx, total_size_per_batch,
pad_per_class, max_total_size, clip_boxes, per_batch_size);
valid_detections_flat(batch_idx) = final_valid_detections[batch_idx];
}
};
length = num_batches;
input_bytes =
num_boxes * 10 * sizeof(float) + per_batch_size * 6 * sizeof(float);
output_bytes =
num_boxes * 5 * sizeof(float) + per_batch_size * 6 * sizeof(float);
compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 5 +
Eigen::TensorOpCost::AddCost<float>() * num_boxes * 5;
const Eigen::TensorOpCost cost_result(input_bytes, output_bytes,
compute_cycles);
d.parallelFor(length, cost_result, shard_result);
Tensor* nmsed_boxes_t = nullptr;
TensorShape boxes_shape({num_batches, per_batch_size, 4});
OP_REQUIRES_OK(context,
context->allocate_output(0, boxes_shape, &nmsed_boxes_t));
auto nmsed_boxes_flat = nmsed_boxes_t->template flat<float>();
Tensor* nmsed_scores_t = nullptr;
TensorShape scores_shape({num_batches, per_batch_size});
OP_REQUIRES_OK(context,
context->allocate_output(1, scores_shape, &nmsed_scores_t));
auto nmsed_scores_flat = nmsed_scores_t->template flat<float>();
Tensor* nmsed_classes_t = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(2, scores_shape, &nmsed_classes_t));
auto nmsed_classes_flat = nmsed_classes_t->template flat<float>();
auto shard_copy_result = [&](int begin, int end) {
for (int idx = begin; idx < end; ++idx) {
int batch_idx = idx / per_batch_size;
int j = idx % per_batch_size;
nmsed_scores_flat(idx) = nmsed_scores[batch_idx][j];
nmsed_classes_flat(idx) = nmsed_classes[batch_idx][j];
for (int k = 0; k < 4; ++k) {
nmsed_boxes_flat(idx * 4 + k) = nmsed_boxes[batch_idx][j * 4 + k];
}
}
};
length = num_batches * per_batch_size;
input_bytes = 6 * sizeof(float);
output_bytes = 6 * sizeof(float);
compute_cycles = Eigen::TensorOpCost::AddCost<int>() * 2 +
Eigen::TensorOpCost::MulCost<int>() * 2 +
Eigen::TensorOpCost::DivCost<float>() * 2;
const Eigen::TensorOpCost cost_copy_result(input_bytes, output_bytes,
compute_cycles);
d.parallelFor(length, cost_copy_result, shard_copy_result);
}
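// Reads a scalar threshold tensor of any supported floating type
// (float, double, bfloat16, half) and casts its value to T.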
template <typename T>
T GetScalar(const Tensor& tensor) {
switch (tensor.dtype()) {
case DT_FLOAT:
return static_cast<T>(tensor.scalar<float>()());
case DT_DOUBLE:
return static_cast<T>(tensor.scalar<double>()());
case DT_BFLOAT16:
return static_cast<T>(tensor.scalar<Eigen::bfloat16>()());
case DT_HALF:
return static_cast<T>(tensor.scalar<Eigen::half>()());
default:
DCHECK(false) << "Unsupported type " << tensor.dtype();
break;
}
return static_cast<T>(0);
}
}
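// NonMaxSuppression (v1) kernel: iou_threshold is a node attribute rather than
// an input tensor, there is no score threshold (lowest() disables it), and
// soft-NMS is disabled (sigma = 0).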
template <typename Device>
class NonMaxSuppressionOp : public OpKernel {
public:
explicit NonMaxSuppressionOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("iou_threshold", &iou_threshold_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<float>(boxes);
const float score_threshold_val = std::numeric_limits<float>::lowest();
const float dummy_soft_nms_sigma = static_cast<float>(0.0);
DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size,
iou_threshold_, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
private:
float iou_threshold_;
};
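// NonMaxSuppressionV2 is templated on T for boxes/scores and reads
// iou_threshold from an input tensor via GetScalar<T>; like v1 it uses no
// score threshold (lowest()) and no soft-NMS.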
template <typename Device, typename T>
class NonMaxSuppressionV2Op : public OpKernel {
public:
explicit NonMaxSuppressionV2Op(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
const T score_threshold_val = std::numeric_limits<T>::lowest();
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size,
iou_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
template <typename Device, | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class NonMaxSuppressionOpTest : public OpsTestBase {
protected:
void MakeOp(float iou_threshold) {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppression")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("iou_threshold", iou_threshold)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({6, 4}),
{1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectWithNegativeScores) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(
TensorShape({6}), {.9f - 10.0f, .75f - 10.0f, .6f - 10.0f, .95f - 10.0f,
.5f - 10.0f, .3f - 10.0f});
AddInputFromArray<int>(TensorShape({}), {6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestFirstBoxDegenerate) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({3, 4}),
{0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3});
AddInputFromArray<float>(TensorShape({3}), {.9f, .75f, .6f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {0, 1, 2});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectSingleBox) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp(.5);
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionOpTest, TestInvalidIOUThreshold) {
MakeOp(1.2);
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TEST_F(NonMaxSuppressionOpTest, TestEmptyInput) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
class NonMaxSuppressionV2OpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp();
AddInputFromArray<float>(TensorShape({6, 4}),
{1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectSingleBox) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp();
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestInvalidIOUThreshold) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {1.2f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestEmptyInput) {
MakeOp();
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
using NmsValidTypes =
::testing::Types<std::pair<float, float>, std::pair<float, Eigen::half>,
std::pair<Eigen::half, Eigen::half>,
std::pair<Eigen::half, float> >;
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV3OpTest : public OpsTestBase {
protected:
using InputType = typename InputAndThresholdTypes::first_type;
using ThresholdType = typename InputAndThresholdTypes::second_type;
void MakeOp() {
constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
constexpr DataType kThresholdDataType =
DataTypeToEnum<ThresholdType>::value;
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV3")
.Input(FakeInput(kInputDataType))
.Input(FakeInput(kInputDataType))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(kThresholdDataType))
.Input(FakeInput(kThresholdDataType))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TYPED_TEST_SUITE(NonMaxSuppressionV3OpTest, NmsValidTypes);
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersWithScoreThreshold) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType, float>(TensorShape({6}),
{.1, 0, 0, .3, .2, -5.0});
this->template AddInputFromList<int>(TensorShape({}), {6});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {-3.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectAtMostTwoBoxesFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {2});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectSingleBox) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1});
this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromTenIdenticalBoxes) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
int num_boxes = 10;
std::vector<InputType> corners(num_boxes * 4);
std::vector<InputType> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = static_cast<InputType>(0);
corners[i * 4 + 1] = static_cast<InputType>(0);
corners[i * 4 + 2] = static_cast<InputType>(1);
corners[i * 4 + 3] = static_cast<InputType>(1);
scores[i] = static_cast<InputType>(.9);
}
this->template AddInputFromArray<InputType>(TensorShape({num_boxes, 4}),
corners);
this->template AddInputFromArray<InputType>(TensorShape({num_boxes}), scores);
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestInconsistentBoxAndScoreShapes) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({5}),
{.9f, .75f, .6f, .95f, .5f});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
Status s = this->RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestInvalidIOUThreshold) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1});
this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {1.2f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
Status s = this->RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestEmptyInput) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromArray<InputType>(TensorShape({0, 4}), {});
this->template AddInputFromArray<InputType>(TensorShape({0}), {});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV4OpTest : public OpsTestBase {
protected:
using InputType = typename InputAndThresholdTypes::first_type;
using ThresholdType = typename InputAndThresholdTypes::second_type;
void MakeOp() {
constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
constexpr DataType kThresholdDataType =
DataTypeToEnum<ThresholdType>::value;
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV4")
.Input(FakeInput(kInputDataType))
.Input(FakeInput(kInputDataType))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(kThresholdDataType))
.Input(FakeInput(kThresholdDataType))
.Attr("pad_to_max_output_size", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TYPED_TEST_SUITE(NonMaxSuppressionV4OpTest, NmsValidTypes);
TYPED_TEST(NonMaxSuppressionV4OpTest, TestSelectFromThreeClustersPadFive) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
Tensor expected_num_valid = test::AsScalar<int>(3);
test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}
TYPED_TEST(NonMaxSuppressionV4OpTest,
TestSelectFromThreeClustersPadFiveScoreThr) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {6});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
TF_ASSERT_OK(this->RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
Tensor expected_num_valid = test::AsScalar<int>(2);
test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}
class NonMaxSuppressionV5OpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV5")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("pad_to_max_output_size", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersPadFive) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *GetOutput(0));
const auto expected_scores =
test::AsTensor<float>({.95f, .9f, .3f, 0.0f, 0.0f});
test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
Tensor expected_num_valid = test::AsScalar<int>(3);
test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}
TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersWithSoftNMS) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {6});
AddInputFromArray<float>(TensorShape({}), {0.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {0.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int>(&expected, {3, 0, 1, 5, 4, 2});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected_scores,
{0.95, 0.9, 0.384, 0.3, 0.256, 0.197});
test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
Tensor expected_num_valid = test::AsScalar<int>(6);
test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}
class NonMaxSuppressionWithOverlapsOpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op",
"NonMaxSuppressionWithOverlaps")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void AddIoUInput(const std::vector<float>& boxes) {
ASSERT_EQ((boxes.size() % 4), 0);
size_t num_boxes = boxes.size() / 4;
std::vector<float> iou_overlaps(num_boxes * num_boxes);
auto corner_access = [&boxes](size_t box_idx, size_t corner_idx) {
return boxes[box_idx * 4 + corner_idx];
};
for (size_t i = 0; i < num_boxes; ++ |
1,528 | cpp | tensorflow/tensorflow | resize_bilinear_op | tensorflow/core/kernels/image/resize_bilinear_op.cc | tensorflow/core/kernels/image/resize_bilinear_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_RESIZE_BILINEAR_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_RESIZE_BILINEAR_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct ResizeBilinear {
void operator()(const Device& d, typename TTypes<T, 4>::ConstTensor images,
const float height_scale, const float width_scale,
const bool half_pixel_centers,
typename TTypes<float, 4>::Tensor resized_images);
};
template <typename Device, typename T>
struct ResizeBilinearGrad {
void operator()(const Device& d,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor output_grad);
};
}
}
#endif
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/kernels/image/resize_bilinear_op.h"
#ifdef __SSE4_1__
#include <xmmintrin.h>
#endif
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class ResizeBilinearOp : public OpKernel {
public:
explicit ResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(
context->input(0).tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = st.output->tensor<float, 4>();
functor::ResizeBilinear<Device, T>()(
context->eigen_device<Device>(), image_data, st.height_scale,
st.width_scale, half_pixel_centers_, output_data);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace {
struct CachedInterpolation {
int64_t lower;
int64_t upper;
float lerp;
};
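// Precomputes, for every output coordinate, the two source indices it falls
// between (lower/upper, clamped to [0, in_size - 1]) and the fractional
// distance lerp used for linear interpolation; the extra entry at index
// out_size gets lower = upper = 0 as a sentinel. The scaler maps output index
// i to a source coordinate; as defined in image_resizer_state.h, LegacyScaler
// uses i * scale and HalfPixelScaler uses (i + 0.5) * scale - 0.5.
// Example (LegacyScaler, out_size = 4, in_size = 2, scale = 0.5): i = 1 maps
// to 0.5, giving lower = 0, upper = 1, lerp = 0.5.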
template <typename Scaler>
inline void compute_interpolation_weights(const Scaler scaler,
const int64_t out_size,
const int64_t in_size,
const float scale,
CachedInterpolation* interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
for (int64_t i = out_size - 1; i >= 0; --i) {
const float in = scaler(i, scale);
const float in_f = std::floor(in);
interpolation[i].lower =
std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0));
interpolation[i].upper =
std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1);
interpolation[i].lerp = in - in_f;
}
}
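// Bilinear interpolation of a 2x2 neighborhood:
//   top    = top_left    + (top_right    - top_left)    * x_lerp
//   bottom = bottom_left + (bottom_right - bottom_left) * x_lerp
//   result = top + (bottom - top) * y_lerp
// Example: corners (0, 1, 2, 3) with x_lerp = y_lerp = 0.5 give top = 0.5,
// bottom = 2.5, result = 1.5.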
inline float compute_lerp(const float top_left, const float top_right,
const float bottom_left, const float bottom_right,
const float x_lerp, const float y_lerp) {
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
return top + (bottom - top) * y_lerp;
}
#ifdef __SSE4_1__
inline __m128 compute_lerp_v(const __m128 top_left, const __m128 top_right,
const __m128 bottom_left,
const __m128 bottom_right, const __m128 x_lerp,
const __m128 y_lerp) {
const __m128 top =
_mm_add_ps(top_left, _mm_mul_ps(_mm_sub_ps(top_right, top_left), x_lerp));
const __m128 bottom = _mm_add_ps(
bottom_left, _mm_mul_ps(_mm_sub_ps(bottom_right, bottom_left), x_lerp));
return _mm_add_ps(top, _mm_mul_ps(_mm_sub_ps(bottom, top), y_lerp));
}
#endif
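// Interpolates one output row: for each output x it blends the cached
// lower/upper columns of the two source rows using compute_lerp, for all
// `channels` values per pixel. The xs indices are already pre-multiplied by
// the channel count by the caller.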
template <typename T>
void ResizeLineChannels(const T* const ys_input_lower_ptr,
const T* const ys_input_upper_ptr,
const CachedInterpolation* const xs,
const float ys_lerp, const int64_t out_width,
float* out_y, const int channels) {
for (int64_t x = 0; x < out_width; ++x) {
const int64_t xs_lower = xs[x].lower;
const int64_t xs_upper = xs[x].upper;
const float xs_lerp = xs[x].lerp;
for (int c = 0; c < channels; ++c) {
const float top_left(ys_input_lower_ptr[xs_lower + c]);
const float top_right(ys_input_lower_ptr[xs_upper + c]);
const float bottom_left(ys_input_upper_ptr[xs_lower + c]);
const float bottom_right(ys_input_upper_ptr[xs_upper + c]);
out_y[x * channels + c] = compute_lerp(top_left, top_right, bottom_left,
bottom_right, xs_lerp, ys_lerp);
}
}
}
#ifdef __SSE4_1__
template <typename T>
inline __m128 load_3xfloat_v(T* values) {
return _mm_set_ps(0.0f, static_cast<float>(values[2]),
static_cast<float>(values[1]),
static_cast<float>(values[0]));
}
template <>
inline __m128 load_3xfloat_v(float* values) {
return _mm_loadu_ps(values);
}
template <typename T>
void ResizeLine3ChannelsVector(const T* const ys_input_lower_ptr,
const T* const ys_input_upper_ptr,
const CachedInterpolation* const xs,
const float ys_lerp, const int64_t out_width,
float* out_y) {
const __m128 ys_lerp_v = _mm_set1_ps(ys_lerp);
int64_t x = 0;
for (x = 0; x < out_width - 1; ++x) {
const int64_t xs_lower = xs[x].lower;
const int64_t xs_upper = xs[x].upper;
const __m128 xs_lerp_v = _mm_set1_ps(xs[x].lerp);
const __m128 top_left_v = load_3xfloat_v(ys_input_lower_ptr + xs_lower);
const __m128 top_right_v = load_3xfloat_v(ys_input_lower_ptr + xs_upper);
const __m128 bottom_left_v = load_3xfloat_v(ys_input_upper_ptr + xs_lower);
const __m128 bottom_right_v = load_3xfloat_v(ys_input_upper_ptr + xs_upper);
_mm_storeu_ps(out_y + x * 3,
compute_lerp_v(top_left_v, top_right_v, bottom_left_v,
bottom_right_v, xs_lerp_v, ys_lerp_v));
}
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs + out_width - 1,
ys_lerp, 1, out_y + (out_width - 1) * 3, 3);
}
#endif
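// resize_image walks batches and output rows, dispatching to the SSE4.1
// three-channel fast path when available: ResizeLine3ChannelsVector handles
// all but the last output pixel with 4-float loads/stores (each store overlaps
// the next pixel by one float) and computes the final pixel with the scalar
// routine to avoid writing past the end of the row. All other channel counts
// use the generic per-channel path.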
template <typename T>
void resize_image(
typename TTypes<T, 4>::ConstTensor images, const int batch_size,
const int64_t in_height, const int64_t in_width, const int64_t out_height,
const int64_t out_width, const int channels,
const std::vector<CachedInterpolation>& xs,
const std::vector<CachedInterpolation>& ys,
typename TTypes<float, 4>::Tensor output) TF_ATTRIBUTE_NOINLINE;
template <typename T>
void resize_image(typename TTypes<T, 4>::ConstTensor images,
const int batch_size, const int64_t in_height,
const int64_t in_width, const int64_t out_height,
const int64_t out_width, const int channels,
const std::vector<CachedInterpolation>& xs_vec,
const std::vector<CachedInterpolation>& ys,
typename TTypes<float, 4>::Tensor output) {
const int64_t in_row_size = in_width * channels;
const int64_t in_batch_num_values = in_height * in_row_size;
const int64_t out_row_size = out_width * channels;
const T* input_b_ptr = images.data();
const CachedInterpolation* xs = xs_vec.data();
if (channels == 3) {
float* output_y_ptr = output.data();
for (int b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const T* ys_input_lower_ptr = input_b_ptr + ys[y].lower * in_row_size;
const T* ys_input_upper_ptr = input_b_ptr + ys[y].upper * in_row_size;
#ifdef __SSE4_1__
ResizeLine3ChannelsVector(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr);
#else
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr, 3);
#endif
output_y_ptr += out_row_size;
}
input_b_ptr += in_batch_num_values;
}
} else {
float* output_y_ptr = output.data();
for (int b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const T* ys_input_lower_ptr = input_b_ptr + ys[y].lower * in_row_size;
const T* ys_input_upper_ptr = input_b_ptr + ys[y].upper * in_row_size;
ResizeLineChannels(ys_input_lower_ptr, ys_input_upper_ptr, xs,
ys[y].lerp, out_width, output_y_ptr, channels);
output_y_ptr += out_row_size;
}
input_b_ptr += in_batch_num_values;
}
}
}
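// CastFloatTo: casts a float buffer to the requested output type T. The GPU
// specialization delegates to functor::CastFunctor so the cast runs on the
// device.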
template <typename Device, typename T>
struct CastFloatTo {
void operator()(const Device& d, typename TTypes<float>::ConstFlat input,
typename TTypes<T>::Flat output) {
output.device(d) = input.template cast<T>();
}
};
template <typename T>
struct CastFloatTo<GPUDevice, T> {
void operator()(const GPUDevice& d, typename TTypes<float>::ConstFlat input,
typename TTypes<T>::Flat output) {
functor::CastFunctor<GPUDevice, T, float> cast;
cast(d, output, input);
}
};
}
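// CPU implementation of the ResizeBilinear functor. If the output size equals
// the input size the images are simply cast to float; otherwise interpolation
// weights are precomputed for every output row and column (half-pixel or
// legacy sampling), the column indices are pre-scaled by the channel count,
// and the actual resampling is delegated to resize_image.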
namespace functor {
template <typename T>
struct ResizeBilinear<CPUDevice, T> {
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor images,
const float height_scale, const float width_scale,
bool half_pixel_centers,
typename TTypes<float, 4>::Tensor output) {
const int batch_size = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
if (out_height == in_height && out_width == in_width) {
output = images.template cast<float>();
return;
}
std::vector<CachedInterpolation> ys(out_height + 1);
std::vector<CachedInterpolation> xs(out_width + 1);
if (half_pixel_centers) {
compute_interpolation_weights(HalfPixelScaler(), out_height, in_height,
height_scale, ys.data());
compute_interpolation_weights(HalfPixelScaler(), out_width, in_width,
width_scale, xs.data());
} else {
compute_interpolation_weights(LegacyScaler(), out_height, in_height,
height_scale, ys.data());
compute_interpolation_weights(LegacyScaler(), out_width, in_width,
width_scale, xs.data());
}
for (int i = 0; i < xs.size(); ++i) {
xs[i].lower *= channels;
xs[i].upper *= channels;
}
resize_image<T>(images, batch_size, in_height, in_width, out_height,
out_width, channels, xs, ys, output);
}
};
}
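// Gradient op for ResizeBilinear. The incoming gradient is always float; for
// half and bfloat16 outputs the gradient is first accumulated into a temporary
// float tensor and then cast to T, otherwise it is written directly into the
// output tensor.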
template <typename Device, typename T>
class ResizeBilinearOpGrad : public OpKernel {
public:
explicit ResizeBilinearOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerGradientState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad =
context->input(0).tensor<float, 4>();
if (!std::is_same<T, Eigen::half>::value &&
!std::is_same<T, Eigen::bfloat16>::value) {
typename TTypes<T, 4>::Tensor output_grad(st.output->tensor<T, 4>());
functor::ResizeBilinearGrad<Device, T>()(
context->eigen_device<Device>(), input_grad, st.height_scale,
st.width_scale, half_pixel_centers_, output_grad);
} else {
Tensor output_grad;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_FLOAT, st.output->shape(), &output_grad));
functor::ResizeBilinearGrad<Device, float>()(
context->eigen_device<Device>(), input_grad, st.height_scale,
st.width_scale, half_pixel_centers_, output_grad.tensor<float, 4>());
const Tensor& output_grad_const = output_grad;
CastFloatTo<Device, T>{}(context->template eigen_device<Device>(),
output_grad_const.template flat<float>(),
st.output->template flat<T>());
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
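// CPU gradient functor: for every resized pixel, distributes its incoming
// gradient to the four source pixels that produced it, weighted by the same
// lerp factors used in the forward pass.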
namespace functor {
template <typename T>
struct ResizeBilinearGrad<CPUDevice, T> {
template <typename Scaler>
void ResizeGradCore(const Scaler& scaler,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output_grad) {
const Eigen::Index batch = output_grad.dimension(0);
const Eigen::Index original_height = output_grad.dimension(1);
const Eigen::Index original_width = output_grad.dimension(2);
const Eigen::Index channels = output_grad.dimension(3);
const Eigen::Index resized_height = input_grad.dimension(1);
const Eigen::Index resized_width = input_grad.dimension(2);
output_grad.setZero();
for (Eigen::Index b = 0; b < batch; ++b) {
for (Eigen::Index y = 0; y < resized_height; ++y) {
const float in_y = scaler(y, height_scale);
const Eigen::Index top_y_index =
std::max(static_cast<Eigen::Index>(floorf(in_y)),
static_cast<Eigen::Index>(0));
const Eigen::Index bottom_y_index = std::min(
static_cast<Eigen::Index>(ceilf(in_y)), original_height - 1);
const float y_lerp = in_y - floorf(in_y);
const float inverse_y_lerp = (1.0f - y_lerp);
for (Eigen::Index x = 0; x < resized_width; ++x) {
const float in_x = scaler(x, width_scale);
const Eigen::Index left_x_index =
std::max(static_cast<Eigen::Index>(floorf(in_x)),
static_cast<Eigen::Index>(0));
const Eigen::Index right_x_index = std::min(
static_cast<Eigen::Index>(ceilf(in_x)), original_width - 1);
const float x_lerp = in_x - floorf(in_x);
const float inverse_x_lerp = (1.0f - x_lerp);
for (Eigen::Index c = 0; c < channels; ++c) {
output_grad(b, top_y_index, left_x_index, c) +=
T(input_grad(b, y, x, c) * inverse_y_lerp * inverse_x_lerp);
output_grad(b, top_y_index, right_x_index, c) +=
T(input_grad(b, y, x, c) * inverse_y_lerp * x_lerp);
output_grad(b, bottom_y_index, left_x_index, c) +=
T(input_grad(b, y, x, c) * y_lerp * inverse_x_lerp);
output_grad(b, bottom_y_index, right_x_index, c) +=
T(input_grad(b, y, x, c) * y_lerp * x_lerp);
}
}
}
}
}
void operator()(const CPUDevice& d,
typename TTypes<float, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor output_grad) {
if (half_pixel_centers) {
return ResizeGradCore(HalfPixelScaler(), input_grad, height_scale,
width_scale, output_grad);
} else {
return ResizeGradCore(LegacyScaler(), input_grad, height_scale,
width_scale, output_grad);
}
}
};
}
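// Kernel registrations: the forward op is registered for all real number
// types on CPU (and for GPU number types when built with CUDA/ROCm); the
// gradient op is registered for half, float, double and bfloat16.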
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBilinear") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBilinearOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBilinearGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
ResizeBilinearOpGrad<CPUDevice, T>);
TF_CALL_half(REGISTER_GRAD_KERNEL);
TF_CALL_float(REGISTER_GRAD_KERNEL);
TF_CALL_double(REGISTER_GRAD_KERNEL);
TF_CALL_bfloat16(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBilinear") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBilinearOp<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBilinearGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
ResizeBilinearOpGrad<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
#endif
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
enum class TestDevice { CPU, GPU };
class ResizeBilinearOpTestBase
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
explicit ResizeBilinearOpTestBase()
: align_corners_(false), half_pixel_centers_(false) {}
void SetUp() override {
if (GetParam() == TestDevice::GPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
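  // Creates a float tensor of the given 4-D shape, fills it with random
  // values and registers it as the op's image input. Returns a pointer to the
  // tensor so tests can compute the expected output from the same data.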
const Tensor* SetRandomImageInput(const TensorShape& shape) {
inputs_.clear();
CHECK_EQ(shape.dims(), 4) << "All images must have 4 dimensions.";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(allocator(), DataTypeToEnum<float>::v(), shape);
input->flat<float>().setRandom();
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]),
DataTypeToEnum<float>::v());
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], DataTypeToEnum<float>::v());
inputs_.push_back({nullptr, input});
}
return input;
}
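  // Reference bilinear resize used to validate the kernel output. Mirrors the
  // op's sampling logic for both the legacy and half-pixel-centers modes.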
void ResizeBilinearBaseline(TTypes<float, 4>::ConstTensor images,
TTypes<float, 4>::Tensor output) {
const int batch = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
ASSERT_EQ(batch, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const float height_scale = in_height / static_cast<float>(out_height);
const float width_scale = in_width / static_cast<float>(out_width);
for (int b = 0; b < batch; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const float in_y =
half_pixel_centers_
? (static_cast<float>(y) + 0.5f) * height_scale - 0.5f
: y * height_scale;
const int64_t top_y_index = std::max(static_cast<int64_t>(floorf(in_y)),
static_cast<int64_t>(0));
const int64_t bottom_y_index =
std::min(static_cast<int64_t>(ceilf(in_y)), in_height - 1);
const float y_lerp = in_y - std::floor(in_y);
for (int64_t x = 0; x < out_width; ++x) {
const float in_x =
half_pixel_centers_
? (static_cast<float>(x) + 0.5f) * width_scale - 0.5f
: x * width_scale;
const int64_t left_x_index = std::max(
static_cast<int64_t>(floorf(in_x)), static_cast<int64_t>(0));
const int64_t right_x_index =
std::min(static_cast<int64_t>(ceilf(in_x)), in_width - 1);
const float x_lerp = in_x - std::floor(in_x);
for (int c = 0; c < channels; ++c) {
const float top_left = images(b, top_y_index, left_x_index, c);
const float top_right = images(b, top_y_index, right_x_index, c);
const float bottom_left =
images(b, bottom_y_index, left_x_index, c);
const float bottom_right =
images(b, bottom_y_index, right_x_index, c);
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
output(b, y, x, c) = top + (bottom - top) * y_lerp;
}
}
}
}
}
void TestResize(int batch_size, int input_width, int input_height,
int channels, int output_width, int output_height) {
const TensorShape shape({batch_size, input_width, input_height, channels});
const Tensor* input = SetRandomImageInput(shape);
AddInputFromArray<int32>(TensorShape({2}), {output_width, output_height});
TF_ASSERT_OK(RunOpKernel());
std::unique_ptr<Tensor> expected(new Tensor(
allocator(), DataTypeToEnum<float>::v(),
TensorShape({batch_size, output_width, output_height, channels})));
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
test::ExpectClose(*expected, *GetOutput(0), 4e-5);
}
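  // Sweeps a range of batch sizes and input/output dimensions, checking the
  // kernel against ResizeBilinearBaseline for each combination.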
void RunManyRandomTests(int channels) {
for (int batch_size : {1, 2, 5}) {
for (int in_w : {2, 4, 7, 20, 165}) {
for (int in_h : {1, 3, 5, 8, 100, 233}) {
for (int target_height : {1, 2, 3, 50, 113}) {
for (int target_width : {target_height, target_height / 2 + 1}) {
TestResize(batch_size, in_w, in_h, channels, target_width,
target_height);
}
}
}
}
}
}
bool align_corners_;
bool half_pixel_centers_;
};
class ResizeBilinearOpTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearOpTest() {}
};
class ResizeBilinearHalfPixelCentersOpTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearHalfPixelCentersOpTest() { half_pixel_centers_ = true; }
};
class ResizeBilinearOpAlignCornersTest : public ResizeBilinearOpTestBase {
public:
ResizeBilinearOpAlignCornersTest() { align_corners_ = true; }
};
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes1Channel) {
RunManyRandomTests(1);
}
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes3Channels) {
RunManyRandomTests(3);
}
TEST_P(ResizeBilinearOpTest, TestResizeRandomDataSeveralInputsSizes4Channels) {
RunManyRandomTests(4);
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinearRandom2x2To1x1) {
const Tensor* input = SetRandomImageInput(TensorShape({1, 2, 2, 1}));
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
std::unique_ptr<Tensor> expected(new Tensor(
allocator(), DataTypeToEnum<float>::v(), TensorShape({1, 1, 1, 1})));
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
EXPECT_EQ(input->flat<float>()(0), output->flat<float>()(0));
test::ExpectClose(*expected, *output);
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 5.0f / 3, 2,
7.0f / 3, 3, 10.0f / 3,
3, 11.0f / 3, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2,
2, 2.5, 3,
3, 3.5, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 2.5,
5.5, 7});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear3x3To4x4) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1.75, 2.5, 3,
3.25, 4, 4.75, 5.25,
5.5, 6.25, 7, 7.5,
7, 7.75, 8.5, 9});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 7.0f/3, 11.0f/3,
19.0f/3, 23.0f/3, 27.0f/3,
35.0f/3, 39.0f/3, 43.0f/3});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearHalfPixelCentersOpTest, TestDownsamples) {
TestResize(4, 298, 297, 3, 61, 71);
}
TEST_P(ResizeBilinearHalfPixelCentersOpTest, TestUpsamples) {
TestResize(4, 61, 71, 3, 298, 297);
}
TEST_P(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{ 1, 2.5, 4,
7, 8.5, 10,
13, 14.5, 16});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To3x3Batch2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 1}), {1, 2, 3, 4, 1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 5.0f/3, 2, 7.0f/3, 3, 10.0f/3, 3, 11.0f/3, 4,
1, 5.0f/3, 2, 7.0f/3, 3, 10.0f/3, 3, 11.0f/3, 4
});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2x2To3x3x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 2}),
{1, -1, 2, -2, 3, -3, 4, -4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 2}));
test::FillValues<float>(&expected,
{
1, -1,
5.0f/3, -5.0f/3,
2, -2,
7.0f/3, -7.0f/3,
3, -3,
10.0f/3, -10.0f/3,
3, -3,
11.0f/3, -11.0f/3,
4, -4
});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, TestBilinear2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1.5, 2, 2,
2, 2.5, 3, 3,
3, 3.5, 4, 4,
3, 3.5, 4, 4});
test::ExpectClose(expected, *GetOutput(0));
}
TEST_P(ResizeBilinearOpTest, Test1_1c) { TestResize(1, 183, 299, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test1_3c) { TestResize(1, 183, 299, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test2_1c) { TestResize(1, 141, 186, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test2_3c) { TestResize(1, 141, 186, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test3_1c) { TestResize(1, 749, 603, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test3_3c) { TestResize(1, 749, 603, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test4_1c) { TestResize(1, 299, 299, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test4_3c) { TestResize(1, 299, 299, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test5_1c) { TestResize(1, 298, 297, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test5_3c) { TestResize(1, 298, 297, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test6_1c) { TestResize(1, 304, 303, 1, 299, 299); }
TEST_P(ResizeBilinearOpTest, Test6_3c) { TestResize(1, 304, 303, 3, 299, 299); }
TEST_P(ResizeBilinearOpTest, TestInvalidOutputSize) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "output dimensions must be positive"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidInputShape) {
AddInputFromArray<float>(TensorShape({2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "input must be 4-dimensional"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidSizeDim) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {4, 4});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "shape_t must be 1-dimensional"))
<< s;
}
TEST_P(ResizeBilinearOpTest, TestInvalidSizeElements) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {4, 4, 1});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), "shape_t must have two elements"))
<< s;
}
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestCpu, ResizeBilinearOpTest,
::testing::Values(TestDevice::CPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestCpu,
ResizeBilinearHalfPixelCentersOpTest,
::testing::Values(TestDevice::CPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestCpu,
ResizeBilinearOpAlignCornersTest,
::testing::Values(TestDevice::CPU));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTestGpu, ResizeBilinearOpTest,
::testing::Values(TestDevice::GPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearHalfPixelCentersOpTestGpu,
ResizeBilinearHalfPixelCentersOpTest,
::testing::Values(TestDevice::GPU));
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpAlignCornersTestGpu,
ResizeBilinearOpAlignCornersTest,
::testing::Values(TestDevice::GPU));
#endif
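// Microbenchmark harness built on the test fixture; BM_Resize measures a
// 640x480 -> 1024x768 resize of a 3-channel image.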
class ResizeBM : public ResizeBilinearOpTest {
public:
void TestBody() override {}
void SetUpBenchmark(int input_width, int input_height, int num_channels,
int output_width, int output_height) {
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const TensorShape shape(
{ 1, input_width, input_height, num_channels});
SetRandomImageInput(shape);
AddInputFromArray<int32>(TensorShape({2}), {output_width, output_height});
}
using ResizeBilinearOpTest::RunOpKernel;
};
#ifdef PLATFORM_GOOGLE
void BM_Resize(benchmark::State& state) {
ResizeBM bench;
bench.SetUpBenchmark(640, 480, 3, 1024, 768);
for (const auto _ : state) {
CHECK(bench.RunOpKernel().ok());
}
}
BENCHMARK(BM_Resize);
#endif
} |
1,529 | cpp | tensorflow/tensorflow | resize_nearest_neighbor_op | tensorflow/core/kernels/image/resize_nearest_neighbor_op.cc | tensorflow/core/kernels/image/resize_nearest_neighbor_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_RESIZE_NEAREST_NEIGHBOR_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_RESIZE_NEAREST_NEIGHBOR_OP_H_
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T, bool half_pixel_centers,
bool align_corners>
struct ResizeNearestNeighbor {
bool operator()(const Device& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output);
};
template <typename Device, typename T, bool half_pixel_centers,
bool align_corners>
struct ResizeNearestNeighborGrad {
bool operator()(const Device& d,
typename TTypes<T, 4>::ConstTensor input_grad,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output_grad);
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/resize_nearest_neighbor_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class ResizeNearestNeighborOp : public OpKernel {
public:
explicit ResizeNearestNeighborOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
OP_REQUIRES(context, st.in_height < (1 << 24) && st.in_width < (1 << 24),
errors::InvalidArgument("nearest neighbor requires max height "
"& width of 2^24"));
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor input_data(
context->input(0).tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>());
bool status;
if (half_pixel_centers_) {
if (align_corners_) {
status = functor::ResizeNearestNeighbor<Device, T,
true,
true>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
} else {
status = functor::ResizeNearestNeighbor<Device, T,
true,
false>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
}
} else {
if (align_corners_) {
status = functor::ResizeNearestNeighbor<Device, T,
false,
true>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
} else {
status = functor::ResizeNearestNeighbor<Device, T,
false,
false>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
}
}
if (!status) {
context->SetStatus(
errors::Internal("Failed launching ResizeNearestNeighbor"));
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
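// Maps the half_pixel_centers template flag to the scaler used for index
// computation: HalfPixelScalerForNN samples at (x + 0.5) * scale, while the
// legacy scaler samples at x * scale.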
template <bool half_pixel_centers>
struct BoolToScaler {};
struct HalfPixelScalerForNN {
inline float operator()(const int x, const float scale) const {
return (static_cast<float>(x) + 0.5f) * scale;
}
};
template <>
struct BoolToScaler<true> {
typedef HalfPixelScalerForNN Scaler;
};
template <>
struct BoolToScaler<false> {
typedef LegacyScaler Scaler;
};
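// CPU forward functor: each output pixel copies all channels from the nearest
// input pixel (rounded when align_corners is set, floored otherwise, clamped
// to the input bounds). Outside of PLATFORM_GOOGLE builds the output pixels
// are processed in parallel via d.parallelFor.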
namespace functor {
template <typename T, bool half_pixel_centers, bool align_corners>
struct ResizeNearestNeighbor<CPUDevice, T, half_pixel_centers, align_corners> {
bool operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output) {
typename BoolToScaler<half_pixel_centers>::Scaler scaler;
const Eigen::Index batch_size = input.dimension(0);
const Eigen::Index in_height = input.dimension(1);
const Eigen::Index in_width = input.dimension(2);
const Eigen::Index channels = input.dimension(3);
const Eigen::Index out_height = output.dimension(1);
const Eigen::Index out_width = output.dimension(2);
#ifdef PLATFORM_GOOGLE
for (Eigen::Index b = 0; b < batch_size; ++b) {
for (Eigen::Index y = 0; y < out_height; ++y) {
Eigen::Index in_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
in_height - 1);
if (half_pixel_centers) {
in_y = std::max(static_cast<Eigen::Index>(0), in_y);
}
for (Eigen::Index x = 0; x < out_width; ++x) {
Eigen::Index in_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
in_width - 1);
if (half_pixel_centers) {
in_x = std::max(static_cast<Eigen::Index>(0), in_x);
}
std::copy_n(&input(b, in_y, in_x, 0), channels, &output(b, y, x, 0));
}
}
}
#else
auto ParallelResize = [&](Eigen::Index start, Eigen::Index end) {
for (Eigen::Index b = start; b < end; ++b) {
Eigen::Index x = b % out_width;
Eigen::Index y = (b / out_width) % out_height;
Eigen::Index bs = (b / out_width) / out_height;
Eigen::Index in_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
in_height - 1);
if (half_pixel_centers) {
in_y = std::max(static_cast<Eigen::Index>(0), in_y);
}
Eigen::Index in_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
in_width - 1);
if (half_pixel_centers) {
in_x = std::max(static_cast<Eigen::Index>(0), in_x);
}
std::copy_n(&input(bs, in_y, in_x, 0), channels, &output(bs, y, x, 0));
}
};
Eigen::Index N = batch_size * out_height * out_width;
const int input_bytes = channels * sizeof(T);
const int output_bytes = channels * sizeof(T);
const int compute_cycles = (Eigen::TensorOpCost::ModCost<T>() * 2 +
Eigen::TensorOpCost::DivCost<T>() * 3 +
Eigen::TensorOpCost::AddCost<T>() * 2 +
Eigen::TensorOpCost::MulCost<T>() * 2);
const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
d.parallelFor(N, cost, ParallelResize);
#endif
return true;
}
};
}
template <typename Device, typename T>
class ResizeNearestNeighborOpGrad : public OpKernel {
public:
explicit ResizeNearestNeighborOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
const Tensor& shape_t = context->input(1);
OP_REQUIRES(context, shape_t.dims() == 1,
errors::InvalidArgument("shape_t must be 1-dimensional",
shape_t.shape().DebugString()));
OP_REQUIRES(context, shape_t.NumElements() == 2,
errors::InvalidArgument("shape_t must have two elements",
shape_t.shape().DebugString()));
auto sizes = shape_t.vec<int32>();
OP_REQUIRES(context, sizes(0) > 0 && sizes(1) > 0,
errors::InvalidArgument("shape_t's elements must be positive"));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of ResizeNearestNeighborGrad"
" is not currently available."));
}
const int64_t batch_size = input.dim_size(0);
const int64_t in_height = input.dim_size(1);
const int64_t in_width = input.dim_size(2);
const int64_t channels = input.dim_size(3);
const int64_t out_height = sizes(0);
const int64_t out_width = sizes(1);
Tensor* output = nullptr;
TensorShape shape;
OP_REQUIRES_OK(context,
TensorShape::BuildTensorShape(
{batch_size, out_height, out_width, channels}, &shape));
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output));
if (output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor input_data(input.tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(output->tensor<T, 4>());
const float height_scale =
CalculateResizeScale(out_height, in_height, align_corners_);
const float width_scale =
CalculateResizeScale(out_width, in_width, align_corners_);
bool status;
if (half_pixel_centers_) {
if (align_corners_) {
status = functor::ResizeNearestNeighborGrad<Device, T,
true,
true>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
} else {
status = functor::ResizeNearestNeighborGrad<Device, T,
true,
false>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
}
} else {
if (align_corners_) {
status =
functor::ResizeNearestNeighborGrad<Device, T,
false,
true>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
} else {
status =
functor::ResizeNearestNeighborGrad<Device, T,
false,
false>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
}
}
if (!status) {
context->SetStatus(
errors::Internal("Failed launching ResizeNearestNeighborGrad"));
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
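// CPU gradient functor: zero-initializes the output and adds every incoming
// gradient value to the single nearest location in the gradient of the
// original input.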
namespace functor {
template <typename T, bool half_pixel_centers, bool align_corners>
struct ResizeNearestNeighborGrad<CPUDevice, T, half_pixel_centers,
align_corners> {
bool operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output) {
typename BoolToScaler<half_pixel_centers>::Scaler scaler;
const Eigen::Index batch_size = input.dimension(0);
const Eigen::Index in_height = input.dimension(1);
const Eigen::Index in_width = input.dimension(2);
const Eigen::Index channels = input.dimension(3);
const Eigen::Index out_height = output.dimension(1);
const Eigen::Index out_width = output.dimension(2);
output.setZero();
for (Eigen::Index y = 0; y < in_height; ++y) {
const Eigen::Index out_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
out_height - 1);
for (Eigen::Index x = 0; x < in_width; ++x) {
const Eigen::Index out_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
out_width - 1);
for (Eigen::Index b = 0; b < batch_size; ++b) {
for (Eigen::Index c = 0; c < channels; ++c) {
output(b, out_y, out_x, c) += input(b, y, x, c);
}
}
}
}
return true;
}
};
}
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighbor") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighborGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOpGrad<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighbor") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOp<GPUDevice, T>); \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighborGrad") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOpGrad<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#endif
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
enum class TestDevice { kCPU, kGPU };
class ResizeNearestNeighborOpTestBase
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
explicit ResizeNearestNeighborOpTestBase(bool half_pixel_centers)
: align_corners_(false), half_pixel_centers_(half_pixel_centers) {}
void SetUp() override {
if (GetParam() == TestDevice::kGPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {},
"/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_nn_op", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
bool align_corners_;
bool half_pixel_centers_;
};
class ResizeNearestNeighborOpTest : public ResizeNearestNeighborOpTestBase {
protected:
ResizeNearestNeighborOpTest() : ResizeNearestNeighborOpTestBase(false) {}
};
class ResizeNearestNeighborHalfPixelCentersOpTest
: public ResizeNearestNeighborOpTestBase {
protected:
ResizeNearestNeighborHalfPixelCentersOpTest()
: ResizeNearestNeighborOpTestBase(true) {}
};
class ResizeNearestNeighborOpAlignCornersTest
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
ResizeNearestNeighborOpAlignCornersTest() : align_corners_(true) {}
void SetUp() override {
if (GetParam() == TestDevice::kGPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {},
"/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_nn_op", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
bool align_corners_;
};
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearest2x2AlignCornersTo1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1, 2,
1, 1, 2,
3, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestAlignCorners2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To2x5) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
test::FillValues<float>(&expected,
{1, 1, 1, 2, 2,
3, 3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearestNeighbor4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 3,
5, 6, 7,
9, 10, 11});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestNeighborAlignCorners4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{ 1, 3, 4,
9, 11, 12,
13, 15, 16});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To5x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
1, 2,
1, 2,
3, 4,
3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2,
1, 1, 2, 2,
3, 3, 4, 4,
3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));
test::FillValues<float>(&expected,
{1, 1, 1,
1, 2, 2,
1, 1, 1,
1, 2, 2,
3, 3, 3,
3, 4, 4,
5, 5, 5,
5, 6, 6,
5, 5, 5,
5, 6, 6,
7, 7, 7,
7, 8, 8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest5x2To2x2) {
AddInputFromArray<float>(TensorShape({1, 2, 5, 1}),
{1, 2, 3, 4, 5, 1, 2, 3, 4, 5});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected, {2, 4, 2, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To2x5) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2, 2,
3, 3, 4, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest,
TestNearestNeighbor4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 3, 4,
9, 11, 12,
13, 15, 16});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To5x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
1, 2,
3, 4,
3, 4,
3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2,
1, 1, 2, 2,
3, 3, 4, 4,
3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest,
TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));
test::FillValues<float>(&expected,
{1, 1, 2, 2, 2, 2,
3, 3, 4, 4, 4, 4,
3, 3, 4, 4, 4, 4,
5, 5, 6, 6, 6, 6,
7, 7, 8, 8, 8, 8,
7, 7, 8, 8, 8, 8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTestCpu,
ResizeNearestNeighborOpTest,
::testing::Values(TestDevice::kCPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborHalfPixelCentersOpTestCpu,
ResizeNearestNeighborHalfPixelCentersOpTest,
::testing::Values(TestDevice::kCPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpAlignCornersTestCpu,
ResizeNearestNeighborOpAlignCornersTest,
::testing::Values(TestDevice::kCPU));
#if GOOGLE_CUDA
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTestGpu,
ResizeNearestNeighborOpTest,
::testing::Values(TestDevice::kGPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborHalfPixelCentersOpTestGpu,
ResizeNearestNeighborHalfPixelCentersOpTest,
::testing::Values(TestDevice::kGPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpAlignCornersTestGpu,
ResizeNearestNeighborOpAlignCornersTest,
::testing::Values(TestDevice::kGPU));
#endif
} |
1,530 | cpp | tensorflow/tensorflow | scale_and_translate_op | tensorflow/core/kernels/image/scale_and_translate_op.cc | tensorflow/core/kernels/image/scale_and_translate_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_SCALE_AND_TRANSLATE_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_SCALE_AND_TRANSLATE_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/image/sampling_kernels.h"
namespace tensorflow {
namespace functor {
struct Spans {
int span_size;
Tensor starts;
Tensor weights;
};
template <typename Device, typename T>
struct GatherSpans {
void operator()(const Device& d, int row_span_size,
typename TTypes<int32, 1>::ConstTensor row_starts,
typename TTypes<float, 1>::ConstTensor row_weights,
int col_span_size,
typename TTypes<int32, 1>::ConstTensor col_starts,
typename TTypes<float, 1>::ConstTensor col_weights,
typename TTypes<T, 4>::ConstTensor input_images,
typename TTypes<float, 4>::Tensor intermediate_buffer,
typename TTypes<float, 4>::Tensor output_images);
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/scale_and_translate_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
using strings::Printf;
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
namespace {
template <typename T>
inline const T& Clamp(const T& low, const T& high, const T& value) {
if (high < value) return high;
if (value < low) return low;
return value;
}
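// Computes, for every output sample along one dimension, the contiguous span
// of input pixels that contribute to it together with their normalized kernel
// weights. With antialiasing enabled and a downscale, the kernel is stretched
// by 1/scale so it covers all contributing input pixels. For example, assuming
// the triangle kernel's radius of 1, scale = 0.5 and antialias = true give
// kernel_scale = 2 and span_size = 2 * ceil(1 * 2) + 1 = 5 (clamped to the
// input size).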
template <typename Kernel>
Status ComputeSpansCore(OpKernelContext* context, const Kernel& kernel,
const int64_t output_size, const int64_t input_size,
const float scale, const float translate,
const bool antialias, Spans* spans) {
const float inv_scale = 1.0 / scale;
const float inv_translate = -inv_scale * translate;
const float kernel_scale = antialias ? std::max(inv_scale, 1.0f) : 1.0f;
spans->span_size = std::min(
2 * static_cast<int>(std::ceil(kernel.Radius() * kernel_scale)) + 1,
static_cast<int>(input_size));
AllocatorAttributes alloc_attr;
alloc_attr.set_on_host(true);
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_INT32, tensorflow::TensorShape({output_size}),
&spans->starts, alloc_attr));
auto starts_vec = spans->starts.vec<int32>();
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_FLOAT,
tensorflow::TensorShape({spans->span_size * output_size}),
&spans->weights, alloc_attr));
auto weights_vec = spans->weights.vec<float>();
weights_vec.setZero();
const float one_over_kernel_scale = 1.0f / kernel_scale;
int max_span_size = 0;
std::vector<float> temp_weights;
for (int x = 0; x < output_size; ++x) {
const float col_f = x + 0.5f;
const float sample_f = col_f * inv_scale + inv_translate;
if (sample_f < 0 || sample_f > input_size) {
starts_vec(x) = 0;
continue;
}
int64_t span_start =
std::ceil(sample_f - kernel.Radius() * kernel_scale - 0.5f);
int64_t span_end =
std::floor(sample_f + kernel.Radius() * kernel_scale - 0.5f);
span_start = Clamp(static_cast<int64_t>(0), input_size - 1, span_start);
span_end = Clamp(static_cast<int64_t>(0), input_size - 1, span_end) + 1;
const int this_span_size = span_end - span_start;
if (this_span_size > spans->span_size) {
return errors::Internal(Printf("Span is too large: %d vs %d.",
this_span_size, spans->span_size));
}
float total_weight_sum = 0.0f;
temp_weights.clear();
for (int source = span_start; source < span_end; ++source) {
float kernel_pos = static_cast<float>(source) + 0.5f - sample_f;
float weight = kernel(std::abs(kernel_pos * one_over_kernel_scale));
total_weight_sum += weight;
temp_weights.push_back(weight);
}
max_span_size = std::max(max_span_size, this_span_size);
if (std::abs(total_weight_sum) >=
1000.0f * std::numeric_limits<float>::min()) {
float one_over_total_weight_sum = 1.0f / total_weight_sum;
int out_index = spans->span_size * x;
for (float weight : temp_weights) {
weights_vec(out_index) = weight * one_over_total_weight_sum;
++out_index;
}
}
starts_vec(x) = span_start;
}
return absl::OkStatus();
}
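// Inverts a set of forward spans for the gradient pass: for every input pixel
// of the forward op, gathers the output pixels (and weights) that sampled it
// and re-packs them into spans indexed by input position.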
Status ComputeGradSpansCore(OpKernelContext* context, const Spans& spans,
const int64_t forward_output_size,
const int64_t forward_input_size,
Spans* grad_spans) {
struct GradComponent {
int index;
float weight;
};
std::vector<std::vector<GradComponent>> grad_components(forward_input_size);
auto weights_vec = spans.weights.vec<float>();
auto starts_vec = spans.starts.vec<int32>();
for (int output_index = 0; output_index < forward_output_size;
++output_index) {
int input_index = starts_vec(output_index);
for (int j = 0; j < spans.span_size; ++j, ++input_index) {
const float weight = weights_vec(output_index * spans.span_size + j);
if (weight != 0.0f && input_index < forward_input_size) {
grad_components[input_index].push_back(
GradComponent{output_index, weight});
}
}
}
int max_size = 0;
for (std::vector<GradComponent>& gc : grad_components) {
if (!gc.empty()) {
std::sort(gc.begin(), gc.end(),
[](const GradComponent& x1, const GradComponent& x2) {
return x1.index < x2.index;
});
max_size = std::max(gc.back().index - gc.front().index + 1, max_size);
}
}
grad_spans->span_size = max_size;
AllocatorAttributes alloc_attr;
alloc_attr.set_on_host(true);
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_INT32, tensorflow::TensorShape({forward_input_size}),
&grad_spans->starts, alloc_attr));
auto grad_starts_vec = grad_spans->starts.vec<int32>();
TF_RETURN_IF_ERROR(context->allocate_temp(
tensorflow::DT_FLOAT,
tensorflow::TensorShape({grad_spans->span_size * forward_input_size}),
&grad_spans->weights, alloc_attr));
auto grad_weights_vec = grad_spans->weights.vec<float>();
grad_weights_vec.setZero();
for (int input_index = 0; input_index < forward_input_size; ++input_index) {
if (!grad_components[input_index].empty()) {
const int start_span = grad_components[input_index].front().index;
grad_starts_vec(input_index) = start_span;
for (const GradComponent& gc : grad_components[input_index]) {
grad_weights_vec(input_index * grad_spans->span_size + gc.index -
start_span) += gc.weight;
}
} else {
grad_starts_vec(input_index) = 0;
}
}
return absl::OkStatus();
}
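// Dispatches span computation to the requested sampling kernel type.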
Status ComputeSpans(OpKernelContext* context,
const functor::SamplingKernelType kernel_type,
const int64_t output_size, const int64_t input_size,
const float scale, const float translate,
const bool antialias, Spans* spans) {
switch (kernel_type) {
case functor::Lanczos1Kernel: {
return ComputeSpansCore(context, CreateLanczos1Kernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::Lanczos3Kernel: {
return ComputeSpansCore(context, CreateLanczos3Kernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::Lanczos5Kernel: {
return ComputeSpansCore(context, CreateLanczos5Kernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::GaussianKernel: {
return ComputeSpansCore(context, CreateGaussianKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::BoxKernel: {
return ComputeSpansCore(context, CreateBoxKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::TriangleKernel: {
return ComputeSpansCore(context, CreateTriangleKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::KeysCubicKernel: {
return ComputeSpansCore(context, CreateKeysCubicKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
case functor::MitchellCubicKernel: {
return ComputeSpansCore(context, CreateMitchellCubicKernel(), output_size,
input_size, scale, translate, antialias, spans);
}
default:
return errors::InvalidArgument(Printf("Unrecognized kernel type: %d",
static_cast<int>(kernel_type)));
}
return absl::OkStatus();
}
Status ComputeGradSpans(OpKernelContext* context,
const functor::SamplingKernelType kernel_type,
const int64_t forward_output_size,
const int64_t forward_input_size, const float scale,
const float translate, const bool antialias,
Spans* grad_spans) {
Spans spans;
TF_RETURN_IF_ERROR(ComputeSpans(context, kernel_type, forward_output_size,
forward_input_size, scale, translate,
antialias, &spans));
return ComputeGradSpansCore(context, spans, forward_output_size,
forward_input_size, grad_spans);
}
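// Reads a length-2 float vector input (the scale or translation argument)
// into two scalars, validating its shape.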
void GetValues(OpKernelContext* context, int input_index, float* v_1,
float* v_2) {
const Tensor& t = context->input(input_index);
OP_REQUIRES(context, t.dims() == 1,
errors::InvalidArgument("t must be 1-dimensional",
t.shape().DebugString()));
OP_REQUIRES(context, t.NumElements() == 2,
errors::InvalidArgument("t must have two elements",
t.shape().DebugString()));
auto data_vec = t.flat<float>().data();
*v_1 = data_vec[0];
*v_2 = data_vec[1];
}
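// ScaleAndTranslate op: resamples the input separably. Spans are computed for
// rows and columns independently; rows are resampled first into an
// intermediate buffer of shape [batch, output_height, input_width, channels],
// from which the columns are then resampled (see GatherSpans in the header).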
template <typename Device, typename T>
class ScaleAndTranslateOp : public OpKernel {
public:
explicit ScaleAndTranslateOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("antialias", &antialias_));
string kernel_type_str;
OP_REQUIRES_OK(context, context->GetAttr("kernel_type", &kernel_type_str));
kernel_type_ = functor::SamplingKernelTypeFromString(kernel_type_str);
OP_REQUIRES(context, kernel_type_ != functor::SamplingKernelTypeEnd,
errors::InvalidArgument("Unrecognized kernel type: " +
kernel_type_str));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
const Tensor& output_shape_t = context->input(1);
OP_REQUIRES(context, output_shape_t.dims() == 1,
errors::InvalidArgument("output_shape_t must be 1-dimensional",
output_shape_t.shape().DebugString()));
OP_REQUIRES(context, output_shape_t.NumElements() == 2,
errors::InvalidArgument("output_shape_t must have two elements",
output_shape_t.shape().DebugString()));
auto output_shape_vec = output_shape_t.vec<int32>();
const int64_t output_height = internal::SubtleMustCopy(output_shape_vec(0));
const int64_t output_width = internal::SubtleMustCopy(output_shape_vec(1));
OP_REQUIRES(
context,
FastBoundsCheck(input.dim_size(1), std::numeric_limits<int32>::max()) &&
FastBoundsCheck(input.dim_size(2),
std::numeric_limits<int32>::max()),
errors::InvalidArgument("input sizes must be between 0 and max int32"));
const int64_t batch_size = input.dim_size(0);
const int64_t input_height = input.dim_size(1);
const int64_t input_width = input.dim_size(2);
const int64_t channels = input.dim_size(3);
OP_REQUIRES(context, output_height > 0 && output_width > 0,
errors::InvalidArgument("output dimensions must be positive"));
OP_REQUIRES(
context, channels > 0,
errors::InvalidArgument("image must have at least one channel"));
OP_REQUIRES(
context, input.dim_size(1) > 0 && input.dim_size(2) > 0,
errors::InvalidArgument("input image must be of non-zero size"));
float row_scale, col_scale;
GetValues(context, 2, &row_scale, &col_scale);
OP_REQUIRES(context, row_scale > 0 && col_scale > 0,
errors::InvalidArgument("Scale must be greater than zero."));
float row_translation, col_translation;
GetValues(context, 3, &row_translation, &col_translation);
Tensor* output = nullptr;
TensorShape output_shape;
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(input.dim_size(0)));
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(output_height));
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(output_width));
OP_REQUIRES_OK(context, output_shape.AddDimWithStatus(input.dim_size(3)));
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
if (!context->status().ok()) return;
if (output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor image_data(input.tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = output->tensor<float, 4>();
functor::Spans col_spans;
OP_REQUIRES_OK(
context,
ComputeSpans(context, kernel_type_, output_width, input_width,
col_scale, col_translation, antialias_, &col_spans));
functor::Spans row_spans;
OP_REQUIRES_OK(
context,
ComputeSpans(context, kernel_type_, output_height, input_height,
row_scale, row_translation, antialias_, &row_spans));
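    // The intermediate buffer holds the image after the row pass only: rows are
    // resampled to output_height while the width stays at input_width; the
    // column pass then produces the final output.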
Tensor intermediate_t;
OP_REQUIRES_OK(
context, context->allocate_temp(DT_FLOAT,
TensorShape({batch_size, output_height,
input_width, channels}),
&intermediate_t));
TTypes<float, 4>::Tensor intermediate_data =
intermediate_t.tensor<float, 4>();
const functor::Spans& const_row_spans = row_spans;
typename TTypes<int32, 1>::ConstTensor row_starts(
const_row_spans.starts.tensor<int32, 1>());
typename TTypes<float, 1>::ConstTensor row_weights(
const_row_spans.weights.tensor<float, 1>());
const functor::Spans& const_col_spans = col_spans;
typename TTypes<int32, 1>::ConstTensor col_starts(
const_col_spans.starts.tensor<int32, 1>());
typename TTypes<float, 1>::ConstTensor col_weights(
const_col_spans.weights.tensor<float, 1>());
functor::GatherSpans<Device, T>()(
context->eigen_device<Device>(), row_spans.span_size, row_starts,
row_weights, col_spans.span_size, col_starts, col_weights, image_data,
intermediate_data, output_data);
}
functor::SamplingKernelType kernel_type_;
bool antialias_;
};
template <typename Device, typename T>
class ScaleAndTranslateGradOp : public OpKernel {
public:
explicit ScaleAndTranslateGradOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("antialias", &antialias_));
string kernel_type_str;
OP_REQUIRES_OK(context, context->GetAttr("kernel_type", &kernel_type_str));
kernel_type_ = functor::SamplingKernelTypeFromString(kernel_type_str);
OP_REQUIRES(context, kernel_type_ != functor::SamplingKernelTypeEnd,
errors::InvalidArgument("Unrecognized kernel type: " +
kernel_type_str));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& original_image = context->input(1);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input_grad must be 4-dimensional",
input.shape().DebugString()));
OP_REQUIRES(context, input.dtype() == DT_FLOAT,
errors::InvalidArgument("input_grad must be of type float",
DataTypeString(input.dtype())));
OP_REQUIRES(context, original_image.dims() == 4,
errors::InvalidArgument("original_image must be 4-dimensional",
original_image.shape().DebugString()));
const int64_t batch_size = input.dim_size(0);
const int64_t channels = input.dim_size(3);
const int64_t forward_input_height = original_image.dim_size(1);
const int64_t forward_input_width = original_image.dim_size(2);
OP_REQUIRES(context,
FastBoundsCheck(forward_input_height,
std::numeric_limits<int32>::max()) &&
FastBoundsCheck(forward_input_width,
std::numeric_limits<int32>::max()),
errors::InvalidArgument(
"original sizes must be between 0 and max int32"));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
0,
TensorShape({batch_size, forward_input_height,
forward_input_width, channels}),
&output));
float row_scale, col_scale;
GetValues(context, 2, &row_scale, &col_scale);
OP_REQUIRES(context, row_scale > 0 && col_scale > 0,
errors::InvalidArgument("Scale must be greater than zero."));
float row_translation, col_translation;
GetValues(context, 3, &row_translation, &col_translation);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad = input.tensor<float, 4>();
typename TTypes<T, 4>::Tensor output_grad(output->tensor<T, 4>());
const int64_t forward_output_height = input_grad.dimension(1);
const int64_t forward_output_width = input_grad.dimension(2);
functor::Spans col_spans;
OP_REQUIRES_OK(context,
ComputeGradSpans(context, kernel_type_, forward_output_width,
forward_input_width, col_scale,
col_translation, antialias_, &col_spans));
functor::Spans row_spans;
OP_REQUIRES_OK(
context, ComputeGradSpans(context, kernel_type_, forward_output_height,
forward_input_height, row_scale,
row_translation, antialias_, &row_spans));
Tensor intermediate_t;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_FLOAT,
TensorShape({batch_size, forward_input_height,
forward_output_width, channels}),
&intermediate_t));
TTypes<float, 4>::Tensor intermediate_data =
intermediate_t.tensor<float, 4>();
const functor::Spans& const_row_spans = row_spans;
typename TTypes<int32, 1>::ConstTensor row_starts =
const_row_spans.starts.tensor<int32, 1>();
typename TTypes<float, 1>::ConstTensor row_weights(
const_row_spans.weights.tensor<float, 1>());
const functor::Spans& const_col_spans = col_spans;
typename TTypes<int32, 1>::ConstTensor col_starts(
const_col_spans.starts.tensor<int32, 1>());
typename TTypes<float, 1>::ConstTensor col_weights(
const_col_spans.weights.tensor<float, 1>());
functor::GatherSpans<Device, T>()(
context->eigen_device<Device>(), row_spans.span_size, row_starts,
row_weights, col_spans.span_size, col_starts, col_weights, input_grad,
intermediate_data, output_grad);
}
functor::SamplingKernelType kernel_type_;
bool antialias_;
};
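// Resamples along the x axis: each output pixel is a weighted sum of up to
// span_size input pixels starting at starts[x], clipped to the input width.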
template <typename T>
void GatherColumns(int span_size, const int32* starts, const float* weights,
const T* image, const int64_t input_height,
const int64_t input_width, const int64_t output_height,
const int64_t output_width, const int channels,
float* output) {
const int64_t in_row_size = input_width * channels;
const int64_t out_row_size = output_width * channels;
for (int y = 0; y < output_height; ++y) {
const T* input_row_start = image + in_row_size * y;
float* out_pix = output + out_row_size * y;
for (int x = 0; x < output_width; ++x, out_pix += channels) {
const T* in_pix = input_row_start + starts[x] * channels;
const float* weights_start = weights + x * span_size;
const int real_span_size =
std::min(starts[x] + span_size, static_cast<int>(input_width)) -
starts[x];
const float* weights_end = weights_start + real_span_size;
for (int c = 0; c < channels; ++c) {
out_pix[c] = 0.0f;
}
for (const float* weight_ptr = weights_start; weight_ptr != weights_end;
++weight_ptr) {
float w = *weight_ptr;
for (int c = 0; c < channels; ++c) {
out_pix[c] += w * static_cast<float>(in_pix[c]);
}
in_pix += channels;
}
}
}
}
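// Accumulates weight * in_vec into out_vec, element by element.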
template <typename T>
inline void AddScaledVector(const T* in_vec, int vec_len, float weight,
float* out_vec) {
float* out_vec_end = out_vec + vec_len;
for (; out_vec != out_vec_end; ++out_vec, ++in_vec) {
*out_vec += weight * static_cast<float>(*in_vec);
}
}
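// Resamples along the y axis: each output row is a weighted sum of up to
// span_size input rows starting at starts[y], clipped to the input height.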
template <typename T>
void GatherRows(int span_size, const int32* starts, const float* weights,
const T* image, const int64_t input_height,
const int64_t input_width, const int64_t output_height,
const int64_t output_width, const int channels, float* output) {
const int64_t in_row_size = input_width * channels;
const int64_t out_row_size = output_width * channels;
for (int y = 0; y < output_height; ++y) {
float* out_row_data = output + out_row_size * y;
std::fill(out_row_data, out_row_data + out_row_size, 0.0f);
int in_row = starts[y];
const T* in_row_data = image + in_row_size * in_row;
const float* weights_start = weights + y * span_size;
const int real_span_size =
std::min(starts[y] + span_size, static_cast<int>(input_height)) -
starts[y];
const float* const weights_end = weights_start + real_span_size;
for (const float* weight_it = weights_start; weight_it != weights_end;
++weight_it) {
AddScaledVector(in_row_data, in_row_size, *weight_it, out_row_data);
in_row_data += in_row_size;
}
}
}
}
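// CPU implementation: for each batch element, gather rows into the
// intermediate buffer, then gather columns into the final output.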
template <typename T>
struct GatherSpans<CPUDevice, T> {
void operator()(const CPUDevice& d, int row_span_size,
typename TTypes<int32, 1>::ConstTensor row_starts,
typename TTypes<float, 1>::ConstTensor row_weights,
int col_span_size,
typename TTypes<int32, 1>::ConstTensor col_starts,
typename TTypes<float, 1>::ConstTensor col_weights,
typename TTypes<T, 4>::ConstTensor images,
typename TTypes<float, 4>::Tensor intermediate_buffer,
typename TTypes<float, 4>::Tensor resized_images) {
const int batch_size = images.dimension(0);
const int64_t input_height = images.dimension(1);
const int64_t input_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t output_height = resized_images.dimension(1);
const int64_t output_width = resized_images.dimension(2);
const int64_t input_pix_per_batch = input_width * input_height * channels;
const int64_t intermediate_pix_per_batch =
input_width * output_height * channels;
const int64_t output_pix_per_batch =
output_width * output_height * channels;
float* intermediate_ptr = intermediate_buffer.data();
const T* image_ptr = images.data();
float* out_ptr = resized_images.data();
for (int b = 0; b < batch_size; ++b, image_ptr += input_pix_per_batch,
intermediate_ptr += intermediate_pix_per_batch,
out_ptr += output_pix_per_batch) {
GatherRows(row_span_size, row_starts.data(), row_weights.data(),
image_ptr, input_height, input_width, output_height,
input_width, channels, intermediate_ptr);
GatherColumns(col_span_size, col_starts.data(), col_weights.data(),
intermediate_ptr, output_height, input_width, output_height,
output_width, channels, out_ptr);
}
}
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ScaleAndTranslate") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size") \
.HostMemory("scale") \
.HostMemory("translation"), \
ScaleAndTranslateOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ScaleAndTranslateGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("scale") \
.HostMemory("translation"), \
ScaleAndTranslateGradOp<CPUDevice, T>);
TF_CALL_float(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
}
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
using Eigen::Vector2f;
class DynamicKernel {
public:
virtual ~DynamicKernel() {}
virtual float Value(const float x) const = 0;
virtual float Radius() const = 0;
};
template <typename KernelType>
class TypedDynamicKernel : public DynamicKernel {
public:
explicit TypedDynamicKernel(const KernelType& kernel) : kernel_(kernel) {}
float Value(const float x) const override { return kernel_(x); }
float Radius() const override { return kernel_.Radius(); }
const KernelType kernel_;
};
template <typename KernelType>
std::unique_ptr<const DynamicKernel> CreateKernel(const KernelType& kernel) {
return std::make_unique<TypedDynamicKernel<KernelType>>(kernel);
}
std::unique_ptr<const DynamicKernel> Create(
functor::SamplingKernelType kernel_type) {
switch (kernel_type) {
case functor::Lanczos1Kernel:
return CreateKernel(functor::CreateLanczos1Kernel());
case functor::Lanczos3Kernel:
return CreateKernel(functor::CreateLanczos3Kernel());
case functor::Lanczos5Kernel:
return CreateKernel(functor::CreateLanczos5Kernel());
case functor::GaussianKernel:
return CreateKernel(functor::CreateGaussianKernel());
case functor::BoxKernel:
return CreateKernel(functor::CreateBoxKernel());
case functor::TriangleKernel:
return CreateKernel(functor::CreateTriangleKernel());
case functor::KeysCubicKernel:
return CreateKernel(functor::CreateKeysCubicKernel());
case functor::MitchellCubicKernel:
return CreateKernel(functor::CreateMitchellCubicKernel());
default:
LOG(FATAL) << "Unknown kernel type.";
return nullptr;
}
}
template <typename T>
inline const T& Clamp(const T& low, const T& high, const T& value) {
return std::min(high, std::max(low, value));
}
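// Test-side reference resampler: computes one output pixel at sample_f as a
// normalized, kernel-weighted sum over the input support window; sample points
// outside the image produce zeros.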
void Sample(const DynamicKernel& kernel, const bool antialias,
TTypes<float, 4>::Tensor images, const int batch,
const Vector2f& scale, const Vector2f& sample_f, float* dest) {
  const Vector2f kernel_scale(antialias ? std::max(scale.x(), 1.0f) : 1.0f,
                              antialias ? std::max(scale.y(), 1.0f) : 1.0f);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
const int64_t y_span_start = Clamp(
static_cast<int64_t>(0), in_height - 1,
static_cast<int64_t>(
std::ceil(sample_f.y() - kernel.Radius() * kernel_scale.y() - 0.5f)));
const int64_t y_span_end =
Clamp(static_cast<int64_t>(0), in_height - 1,
static_cast<int64_t>(std::floor(
sample_f.y() + kernel.Radius() * kernel_scale.y() - 0.5f))) +
1;
const int64_t x_span_start = Clamp(
static_cast<int64_t>(0), in_width - 1,
static_cast<int64_t>(
std::ceil(sample_f.x() - kernel.Radius() * kernel_scale.x() - 0.5f)));
const int64_t x_span_end =
Clamp(static_cast<int64_t>(0), in_width - 1,
static_cast<int64_t>(std::floor(
sample_f.x() + kernel.Radius() * kernel_scale.x() - 0.5f))) +
1;
std::fill(dest, dest + channels, 0.0f);
if (sample_f.x() < 0.0f || sample_f.y() < 0.0f || sample_f.x() > in_width ||
sample_f.y() > in_height) {
return;
}
const Vector2f one_over_kernel_scale(1.0f / kernel_scale.x(),
1.0f / kernel_scale.y());
float total_weight = 0.0f;
for (int64_t y = y_span_start; y < y_span_end; ++y) {
float y_kernel_pos = static_cast<float>(y) + 0.5f - sample_f.y();
float y_weight = kernel.Value(y_kernel_pos * one_over_kernel_scale.y());
for (int64_t x = x_span_start; x < x_span_end; ++x) {
float x_kernel_pos = static_cast<float>(x) + 0.5f - sample_f.x();
float x_weight = kernel.Value(x_kernel_pos * one_over_kernel_scale.x());
float kernel_weight = y_weight * x_weight;
total_weight += kernel_weight;
for (int c = 0; c < channels; ++c) {
dest[c] += static_cast<float>(images(batch, y, x, c)) * kernel_weight;
}
}
}
if (std::abs(total_weight) >= 1000.0f * std::numeric_limits<float>::min()) {
CHECK_NE(total_weight, 0.0f) << y_span_start << "," << y_span_end << " "
<< x_span_start << "," << x_span_end;
for (int c = 0; c < channels; ++c) {
dest[c] /= total_weight;
}
}
}
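// Reference implementation used to validate the op: inverts the forward
// scale/translation to map every output pixel back into input space and
// evaluates it with Sample().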
void ScaleAndTranslateBaseline(const DynamicKernel& kernel,
const bool antialias,
TTypes<float, 4>::Tensor images,
const Vector2f& orig_scale,
const Vector2f& orig_translate,
TTypes<float, 4>::Tensor output) {
const Vector2f scale(1.0f / orig_scale[0], 1.0f / orig_scale[1]);
const Vector2f translate(-orig_translate[0] / orig_scale[0],
-orig_translate[1] / orig_scale[1]);
const int batch = images.dimension(0);
const int channels = images.dimension(3);
ASSERT_EQ(batch, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
for (int b = 0; b < batch; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
const float out_y_f = static_cast<float>(y) + 0.5;
const float in_y_f = out_y_f * scale.y() + translate.y();
for (int64_t x = 0; x < out_width; ++x) {
const float out_x_f = static_cast<float>(x) + 0.5;
const float in_x_f = out_x_f * scale.x() + translate.x();
if (in_x_f < 0.0f || in_y_f < 0.0f || in_x_f > in_width ||
in_y_f > in_height) {
std::fill(&output(b, y, x, 0), &output(b, y, x + 1, 0), 0.0f);
} else {
Sample(kernel, antialias, images, b, scale, Vector2f(in_x_f, in_y_f),
&output(b, y, x, 0));
}
}
}
}
}
class ScaleAndTranslateOpTest : public OpsTestBase {
protected:
void CreateOp(const string& kernel_type_str, const bool antialias) {
TF_EXPECT_OK(NodeDefBuilder("scale_and_translate_op", "ScaleAndTranslate")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("kernel_type", kernel_type_str)
.Attr("antialias", antialias)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
kernel_type_ = functor::SamplingKernelTypeFromString(kernel_type_str);
antialias_ = antialias;
}
void SetCheckerboardImageInput(int batch_size, int num_row_squares,
int num_col_squares, int square_size,
int num_channels) {
inputs_.clear();
std::vector<float> data;
const int64_t row_size = num_col_squares * square_size * num_channels;
const int64_t image_size = num_row_squares * square_size * row_size;
data.resize(batch_size * image_size);
random::PhiloxRandom philox(42);
random::SimplePhilox rnd(&philox);
std::vector<float> col(num_channels);
for (int b = 0; b < batch_size; ++b) {
for (int y = 0; y < num_row_squares; ++y) {
for (int x = 0; x < num_col_squares; ++x) {
for (int n = 0; n < num_channels; ++n) {
col[n] = rnd.RandFloat();
}
for (int r = y * square_size; r < (y + 1) * square_size; ++r) {
auto it = data.begin() + b * image_size + r * row_size +
x * square_size * num_channels;
for (int n = 0; n < square_size; ++n) {
for (int chan = 0; chan < num_channels; ++chan, ++it) {
*it = col[chan] * 255.0;
}
}
}
}
}
}
AddInputFromArray<float>(
TensorShape({batch_size, num_row_squares * square_size,
num_col_squares * square_size, num_channels}),
data);
}
void RunTest(int output_image_height, int output_image_width,
const Vector2f& scale, const Vector2f& translate) {
AddInputFromArray<int32>(TensorShape({2}),
{output_image_height, output_image_width});
AddInputFromArray<float>(TensorShape({2}), {scale[1], scale[0]});
AddInputFromArray<float>(TensorShape({2}), {translate[1], translate[0]});
Status s = RunOpKernel();
const int batch_size = GetOutput(0)->dim_size(0);
const int channels = GetOutput(0)->dim_size(3);
Tensor expected(allocator(), DT_FLOAT,
TensorShape({batch_size, output_image_height,
output_image_width, channels}));
std::unique_ptr<const DynamicKernel> kernel = Create(kernel_type_);
ScaleAndTranslateBaseline(*kernel, antialias_,
mutable_input(0)->tensor<float, 4>(), scale,
translate, expected.tensor<float, 4>());
constexpr double kAbs = 1e-2f;
test::ExpectTensorNear<float>(expected, *GetOutput(0), kAbs);
}
functor::SamplingKernelType kernel_type_;
bool antialias_;
};
TEST_F(ScaleAndTranslateOpTest, IdentityTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = kNumRowSquares * kSquareSize;
constexpr int kOutputImageWidth = kNumColSquares * kSquareSize;
const Vector2f kScale(1.0f, 1.0f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, UpsampleTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = kNumRowSquares * kSquareSize * 2;
constexpr int kOutputImageWidth = kNumColSquares * kSquareSize * 2;
const Vector2f kScale(2.0f, 2.0f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, DownsampleTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = kNumRowSquares * kSquareSize / 2;
constexpr int kOutputImageWidth = kNumColSquares * kSquareSize / 2;
const Vector2f kScale(0.5f, 0.5f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, AntiAliasedDownsampleToASinglePixelTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 1;
constexpr int kOutputImageWidth = 1;
const Vector2f kScale(1.0f / (kNumRowSquares * kSquareSize),
1.0f / (kNumColSquares * kSquareSize));
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, NonAntiAliasedDownsampleToASinglePixelTest) {
CreateOp("lanczos3", false);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 16;
constexpr int64_t kNumColSquares = 13;
constexpr int64_t kSquareSize = 12;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 1;
constexpr int kOutputImageWidth = 1;
const Vector2f kScale(1.0f / (kNumRowSquares * kSquareSize),
1.0f / (kNumColSquares * kSquareSize));
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, UpsampleFromASinglePixelTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 1;
constexpr int64_t kNumColSquares = 1;
constexpr int64_t kSquareSize = 1;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 10;
constexpr int kOutputImageWidth = 17;
const Vector2f kScale(17.0f, 10.0f);
const Vector2f kTranslate(0.0f, 0.0f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, NonAntialiasedUpsampleFromASinglePixelTest) {
CreateOp("lanczos3", false);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 1;
constexpr int64_t kNumColSquares = 1;
constexpr int64_t kSquareSize = 1;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 10;
constexpr int kOutputImageWidth = 17;
const Vector2f kScale(17.0f, 10.0f);
const Vector2f kTranslate(0.0f, 0.0f);
antialias_ = true;
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, AntialiasedScaleAndTranslationTest) {
CreateOp("lanczos3", true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 11;
constexpr int64_t kNumColSquares = 7;
constexpr int64_t kSquareSize = 5;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 49;
constexpr int kOutputImageWidth = 51;
const Vector2f kScale(1.25f, 0.6f);
const Vector2f kTranslate(4.1f, -3.1f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, NonAntialiasedScaleAndTranslationTest) {
CreateOp("lanczos3", false);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 11;
constexpr int64_t kNumColSquares = 7;
constexpr int64_t kSquareSize = 5;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 49;
constexpr int kOutputImageWidth = 51;
const Vector2f kScale(1.25f, 0.6f);
const Vector2f kTranslate(4.1f, -3.1f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
TEST_F(ScaleAndTranslateOpTest, TestKernelTypes) {
const std::vector<string> kKernelTypes = {
"lanczos1", "lanczos3", "lanczos5", "box",
"triangle", "keyscubic", "mitchellcubic"};
for (const string& kernel_type : kKernelTypes) {
CreateOp(kernel_type, true);
constexpr int64_t kBatchSize = 2;
constexpr int64_t kNumRowSquares = 10;
constexpr int64_t kNumColSquares = 11;
constexpr int64_t kSquareSize = 1;
constexpr int64_t kNumChannels = 3;
SetCheckerboardImageInput(kBatchSize, kNumRowSquares, kNumColSquares,
kSquareSize, kNumChannels);
constexpr int kOutputImageHeight = 9;
constexpr int kOutputImageWidth = 11;
const Vector2f kScale(1.9f, 1.9f);
const Vector2f kTranslate(0.3f, 2.1f);
RunTest(kOutputImageHeight, kOutputImageWidth, kScale, kTranslate);
}
}
} |
1,531 | cpp | tensorflow/tensorflow | adjust_contrast_op | tensorflow/core/kernels/image/adjust_contrast_op.cc | tensorflow/core/kernels/image/adjust_contrast_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_ADJUST_CONTRAST_OP_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_ADJUST_CONTRAST_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
namespace tensorflow {
namespace functor {
template <typename Device, typename T>
struct AdjustContrast {
void operator()(const Device& d, typename TTypes<T, 4>::ConstTensor input,
typename TTypes<float>::ConstScalar contrast_factor,
typename TTypes<float>::ConstScalar min_value,
typename TTypes<float>::ConstScalar max_value,
typename TTypes<float, 4>::Tensor mean_values,
typename TTypes<float, 4>::Tensor output) {
const int batch = input.dimension(0);
const int height = input.dimension(1);
const int width = input.dimension(2);
const int channels = input.dimension(3);
Eigen::array<int, 4> scalar_broadcast;
scalar_broadcast[0] = batch;
scalar_broadcast[1] = height;
scalar_broadcast[2] = width;
scalar_broadcast[3] = channels;
Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> >
reduction_axis;
Eigen::IndexList<Eigen::type2index<1>, int, int, Eigen::type2index<1> >
broadcast_dims;
broadcast_dims.set(1, height);
broadcast_dims.set(2, width);
Eigen::IndexList<int, Eigen::type2index<1>, Eigen::type2index<1>, int>
reshape_dims;
reshape_dims.set(0, batch);
reshape_dims.set(3, channels);
Eigen::Sizes<1, 1, 1, 1> scalar;
float num_reduced_coeffs = height * width;
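    // Compute the per-(batch, channel) mean over the spatial dimensions,
    // broadcast it back to the full image shape, scale each pixel's deviation
    // from the mean by the contrast factor, and clamp to [min_value, max_value].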
mean_values.device(d) =
(input.template cast<float>().sum(reduction_axis).eval() /
num_reduced_coeffs)
.reshape(reshape_dims)
.broadcast(broadcast_dims);
auto contrast_factor_tensor =
contrast_factor.reshape(scalar).broadcast(scalar_broadcast);
auto adjusted =
(input.template cast<float>() - mean_values) * contrast_factor_tensor +
mean_values;
auto min_bcast = min_value.reshape(scalar).broadcast(scalar_broadcast);
auto max_bcast = max_value.reshape(scalar).broadcast(scalar_broadcast);
output.device(d) = adjusted.cwiseMin(max_bcast).cwiseMax(min_bcast);
}
};
template <typename Device, typename T>
struct AdjustContrastv2 {
void operator()(const Device& d, typename TTypes<T, 4>::ConstTensor input,
typename TTypes<float>::ConstScalar contrast_factor,
typename TTypes<T, 4>::Tensor output) {
const int batch = input.dimension(0);
const int height = input.dimension(1);
const int width = input.dimension(2);
const int channels = input.dimension(3);
Eigen::array<int, 4> scalar_broadcast;
scalar_broadcast[0] = batch;
scalar_broadcast[1] = height;
scalar_broadcast[2] = width;
scalar_broadcast[3] = channels;
Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1> >
reduction_axis;
Eigen::IndexList<Eigen::type2index<1>, int, int, Eigen::type2index<1> >
broadcast_dims;
broadcast_dims.set(1, height);
broadcast_dims.set(2, width);
Eigen::IndexList<int, Eigen::type2index<1>, Eigen::type2index<1>, int>
reshape_dims;
reshape_dims.set(0, batch);
reshape_dims.set(3, channels);
Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>,
Eigen::type2index<0>, Eigen::type2index<3> >
reduced_dims_first;
Eigen::Sizes<1, 1, 1, 1> scalar;
float num_reduced_coeffs = height * width;
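    // Store the per-(batch, channel) spatial mean in `output` (the shuffle puts
    // the reduced dimensions first), then add factor * (input - mean) to it.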
output.device(d) = (input.template cast<float>()
.shuffle(reduced_dims_first)
.sum(reduction_axis)
.eval() /
num_reduced_coeffs)
.template cast<T>()
.reshape(reshape_dims)
.broadcast(broadcast_dims);
auto contrast_factor_tensor =
contrast_factor.reshape(scalar).broadcast(scalar_broadcast);
auto adjusted =
(input - output).template cast<float>() * contrast_factor_tensor;
output.device(d) += adjusted.template cast<T>();
}
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/adjust_contrast_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class AdjustContrastOp : public OpKernel {
public:
explicit AdjustContrastOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& factor = context->input(1);
const Tensor& min_value = context->input(2);
const Tensor& max_value = context->input(3);
OP_REQUIRES(context, input.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input.shape().DebugString()));
const int64_t height = input.dim_size(input.dims() - 3);
const int64_t width = input.dim_size(input.dims() - 2);
const int64_t channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(factor.shape()),
errors::InvalidArgument("contrast_factor must be scalar: ",
factor.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_value.shape()),
errors::InvalidArgument("min_value must be scalar: ",
min_value.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_value.shape()),
errors::InvalidArgument("max_value must be scalar: ",
max_value.shape().DebugString()));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of AdjustContrast is not"
" currently available."));
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
Tensor mean_values;
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::value,
TensorShape(input.shape()),
&mean_values));
if (input.NumElements() > 0) {
const int64_t batch = input.NumElements() / (height * width * channels);
const int64_t shape[4] = {batch, height, width, channels};
functor::AdjustContrast<Device, T>()(
context->eigen_device<Device>(), input.shaped<T, 4>(shape),
factor.scalar<float>(), min_value.scalar<float>(),
max_value.scalar<float>(), mean_values.shaped<float, 4>(shape),
output->shaped<float, 4>(shape));
}
}
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("AdjustContrast").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
AdjustContrastOp<CPUDevice, T>);
REGISTER_KERNEL(uint8);
REGISTER_KERNEL(int8);
REGISTER_KERNEL(int16);
REGISTER_KERNEL(int32);
REGISTER_KERNEL(float);
REGISTER_KERNEL(double);
#undef REGISTER_KERNEL
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU_SPEC(T) \
template <> \
void AdjustContrast<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, \
typename TTypes<float>::ConstScalar contrast_factor, \
typename TTypes<float>::ConstScalar min_value, \
typename TTypes<float>::ConstScalar max_value, \
typename TTypes<float, 4>::Tensor mean_values, \
typename TTypes<float, 4>::Tensor output); \
extern template struct AdjustContrast<GPUDevice, T>;
DECLARE_GPU_SPEC(uint8);
DECLARE_GPU_SPEC(int8);
DECLARE_GPU_SPEC(int16);
DECLARE_GPU_SPEC(int32);
DECLARE_GPU_SPEC(float);
DECLARE_GPU_SPEC(double);
#undef DECLARE_GPU_SPEC
}
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("AdjustContrast").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
AdjustContrastOp<GPUDevice, T>);
REGISTER_GPU_KERNEL(uint8);
REGISTER_GPU_KERNEL(int8);
REGISTER_GPU_KERNEL(int16);
REGISTER_GPU_KERNEL(int32);
REGISTER_GPU_KERNEL(float);
REGISTER_GPU_KERNEL(double);
#undef REGISTER_GPU_KERNEL
#endif
class AdjustContrastOpV2Base : public OpKernel {
protected:
explicit AdjustContrastOpV2Base(OpKernelConstruction* context)
: OpKernel(context) {}
struct ComputeOptions {
const Tensor* input = nullptr;
const Tensor* factor = nullptr;
Tensor* output = nullptr;
int64_t batch = 0;
int64_t height = 0;
int64_t width = 0;
int64_t channels = 0;
};
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& factor = context->input(1);
OP_REQUIRES(context, input.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input.shape().DebugString()));
const int64_t height = input.dim_size(input.dims() - 3);
const int64_t width = input.dim_size(input.dims() - 2);
const int64_t channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(factor.shape()),
errors::InvalidArgument("contrast_factor must be scalar: ",
factor.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
if (input.NumElements() > 0) {
const int64_t batch = input.NumElements() / (height * width * channels);
ComputeOptions options;
options.input = &input;
options.factor = &factor;
options.output = output;
options.batch = batch;
options.height = height;
options.width = width;
options.channels = channels;
DoCompute(context, options);
}
}
virtual void DoCompute(OpKernelContext* context,
const ComputeOptions& options) = 0;
};
template <typename Device, typename T>
class AdjustContrastOpv2;
template <>
class AdjustContrastOpv2<CPUDevice, float> : public AdjustContrastOpV2Base {
public:
explicit AdjustContrastOpv2(OpKernelConstruction* context)
: AdjustContrastOpV2Base(context) {}
void DoCompute(OpKernelContext* context,
const ComputeOptions& options) override {
const int64_t batch = options.batch;
const int64_t height = options.height;
const int64_t width = options.width;
const int64_t channels = options.channels;
const int64_t image_size = height * width;
const Tensor* input = options.input;
const Tensor* factor = options.factor;
Tensor* output = options.output;
Tensor mean_values;
OP_REQUIRES_OK(context, context->allocate_temp(
DataTypeToEnum<float>::value,
TensorShape({batch, channels}), &mean_values));
auto input_data = input->shaped<float, 3>({batch, image_size, channels});
auto mean_data = mean_values.tensor<float, 2>();
auto output_data = output->shaped<float, 3>({batch, image_size, channels});
ReduceMeanAcrossImage(input_data, mean_data, output_data);
BroadcastAcrossImage(mean_data, output_data);
IncrementWithScaling(input_data, factor->scalar<float>(), output_data);
}
private:
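  // Computes the per-channel mean over all pixels of each batch element. The
  // sum is built as a pairwise (tree) reduction into `scratch` to limit
  // floating-point error, then scaled by 1 / image_size.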
void ReduceMeanAcrossImage(typename TTypes<float, 3>::ConstTensor input,
typename TTypes<float, 2>::Tensor mean,
typename TTypes<float, 3>::Tensor scratch) {
const int64_t batch = input.dimension(0);
const int64_t image_size = input.dimension(1);
const int64_t channels = input.dimension(2);
TTypes<float, 1>::ConstTensor input_flat(&input(0, 0, 0), input.size());
TTypes<float, 1>::Tensor mean_flat(&mean(0, 0), mean.size());
TTypes<float, 1>::Tensor summation_scratch(&scratch(0, 0, 0),
scratch.size());
using Eigen::DenseIndex;
typedef Eigen::array<Eigen::DenseIndex, 1> Index;
const int64_t plane_size = image_size * channels;
for (int64_t i = 0; i < batch; i++) {
auto input_plane = input_flat.slice(Index{DenseIndex(i * plane_size)},
Index{DenseIndex(plane_size)});
auto summation_plane = summation_scratch.slice(
Index{DenseIndex(i * plane_size)}, Index{DenseIndex(plane_size)});
int64_t remaining_size = image_size;
int round = 0;
do {
int64_t right_size = remaining_size / 2;
int64_t left_size = remaining_size - right_size;
DCHECK(left_size == right_size || left_size == right_size + 1);
if (round == 0) {
summation_plane.slice(Index{0},
Index{DenseIndex(right_size * channels)}) =
input_plane.slice(Index{DenseIndex(left_size * channels)},
Index{DenseIndex(right_size * channels)}) +
input_plane.slice(Index{0},
Index{DenseIndex(right_size * channels)});
if (left_size > right_size) {
DCHECK_EQ(left_size - right_size, 1);
summation_plane.slice(Index{DenseIndex(right_size * channels)},
Index{DenseIndex(channels)}) =
input_plane.slice(Index{DenseIndex(right_size * channels)},
Index{DenseIndex(channels)});
}
} else {
summation_plane.slice(Index{0},
Index{DenseIndex(right_size * channels)}) +=
summation_plane.slice(Index{DenseIndex(left_size * channels)},
Index{DenseIndex(right_size * channels)});
}
remaining_size = left_size;
round++;
} while (remaining_size > 1);
const float mean_scaling = 1.0f / image_size;
auto mean_plane = mean_flat.slice(Index{DenseIndex(i * channels)},
Index{DenseIndex(channels)});
mean_plane =
summation_plane.slice(Index{0}, Index{DenseIndex(channels)}) *
mean_scaling;
}
}
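  // Replicates the per-channel means across every pixel of `outputs`, doubling
  // the already-copied region with each memcpy (capped at 1024 pixels per copy).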
void BroadcastAcrossImage(typename TTypes<float, 2>::Tensor inputs,
typename TTypes<float, 3>::Tensor outputs) {
int64_t batch = outputs.dimension(0);
int64_t image_size = outputs.dimension(1);
int64_t channels = outputs.dimension(2);
for (int64_t i = 0; i < batch; i++) {
const float* mean_p = &inputs(i, 0);
float* output_p = &outputs(i, 0, 0);
memcpy(output_p, mean_p, sizeof(float) * channels);
int64_t copied = 1;
while (copied < image_size) {
const int64_t kMaxToCopy = 1024;
int64_t to_copy = std::min({copied, image_size - copied, kMaxToCopy});
memcpy(output_p + channels * copied, output_p,
to_copy * channels * sizeof(float));
copied += to_copy;
}
}
}
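  // Computes output = mean + factor * (input - mean) elementwise; on entry
  // `output` holds the broadcast per-channel means.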
void IncrementWithScaling(typename TTypes<float, 3>::ConstTensor input,
typename TTypes<float>::ConstScalar factor,
typename TTypes<float, 3>::Tensor output) {
const float factor_value = factor();
float* p = output.data();
const float* q = input.data();
for (int64_t n = 0; n < input.size(); ++n) {
p[n] += factor_value * (q[n] - p[n]);
}
}
};
REGISTER_KERNEL_BUILDER(
Name("AdjustContrastv2").Device(DEVICE_CPU).TypeConstraint<float>("T"),
AdjustContrastOpv2<CPUDevice, float>);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU_SPEC(T) \
template <> \
void AdjustContrastv2<GPUDevice, T>::operator()( \
const GPUDevice& d, typename TTypes<T, 4>::ConstTensor input, \
typename TTypes<float>::ConstScalar contrast_factor, \
typename TTypes<T, 4>::Tensor output); \
extern template struct AdjustContrastv2<GPUDevice, T>;
DECLARE_GPU_SPEC(float);
DECLARE_GPU_SPEC(Eigen::half);
#undef DECLARE_GPU_SPEC
}
template <typename T>
class AdjustContrastOpv2<GPUDevice, T> : public AdjustContrastOpV2Base {
public:
explicit AdjustContrastOpv2(OpKernelConstruction* context)
: AdjustContrastOpV2Base(context) {}
void DoCompute(OpKernelContext* context,
const ComputeOptions& options) override {
const int64_t shape[4] = {options.batch, options.height, options.width,
options.channels};
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of AdjustContrastv2 is not"
" currently available."));
functor::AdjustContrastv2<GPUDevice, T>()(
context->eigen_device<GPUDevice>(), options.input->shaped<T, 4>(shape),
options.factor->scalar<float>(), options.output->shaped<T, 4>(shape));
}
};
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("AdjustContrastv2").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
AdjustContrastOpv2<GPUDevice, T>);
REGISTER_GPU(float)
REGISTER_GPU(Eigen::half)
#undef REGISTER_GPU
#endif
} | #include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class AdjustContrastOpTest : public OpsTestBase {};
TEST_F(AdjustContrastOpTest, Simple_1113) {
TF_EXPECT_OK(NodeDefBuilder("adjust_contrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 1, 3}), {-1, 2, 3});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 3}));
test::FillValues<float>(&expected, {-1, 2, 3});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(AdjustContrastOpTest, Simple_1223) {
TF_EXPECT_OK(NodeDefBuilder("adjust_contrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2, 2, 3}),
{1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12});
AddInputFromArray<float>(TensorShape({}), {0.2f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 3}));
test::FillValues<float>(&expected, {2.2, 6.2, 10.2, 2.4, 6.4, 10.4, 2.6, 6.6,
10.6, 2.8, 6.8, 10.8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(AdjustContrastOpTest, Big_99x99x3) {
TF_EXPECT_OK(NodeDefBuilder("adjust_contrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
std::vector<float> values;
values.reserve(99 * 99 * 3);
for (int i = 0; i < 99 * 99 * 3; ++i) {
values.push_back(i % 255);
}
AddInputFromArray<float>(TensorShape({1, 99, 99, 3}), values);
AddInputFromArray<float>(TensorShape({}), {0.2f});
TF_ASSERT_OK(RunOpKernel());
}
} |
1,532 | cpp | tensorflow/tensorflow | text_line_dataset_op | tensorflow/core/kernels/data/text_line_dataset_op.cc | tensorflow/core/kernels/data/text_line_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_TEXT_LINE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_TEXT_LINE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class TextLineDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "TextLine";
static constexpr const char* const kFileNames = "filenames";
static constexpr const char* const kCompressionType = "compression_type";
static constexpr const char* const kBufferSize = "buffer_size";
explicit TextLineDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/text_line_dataset_op.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
namespace data {
constexpr const char* const TextLineDatasetOp::kDatasetType;
constexpr const char* const TextLineDatasetOp::kFileNames;
constexpr const char* const TextLineDatasetOp::kCompressionType;
constexpr const char* const TextLineDatasetOp::kBufferSize;
constexpr char kZLIB[] = "ZLIB";
constexpr char kGZIP[] = "GZIP";
constexpr char kCurrentFileIndex[] = "current_file_index";
constexpr char kCurrentPos[] = "current_pos";
class TextLineDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<string> filenames,
const string& compression_type,
const io::ZlibCompressionOptions& options)
: DatasetBase(DatasetContext(ctx)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
use_compression_(!compression_type.empty()),
options_(options) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this,
name_utils::IteratorPrefix(TextLineDatasetOp::kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* filenames = nullptr;
Node* compression_type = nullptr;
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
TF_RETURN_IF_ERROR(b->AddScalar(options_.input_buffer_size, &buffer_size));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {filenames, compression_type, buffer_size}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (buffered_input_stream_) {
Tensor line_contents(tstring{});
tstring& line_contents_str = line_contents.scalar<tstring>()();
Status s = buffered_input_stream_->ReadLine(&line_contents_str);
if (s.ok()) {
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(
name_utils::OpName(TextLineDatasetOp::kDatasetType));
bytes_counter->IncrementBy(line_contents_str.size());
out_tensors->push_back(std::move(line_contents));
*end_of_sequence = false;
return absl::OkStatus();
} else if (!errors::IsOutOfRange(s)) {
return s;
}
ResetStreamsLocked();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
if (buffered_input_stream_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentPos,
buffered_input_stream_->Tell()));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
ResetStreamsLocked();
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, ¤t_file_index));
current_file_index_ = size_t(current_file_index);
if (reader->Contains(prefix(), kCurrentPos)) {
int64_t current_pos;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentPos, ¤t_pos));
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
TF_RETURN_IF_ERROR(buffered_input_stream_->Seek(current_pos));
}
return absl::OkStatus();
}
private:
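    // Opens the file at current_file_index_ and builds the buffered input
    // stream, inserting a zlib/gzip decompression stage when a compression
    // type was specified.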
Status SetupStreamsLocked(Env* env) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (current_file_index_ >= dataset()->filenames_.size()) {
return errors::InvalidArgument(
"current_file_index_:", current_file_index_,
" >= filenames_.size():", dataset()->filenames_.size());
}
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
input_stream_ =
std::make_unique<io::RandomAccessInputStream>(file_.get(), false);
if (dataset()->use_compression_) {
zlib_input_stream_ = std::make_unique<io::ZlibInputStream>(
input_stream_.get(), dataset()->options_.input_buffer_size,
dataset()->options_.input_buffer_size, dataset()->options_);
buffered_input_stream_ = std::make_unique<io::BufferedInputStream>(
zlib_input_stream_.get(), dataset()->options_.input_buffer_size,
false);
} else {
buffered_input_stream_ = std::make_unique<io::BufferedInputStream>(
input_stream_.get(), dataset()->options_.input_buffer_size, false);
}
return absl::OkStatus();
}
void ResetStreamsLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
input_stream_.reset();
zlib_input_stream_.reset();
buffered_input_stream_.reset();
file_.reset();
}
mutex mu_;
std::unique_ptr<io::RandomAccessInputStream> input_stream_
TF_GUARDED_BY(mu_);
std::unique_ptr<io::ZlibInputStream> zlib_input_stream_ TF_GUARDED_BY(mu_);
std::unique_ptr<io::BufferedInputStream> buffered_input_stream_
TF_GUARDED_BY(mu_);
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_
TF_GUARDED_BY(mu_);
};
const std::vector<string> filenames_;
const tstring compression_type_;
const bool use_compression_;
const io::ZlibCompressionOptions options_;
};
TextLineDatasetOp::TextLineDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {}
void TextLineDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
const Tensor* filenames_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor));
OP_REQUIRES(
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
tstring compression_type;
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType,
&compression_type));
int64_t buffer_size = -1;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(
ctx, buffer_size >= 0,
errors::InvalidArgument("`buffer_size` must be >= 0 (0 == default)"));
io::ZlibCompressionOptions zlib_compression_options =
io::ZlibCompressionOptions::DEFAULT();
if (compression_type == kZLIB) {
zlib_compression_options = io::ZlibCompressionOptions::DEFAULT();
} else if (compression_type == kGZIP) {
zlib_compression_options = io::ZlibCompressionOptions::GZIP();
} else {
OP_REQUIRES(ctx, compression_type.empty(),
errors::InvalidArgument("Unsupported compression_type."));
}
if (buffer_size != 0) {
zlib_compression_options.input_buffer_size = buffer_size;
}
std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<tstring>()(i));
metrics::RecordTFDataFilename(kDatasetType, filenames[i]);
}
LogFilenames(filenames);
*output = new Dataset(ctx, std::move(filenames), compression_type,
zlib_compression_options);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TextLineDataset").Device(DEVICE_CPU),
TextLineDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/text_line_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "text_line_dataset";
tstring LocalTempFilename() {
std::string path;
CHECK(Env::Default()->LocalTempFilename(&path));
return tstring(path);
}
class TextLineDatasetParams : public DatasetParams {
public:
TextLineDatasetParams(std::vector<tstring> filenames,
CompressionType compression_type, int64_t buffer_size,
string node_name)
: DatasetParams({DT_STRING}, {PartialTensorShape({})},
std::move(node_name)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
buffer_size_(buffer_size) {}
std::vector<Tensor> GetInputTensors() const override {
int num_files = filenames_.size();
return {
CreateTensor<tstring>(TensorShape({num_files}), filenames_),
CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)}),
CreateTensor<int64_t>(TensorShape({}), {buffer_size_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
*input_names = {
TextLineDatasetOp::kFileNames,
TextLineDatasetOp::kCompressionType,
TextLineDatasetOp::kBufferSize,
};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return TextLineDatasetOp::kDatasetType;
}
private:
std::vector<tstring> filenames_;
CompressionType compression_type_;
int64_t buffer_size_;
};
class TextLineDatasetOpTest : public DatasetOpsTestBase {};
Status CreateTestFiles(const std::vector<tstring>& filenames,
const std::vector<tstring>& contents,
CompressionType compression_type) {
if (filenames.size() != contents.size()) {
return tensorflow::errors::InvalidArgument(
"The number of files does not match with the contents");
}
CompressionParams params;
params.output_buffer_size = 10;
params.compression_type = compression_type;
for (int i = 0; i < filenames.size(); ++i) {
TF_RETURN_IF_ERROR(
WriteDataToFile(filenames[i], contents[i].data(), params));
}
return absl::OkStatus();
}
TextLineDatasetParams TextLineDatasetParams1() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<tstring> contents = {
absl::StrCat("hello world\n", "11223334455\n"),
absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")};
CompressionType compression_type = CompressionType::ZLIB;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TextLineDatasetParams(filenames,
compression_type,
10,
kNodeName);
}
TextLineDatasetParams TextLineDatasetParams2() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<tstring> contents = {
absl::StrCat("hello world\n", "11223334455\n"),
absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")};
CompressionType compression_type = CompressionType::GZIP;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
    LOG(WARNING) << "Failed to create the test files: "
                 << absl::StrJoin(filenames, ", ");
}
return TextLineDatasetParams(filenames,
compression_type,
10,
kNodeName);
}
TextLineDatasetParams TextLineDatasetParams3() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<tstring> contents = {
absl::StrCat("hello world\n", "11223334455\n"),
absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
    LOG(WARNING) << "Failed to create the test files: "
                 << absl::StrJoin(filenames, ", ");
}
return TextLineDatasetParams(filenames,
compression_type,
10,
kNodeName);
}
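// Each test case expects the individual lines of both files, in file order.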
std::vector<GetNextTestCase<TextLineDatasetParams>> GetNextTestCases() {
return {{TextLineDatasetParams1(),
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams2(),
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams3(),
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})}};
}
ITERATOR_GET_NEXT_TEST_P(TextLineDatasetOpTest, TextLineDatasetParams,
GetNextTestCases())
TEST_F(TextLineDatasetOpTest, DatasetNodeName) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TextLineDatasetOpTest, DatasetTypeString) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TextLineDatasetOp::kDatasetType)));
}
TEST_F(TextLineDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING}));
}
TEST_F(TextLineDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(TextLineDatasetOpTest, Cardinality) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(TextLineDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING}));
}
TEST_F(TextLineDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(TextLineDatasetOpTest, IteratorPrefix) {
auto dataset_params = TextLineDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TextLineDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
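// Save/restore cases: the iterator is checkpointed at breakpoints 0, 2, and 6
// while producing the same five lines as above.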
std::vector<IteratorSaveAndRestoreTestCase<TextLineDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{TextLineDatasetParams1(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams2(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})},
{TextLineDatasetParams3(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}), {{"hello world"},
{"11223334455"},
{"abcd, EFgH"},
{" "},
{"$%^&*()"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TextLineDatasetOpTest, TextLineDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,533 | cpp | tensorflow/tensorflow | parallel_interleave_dataset_op | tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc | tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_PARALLEL_INTERLEAVE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_PARALLEL_INTERLEAVE_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class ParallelInterleaveDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "LegacyParallelInterleave";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kCycleLength = "cycle_length";
static constexpr const char* const kBlockLength = "block_length";
static constexpr const char* const kDeterministic = "deterministic";
static constexpr const char* const kSloppy = "sloppy";
static constexpr const char* const kBufferOutputElements =
"buffer_output_elements";
static constexpr const char* const kPrefetchInputElements =
"prefetch_input_elements";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit ParallelInterleaveDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
const int op_version_;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
DeterminismPolicy deterministic_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.h"
#include <atomic>
#include <deque>
#include <functional>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const
ParallelInterleaveDatasetOp::kDatasetType;
constexpr const char* const
ParallelInterleaveDatasetOp::kInputDataset;
constexpr const char* const
ParallelInterleaveDatasetOp::kOtherArguments;
constexpr const char* const
ParallelInterleaveDatasetOp::kCycleLength;
constexpr const char* const
ParallelInterleaveDatasetOp::kBlockLength;
constexpr const char* const
ParallelInterleaveDatasetOp::kDeterministic;
constexpr const char* const ParallelInterleaveDatasetOp::kSloppy;
constexpr const char* const
ParallelInterleaveDatasetOp::kBufferOutputElements;
constexpr const char* const
ParallelInterleaveDatasetOp::kPrefetchInputElements;
constexpr const char* const ParallelInterleaveDatasetOp::kFunc;
constexpr const char* const
ParallelInterleaveDatasetOp::kTarguments;
constexpr const char* const
ParallelInterleaveDatasetOp::kOutputTypes;
constexpr const char* const
ParallelInterleaveDatasetOp::kOutputShapes;
constexpr char kInputExhausted[] = "input_exhausted";
constexpr char kNextIndex[] = "next_index";
constexpr char kBlockCount[] = "block_count";
constexpr char kWorkersSize[] = "workers_size";
constexpr char kInterleaveSize[] = "interleave_size";
constexpr char kInterleaveIndices[] = "interleave_indices";
constexpr char kStagingSize[] = "staging_size";
constexpr char kStagingIndices[] = "staging_indices";
constexpr char kWorkerThreadsRunning[] = "worker_threads_running";
constexpr char kDataParallelInterleaveWorker[] =
"data_parallel_interleave_worker";
constexpr char kWorker[] = "worker";
constexpr char kInputSize[] = "input_size";
constexpr char kInput[] = "input";
constexpr char kOutputsSize[] = "outputs_size";
constexpr char kOutputs[] = "outputs";
constexpr char kIsProducing[] = "is_producing";
constexpr char kWorkerThread[] = "worker_thread";
constexpr char kIteratorExhausted[] = "iterator_exhausted";
constexpr char kIteratorCreationStatus[] = "iterator_creation_status";
constexpr char kOutput[] = "output";
constexpr char kEndOfSequence[] = "end_of_sequence";
constexpr char kStatus[] = "status";
constexpr char kOutputSize[] = "output_size";
constexpr char kCode[] = "code";
constexpr char KMessage[] = "msg";
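// Dataset that applies a dataset-returning function to each input element and
// interleaves the results, reading from up to `cycle_length` of them at a
// time with `prefetch_input_elements` more prepared in the background.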
class ParallelInterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func, int64_t cycle_length,
int64_t block_length, DeterminismPolicy deterministic,
int64_t buffer_output_elements, int64_t prefetch_input_elements,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes, int op_version)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
cycle_length_(cycle_length),
block_length_(block_length),
deterministic_(deterministic),
buffer_output_elements_(buffer_output_elements),
prefetch_input_elements_(prefetch_input_elements),
output_types_(output_types),
output_shapes_(output_shapes),
traceme_metadata_(
{{"block_length",
strings::Printf("%lld", static_cast<long long>(block_length))},
{"cycle_length",
strings::Printf("%lld", static_cast<long long>(cycle_length))},
{"deterministic",
deterministic.IsDeterministic() || deterministic.IsDefault()
? "true"
: "false"}}),
op_version_(op_version) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
bool deterministic =
deterministic_.IsDeterministic() || deterministic_.IsDefault();
return std::make_unique<Iterator>(
Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)},
deterministic);
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<std::pair<size_t, Node*>> inputs;
std::vector<std::pair<size_t, gtl::ArraySlice<Node*>>> list_inputs;
int input_index = 0;
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
inputs.emplace_back(input_index++, input_node);
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
list_inputs.emplace_back(input_index++, other_arguments);
Node* cycle_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(cycle_length_, &cycle_length_node));
inputs.emplace_back(input_index++, cycle_length_node);
Node* block_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(block_length_, &block_length_node));
inputs.emplace_back(input_index++, block_length_node);
if (op_version_ == 1) {
Node* sloppy_node;
TF_RETURN_IF_ERROR(
b->AddScalar(deterministic_.IsNondeterministic(), &sloppy_node));
inputs.emplace_back(input_index++, sloppy_node);
}
Node* buffer_output_elements_node;
TF_RETURN_IF_ERROR(
b->AddScalar(buffer_output_elements_, &buffer_output_elements_node));
inputs.emplace_back(input_index++, buffer_output_elements_node);
Node* prefetch_input_elements_node;
TF_RETURN_IF_ERROR(
b->AddScalar(prefetch_input_elements_, &prefetch_input_elements_node));
inputs.emplace_back(input_index++, prefetch_input_elements_node);
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
attrs.emplace_back(kFunc, f);
if (op_version_ == 2) {
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
}
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
attrs.emplace_back(kTarguments, other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(this, inputs, list_inputs, attrs, output));
return absl::OkStatus();
}
private:
int64_t num_threads() const {
return cycle_length_ + prefetch_input_elements_;
}
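  // Iterator backed by one worker thread per interleave/prefetch slot. Workers
  // fill per-slot output buffers that GetNextInternal drains in round-robin
  // order (or first-available order when running nondeterministically).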
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params, bool deterministic)
: DatasetIterator<Dataset>(params),
deterministic_(deterministic),
workers_(dataset()->num_threads()),
worker_thread_states_(dataset()->num_threads()) {}
~Iterator() override {
CancelThreads();
if (deregister_fn_) deregister_fn_();
}
Status Initialize(IteratorContext* ctx) override {
cancellation_manager_ = std::make_unique<CancellationManager>();
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
IteratorContext(params), this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
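    // Walks the interleave cycle starting at next_index_: returns a buffered
    // element if one is ready, retires exhausted workers and promotes staged
    // ones, and otherwise blocks until a worker produces output.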
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(EnsureWorkerThreadsStarted(ctx));
while (!cancelled_) {
bool can_produce_elements = false;
bool must_wait_for_input = true;
for (int64_t i = 0; i < interleave_indices_.size(); ++i) {
int64_t index = (next_index_ + i) % interleave_indices_.size();
int64_t current_worker_index = interleave_indices_[index];
if (current_worker_index < 0) {
continue;
}
WorkerState* current_worker = &workers_[current_worker_index];
can_produce_elements |= current_worker->MayHaveElements();
if (!current_worker->outputs.empty()) {
next_index_ = index;
const bool element_acquired_sloppily = !deterministic_ && i > 1;
if (!element_acquired_sloppily) {
block_count_++;
if (block_count_ == dataset()->block_length_) {
next_index_ = (index + 1) % interleave_indices_.size();
block_count_ = 0;
}
} else {
block_count_ = 0;
}
*end_of_sequence = false;
Status s = current_worker->outputs.front().status;
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode(
"ParallelInterleaveConsume",
{{"element_id", current_worker->outputs.front().id}});
});
current_worker->outputs.front().output.swap(*out_tensors);
current_worker->outputs.pop_front();
current_worker->cond_var.notify_one();
return s;
} else if (current_worker->is_producing && deterministic_) {
if (next_index_ != index) {
next_index_ = index;
block_count_ = 0;
}
break;
} else if (!current_worker->is_producing) {
interleave_indices_[index] = -1;
if (input_impl_) {
std::vector<Tensor> args;
bool end_of_input = false;
Status s = input_impl_->GetNext(ctx, &args, &end_of_input);
if (end_of_input) {
input_impl_.reset();
} else {
current_worker->SetInputs(s, std::move(args));
staging_indices_.emplace_back(current_worker_index);
}
}
if (!staging_indices_.empty()) {
interleave_indices_[index] = staging_indices_.front();
staging_indices_.pop_front();
next_index_ = (index + 1) % interleave_indices_.size();
block_count_ = 0;
can_produce_elements = true;
must_wait_for_input = false;
break;
}
}
}
if (!can_produce_elements && !input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (must_wait_for_input) {
RecordStop(ctx);
if (deterministic_) {
workers_[interleave_indices_[next_index_]].cond_var.wait(l);
} else {
any_element_available_cond_var_.wait(l);
}
RecordStart(ctx);
}
}
return errors::Cancelled(
"ParallelInterleaveDatasetOp::Dataset::Iterator::GetNext");
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncInterleaveManyNode(
std::move(args), {model::MakeNonTunableParameter(
kCycleLength, dataset()->cycle_length_),
model::MakeNonTunableParameter(
kDeterministic, deterministic_ ? 1.0 : 0.0)});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputExhausted, ""));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNextIndex, next_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBlockCount, block_count_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kWorkersSize, workers_.size()));
for (int i = 0; i < workers_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteWorkerStateLocked(writer, i));
}
for (int i = 0; i < worker_thread_states_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteWorkerThreadStateLocked(ctx, writer, i));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInterleaveSize,
interleave_indices_.size()));
for (int i = 0; i < interleave_indices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kInterleaveIndices, "_", i),
interleave_indices_[i]));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kStagingSize, staging_indices_.size()));
for (int i = 0; i < staging_indices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kStagingIndices, "_", i),
staging_indices_[i]));
}
if (!worker_threads_.empty()) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kWorkerThreadsRunning, ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
{
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (!reader->Contains(prefix(), kInputExhausted)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNextIndex, &temp));
next_index_ = size_t(temp);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kBlockCount, &temp));
block_count_ = size_t(temp);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kWorkersSize, &temp));
if (temp != dataset()->num_threads()) {
return errors::Internal("Expected ", dataset()->num_threads(),
" worker states but found ", temp, ".");
}
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
TF_RETURN_IF_ERROR(ReadWorkerStateLocked(ctx, reader, i));
}
}
std::unique_ptr<thread::ThreadPool> threadpool = ctx->CreateThreadPool(
"read_worker_thread_state", dataset()->num_threads());
Status s = absl::OkStatus();
BlockingCounter counter(dataset()->num_threads());
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
threadpool->Schedule([this, i, ctx, reader, &s, &counter] {
WorkerThreadState state;
Status result = ReadWorkerThreadStateLocked(ctx, reader, i, &state);
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (!result.ok()) {
s.Update(result);
counter.DecrementCount();
return;
}
worker_thread_states_[i] = std::move(state);
counter.DecrementCount();
});
}
counter.Wait();
if (!s.ok()) {
return s;
}
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
std::set<int64_t> all_indices;
{
int64_t interleave_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInterleaveSize, &interleave_size));
interleave_indices_.reserve(interleave_size);
for (int64_t i = 0; i < interleave_size; ++i) {
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kInterleaveIndices, "_", i), &temp));
if (temp >= 0 && all_indices.find(temp) != all_indices.end()) {
return errors::Internal(
"Duplicate entry for ", temp,
" found when reading interleave and staging indices.");
}
if (temp >= 0) {
all_indices.insert(temp);
}
interleave_indices_.emplace_back(temp);
}
}
{
int64_t staging_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kStagingSize, &staging_size));
for (int i = 0; i < staging_size; ++i) {
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kStagingIndices, "_", i), &temp));
if (all_indices.find(temp) != all_indices.end()) {
return errors::Internal(
"Duplicate entry for ", temp,
" found when reading interleave and staging indices.");
}
if (temp >= 0) {
all_indices.insert(temp);
}
staging_indices_.emplace_back(temp);
}
}
if (reader->Contains(prefix(), kWorkerThreadsRunning)) {
worker_threads_.reserve(dataset()->num_threads());
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
std::shared_ptr<IteratorContext> new_ctx(new IteratorContext(*ctx));
worker_threads_.emplace_back(ctx->StartThread(
strings::StrCat(kDataParallelInterleaveWorker, "_", i),
[this, new_ctx, i]() { WorkerThread(new_ctx, i); }));
}
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
struct OutputElem {
Status status;
std::vector<Tensor> output;
int64_t id = -1;
explicit OutputElem(const Status& s) : status(s) {}
OutputElem(const Status& s, int64_t id) : status(s), id(id) {}
};
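    // State shared between the consumer and one worker thread: the worker's
    // current function input, its buffered outputs, and a condition variable
    // used for hand-off.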
struct WorkerState {
std::vector<Tensor> input;
std::deque<OutputElem> outputs;
bool is_producing = false;
condition_variable cond_var;
inline bool MayHaveElements() const {
return is_producing || !outputs.empty();
}
void SetInputs(const Status& s, std::vector<Tensor> input_arguments) {
if (s.ok()) {
DCHECK(!MayHaveElements())
<< "Tried to start inputs, despite already producing!";
input = std::move(input_arguments);
is_producing = true;
cond_var.notify_one();
} else {
outputs.emplace_back(s);
}
}
};
struct WorkerThreadState {
OutputElem output_elem;
bool end_of_sequence = false;
Status iterator_creation_status;
std::vector<Tensor> input;
std::unique_ptr<IteratorBase> iterator;
WorkerThreadState() : output_elem(absl::OkStatus()) {}
};
void CancelThreads() TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(mu_);
cancelled_ = true;
for (auto& worker : workers_) {
worker.cond_var.notify_all();
}
}
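    // Lazily starts the worker threads, assigning the first cycle_length
    // workers to interleave slots and the remainder to the staging queue.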
Status EnsureWorkerThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (worker_threads_.empty() && input_impl_) {
worker_threads_.reserve(dataset()->num_threads());
for (int64_t i = 0; i < dataset()->num_threads(); ++i) {
std::vector<Tensor> args;
bool end_of_input = false;
Status s = input_impl_->GetNext(ctx, &args, &end_of_input);
if (end_of_input) {
input_impl_.reset();
return absl::OkStatus();
}
if (i < dataset()->cycle_length_) {
interleave_indices_.push_back(i);
} else {
staging_indices_.push_back(i);
}
workers_[i].SetInputs(s, std::move(args));
std::shared_ptr<IteratorContext> new_ctx(new IteratorContext(*ctx));
worker_threads_.push_back(ctx->StartThread(
strings::StrCat(kDataParallelInterleaveWorker, "_", i),
[this, new_ctx, i]() { WorkerThread(new_ctx, i); }));
}
DCHECK(interleave_indices_.size() == dataset()->cycle_length_);
DCHECK(staging_indices_.size() == dataset()->prefetch_input_elements_);
}
return absl::OkStatus();
}
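    // Body of a single worker thread: it turns its assigned input element
    // into an iterator and fills the worker's output buffer.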
void WorkerThread(const std::shared_ptr<IteratorContext>& ctx,
const int64_t thread_index) { | #include "tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "parallel_interleave_dataset";
constexpr int kOpVersion = 2;
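// Test parameters for the legacy ParallelInterleaveDataset op: an input
// dataset plus cycle/block lengths, a determinism policy, and the
// buffer_output_elements / prefetch_input_elements sizes.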
class ParallelInterleaveDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelInterleaveDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int64_t cycle_length, int64_t block_length,
const std::string& deterministic, int64_t buffer_output_elements,
int64_t prefetch_input_elements, FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
cycle_length_(cycle_length),
block_length_(block_length),
deterministic_(deterministic),
buffer_output_elements_(buffer_output_elements),
prefetch_input_elements_(prefetch_input_elements),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix(), params);
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {cycle_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {block_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {buffer_output_elements_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {prefetch_input_elements_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(ParallelInterleaveDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelInterleaveDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelInterleaveDatasetOp::kCycleLength);
input_names->emplace_back(ParallelInterleaveDatasetOp::kBlockLength);
input_names->emplace_back(
ParallelInterleaveDatasetOp::kBufferOutputElements);
input_names->emplace_back(
ParallelInterleaveDatasetOp::kPrefetchInputElements);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"deterministic", deterministic_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelInterleaveDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int64_t cycle_length_;
int64_t block_length_;
std::string deterministic_;
int64_t buffer_output_elements_;
int64_t prefetch_input_elements_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class ParallelInterleaveDatasetOpTest : public DatasetOpsTestBase {};
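// Returns an attr value that references the MakeTensorSliceDataset test
// function, which converts each input element into a TensorSliceDataset.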
FunctionDefHelper::AttrValueWrapper MakeTensorSliceDatasetFunc(
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
return FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{TensorSliceDatasetOp::kToutputTypes, output_types},
{TensorSliceDatasetOp::kOutputShapes, output_shapes}});
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
1,
DeterminismPolicy::kDeterministic,
1,
0,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
3,
1,
DeterminismPolicy::kNondeterministic,
3,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
5,
1,
DeterminismPolicy::kNondeterministic,
1,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(
TensorShape{3, 3, 1}, {"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
2,
DeterminismPolicy::kDeterministic,
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams EmptyInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{Tensor{}},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
2,
DeterminismPolicy::kNondeterministic,
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_FLOAT}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_FLOAT},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidCycleLengthParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
0,
1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidBlockLengthParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
-1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidBufferOutputElementsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
0,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidPrefetchInputElementsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
1,
-1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
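// With cycle_length 1 the input order is preserved; larger cycle lengths
// interleave slices from several tensor slice datasets.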
std::vector<GetNextTestCase<ParallelInterleaveDatasetParams>>
GetNextTestCases() {
return {{ParallelInterleaveDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams3(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams5(),
CreateTensors<tstring>(
TensorShape{1},
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}}),
false},
{EmptyInputParams(),
CreateTensors<tstring>(TensorShape{1}, {}),
true}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams, GetNextTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelInterleaveDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})}));
}
std::vector<CardinalityTestCase<ParallelInterleaveDatasetParams>>
CardinalityTestCases() {
return {{ParallelInterleaveDatasetParams1(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams2(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams3(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams4(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams5(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams,
CardinalityTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ParallelInterleaveDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({1})}));
}
TEST_F(ParallelInterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelInterleaveDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelInterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelInterleaveDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams5(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape{1},
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}}),
false}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, InvalidArguments) {
std::vector<ParallelInterleaveDatasetParams> invalid_params = {
InvalidCycleLengthParams(), InvalidBlockLengthParams(),
InvalidBufferOutputElementsParams(),
InvalidPrefetchInputElementsParams()};
for (auto& dataset_params : invalid_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
}
} |
1,534 | cpp | tensorflow/tensorflow | parallel_map_dataset_op | tensorflow/core/kernels/data/parallel_map_dataset_op.cc | tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_MAP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_MAP_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ParallelMapDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "ParallelMap";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kNumParallelCalls = "num_parallel_calls";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kUseInterOpParallelism =
"use_inter_op_parallelism";
static constexpr const char* const kDeterministic = "deterministic";
static constexpr const char* const kSloppy = "sloppy";
static constexpr const char* const kPreserveCardinality =
"preserve_cardinality";
explicit ParallelMapDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
const int op_version_;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
bool sloppy_;
bool preserve_cardinality_;
DeterminismPolicy deterministic_;
friend std::unique_ptr<DatasetBase> MakeDataServiceUncompressDataset(
DatasetBase* input, std::unique_ptr<CapturedFunction> captured_function,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes);
};
std::unique_ptr<DatasetBase> MakeDataServiceUncompressDataset(
DatasetBase* input, std::unique_ptr<CapturedFunction> captured_function,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes);
}
}
#endif
#include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include <cstddef>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelMapDatasetOp::kDatasetType;
constexpr const char* const ParallelMapDatasetOp::kInputDataset;
constexpr const char* const ParallelMapDatasetOp::kOtherArguments;
constexpr const char* const
ParallelMapDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelMapDatasetOp::kFunc;
constexpr const char* const ParallelMapDatasetOp::kTarguments;
constexpr const char* const ParallelMapDatasetOp::kOutputTypes;
constexpr const char* const ParallelMapDatasetOp::kOutputShapes;
constexpr const char* const
ParallelMapDatasetOp::kUseInterOpParallelism;
constexpr const char* const ParallelMapDatasetOp::kDeterministic;
constexpr const char* const ParallelMapDatasetOp::kSloppy;
constexpr const char* const
ParallelMapDatasetOp::kPreserveCardinality;
namespace {
constexpr char kParallelMapDatasetV1[] = "ParallelMapDataset";
constexpr char kParallelMapDatasetV2[] = "ParallelMapDatasetV2";
constexpr char kInvocationResults[] = "invocation_results";
constexpr char kSize[] = "size";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kErrorCode[] = "code";
constexpr char kErrorMessage[] = "error_message";
constexpr int kStatsReportingPeriodMillis = 1000;
}
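// Dataset that applies the captured function to each input element, running
// up to num_parallel_calls invocations concurrently.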
class ParallelMapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
int64_t num_parallel_calls, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality, int op_version)
: Dataset(DatasetContext(ctx), input, num_parallel_calls, output_types,
output_shapes, deterministic, std::move(captured_func),
preserve_cardinality, op_version) {}
Dataset(DatasetContext dataset_context, const DatasetBase* input,
int64_t num_parallel_calls, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality, int op_version)
: DatasetBase(std::move(dataset_context)),
input_(input),
num_parallel_calls_(num_parallel_calls),
output_types_(output_types),
output_shapes_(output_shapes),
deterministic_(deterministic),
preserve_cardinality_(preserve_cardinality),
captured_func_(std::move(captured_func)),
op_version_(op_version) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(ParallelMapDatasetOp::kDatasetType,
params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (preserve_cardinality_) {
return input_->Cardinality(options);
} else {
return kUnknownCardinality;
}
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args));
if (!instantiated_captured_func_) {
TF_RETURN_IF_ERROR(
captured_func_->Instantiate(InstantiateCapturedFunctionParams(ctx),
&instantiated_captured_func_));
}
return instantiated_captured_func_->RunInstantiated(args, out_tensors);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
Node* num_parallel_calls = nullptr;
if (op_version_ == 1) {
TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_),
&num_parallel_calls));
} else {
TF_RETURN_IF_ERROR(
b->AddScalar(num_parallel_calls_, &num_parallel_calls));
}
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue f_attr;
b->BuildAttrValue(captured_func_->func(), &f_attr);
attrs.emplace_back(kFunc, f_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
attrs.emplace_back(kTarguments, other_arguments_types_attr);
AttrValue use_inter_op_parallelism_attr;
b->BuildAttrValue(captured_func_->use_inter_op_parallelism(),
&use_inter_op_parallelism_attr);
attrs.emplace_back(kUseInterOpParallelism, use_inter_op_parallelism_attr);
if (op_version_ == 1) {
AttrValue sloppy_attr;
b->BuildAttrValue(deterministic_.IsNondeterministic(), &sloppy_attr);
attrs.emplace_back(kSloppy, sloppy_attr);
}
if (op_version_ == 2) {
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
}
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
attrs.emplace_back(kPreserveCardinality, preserve_cardinality_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node),
std::make_pair(2, num_parallel_calls)},
{std::make_pair(1, other_arguments)},
attrs, output));
return absl::OkStatus();
}
private:
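  // Iterator that keeps a buffer of in-flight InvocationResults filled by a
  // background runner thread and hands completed results to GetNextInternal,
  // preserving input order when determinism is required.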
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()),
preserve_cardinality_(params.dataset->preserve_cardinality_),
autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}
~Iterator() override {
CancelThreads(true);
input_impl_.reset();
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override {
return deterministic_;
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
auto params = std::make_unique<IteratorContext::Params>(ctx);
params->cancellation_manager = cancellation_manager_.get();
auto iter_ctx = std::make_unique<IteratorContext>(*params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
iter_ctx.get(), this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx->checkpoint());
TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_));
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<InvocationResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(&result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
RecordStop(ctx);
result->notification.WaitForNotification();
RecordStart(ctx);
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelMapConsume",
{{"element_id", result->uid}});
});
return ProcessResult(ctx, result, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
std::shared_ptr<model::Parameter> parameter;
if (num_parallel_calls_ &&
dataset()->num_parallel_calls_ == model::kAutotune) {
parameter = model::MakeParameter(
"parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size(),
GetAutotuneDefaultParallelism(ctx));
} else {
parameter =
model::MakeParameter("parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size());
}
std::optional<int64_t> estimated_element_size =
dataset()->GetEstimatedElementSize();
if (!estimated_element_size) {
VLOG(2) << absl::StrFormat(
"Cannot estimate the size of the output tensor because the "
"output shape of node %s(id:%d) is only partially known.",
args.name, args.id);
}
return model::MakeAsyncKnownRatioNode(
std::move(args),
1, {std::move(parameter)},
false, estimated_element_size);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
if (num_calls_ != 0) {
return errors::FailedPrecondition(
"Unexpected outstanding calls encountered.");
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrCat(kInvocationResults, "_", kSize),
invocation_results_.size()));
for (size_t i = 0; i < invocation_results_.size(); i++) {
const auto& result = *(invocation_results_[i]);
std::string element_prefix =
absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]");
TF_RETURN_IF_ERROR(
WriteStatusLocked(writer, element_prefix, result.status));
TF_RETURN_IF_ERROR(writer->WriteScalar(element_prefix, kSize,
result.return_values.size()));
for (size_t j = 0; j < result.return_values.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(element_prefix,
absl::StrCat("[", j, "]"),
result.return_values[j]));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kEndOfInput,
static_cast<int64_t>(result.end_of_input)));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return RestoreInput(ctx, reader, input_impl_);
}
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
DCHECK(invocation_results_.empty());
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
int64_t invocation_results_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrCat(kInvocationResults, "_", kSize),
&invocation_results_size));
for (size_t i = 0; i < invocation_results_size; i++) {
invocation_results_.push_back(std::make_shared<InvocationResult>(ctx));
auto& result = *invocation_results_.back();
std::string element_prefix =
absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]");
TF_RETURN_IF_ERROR(
ReadStatusLocked(reader, element_prefix, &result.status));
size_t num_return_values;
{
int64_t size;
TF_RETURN_IF_ERROR(reader->ReadScalar(element_prefix, kSize, &size));
num_return_values = static_cast<size_t>(size);
if (num_return_values != size) {
return errors::InvalidArgument(
element_prefix, ",", kSize, ": ", size,
" is not a valid value of type size_t.");
}
}
result.return_values.reserve(num_return_values);
for (size_t j = 0; j < num_return_values; j++) {
result.return_values.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), element_prefix,
absl::StrCat("[", j, "]"),
&result.return_values.back()));
}
int64_t end_of_input;
TF_RETURN_IF_ERROR(
reader->ReadScalar(element_prefix, kEndOfInput, &end_of_input));
result.end_of_input = static_cast<bool>(end_of_input);
RecordBufferEnqueue(ctx, result.return_values);
result.notification.Notify();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
data::TraceMeMetadata result;
result.push_back(
std::make_pair("autotune", autotune_ ? "true" : "false"));
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
struct InvocationResult {
explicit InvocationResult(IteratorContext* ctx)
: uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
Notification notification;
Status status;
std::vector<Tensor> return_values;
bool end_of_input = false;
const int64_t uid;
MemoryCheckpoint checkpoint;
};
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto ctx_copy = std::make_shared<IteratorContext>(*ctx);
runner_thread_ = ctx->StartThread(
"tf_data_parallel_map",
std::bind(&Iterator::RunnerThread, this, ctx_copy));
if (ctx->stats_aggregator()) {
stats_thread_ = ctx->StartThread(
"tf_data_parallel_map_stats",
std::bind(&Iterator::StatsThread, this, ctx_copy));
}
}
}
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->notification.Notify();
cond_var_->notify_all();
}
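    // Pulls the next input element and runs the captured function on it,
    // either asynchronously (when inter-op parallelism is enabled) or as a
    // task scheduled on the context's runner.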
void CallFunction(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelMapProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
result->status = input_impl_->GetNext(ctx.get(), &input_element,
&result->end_of_input);
result->checkpoint.Merge(ctx->checkpoint());
if (result->end_of_input || !result->status.ok()) {
CallCompleted(ctx, result);
return;
}
auto done = [this, ctx, result](Status status) {
if (!status.ok()) {
result->status = AddErrorContext(status);
}
RecordBufferEnqueue(ctx.get(), result->return_values);
CallCompleted(ctx, result);
};
if (dataset()->captured_func_->use_inter_op_parallelism()) {
instantiated_captured_func_->RunAsync(
ctx.get(), std::move(input_element), &result->return_values,
std::move(done), model_node());
} else {
auto fn = std::bind(
[this, ctx, result](std::vector<Tensor> input_element) {
return instantiated_captured_func_->Run(
ctx.get(), std::move(input_element), &result->return_values,
model_node());
},
std::move(input_element));
(*ctx->runner())(
[this, ctx, fn = std::move(fn), done = std::move(done)]() {
Status s;
if (IsRecording(ctx.get())) {
s = fn();
} else {
RecordStart(ctx.get());
s = fn();
RecordStop(ctx.get());
}
done(s);
});
}
}
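    // Moves a completed invocation's tensors into `out_tensors`, translating
    // OutOfRange errors according to `preserve_cardinality_`.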
Status ProcessResult(IteratorContext* ctx,
const std::shared_ptr<InvocationResult>& result,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {
ctx->MergeCheckpoint(&result->checkpoint);
if (!result->end_of_input && result->status.ok()) {
*out_tensors = std::move(result->return_values);
RecordBufferDequeue(ctx, *out_tensors);
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(result->status)) {
if (preserve_cardinality_) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
result->status.message());
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
*end_of_sequence = result->end_of_input;
return result->status;
}
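    // Background thread that keeps up to `num_parallel_calls_` map function
    // invocations in flight at any given time.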
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
std::vector<std::shared_ptr<InvocationResult>> new_calls;
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
invocation_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
invocation_results_.push_back(
std::make_shared<InvocationResult>(ctx.get()));
new_calls.push_back(invocation_results_.back());
num_calls_++;
}
cond_var_->notify_all();
}
for (const auto& call : new_calls) {
CallFunction(ctx, call);
}
new_calls.clear();
}
}
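    // Decides whether GetNext should keep blocking. In non-deterministic mode
    // the first ready result is handed out; in deterministic mode results are
    // surfaced in the order their calls were issued.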
bool ShouldWait(std::shared_ptr<InvocationResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
if (!deterministic_) {
for (auto it = invocation_results_.begin();
it != invocation_results_.end(); ++it) {
if ((*it)->notification.HasBeenNotified() &&
(it == invocation_results_.begin() || !(*it)->end_of_input)) {
std::swap(*result, *it);
invocation_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else if (!invocation_results_.empty()) {
std::swap(*result, invocation_results_.front());
invocation_results_.pop_front();
cond_var_->notify_all();
return false;
}
return true;
}
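    // Periodically reports the ratio of in-flight calls to the parallelism
    // budget to the stats aggregator.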
void StatsThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
for (int64_t step = 0;; ++step) {
int num_calls;
int num_parallel_calls;
{
mutex_lock l(*mu_);
if (step != 0 && !cancelled_) {
cond_var_->wait_for(
l, std::chrono::milliseconds(kStatsReportingPeriodMillis));
}
if (cancelled_) {
return;
}
num_calls = num_calls_;
num_parallel_calls = num_parallel_calls_->value;
}
if (num_parallel_calls == 0) {
num_parallel_calls = 1;
}
ctx->stats_aggregator()->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls) /
static_cast<float>(num_parallel_calls),
step);
}
}
Status WriteStatusLocked(IteratorStateWriter* writer,
const std::string& prefix, const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix, absl::StrCat("_", kErrorCode),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix,
absl::StrCat("_", kErrorMessage),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader,
const std::string& prefix, Status* status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix, absl::StrCat("_", kErrorCode), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix, absl::StrCat("_", kErrorMessage), &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_; | #include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include <gtest/gtest.h>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_map_dataset";
constexpr int kOpVersion = 2;
class ParallelMapDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelMapDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int num_parallel_calls, FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
bool use_inter_op_parallelism, const std::string& deterministic,
bool preserve_cardinality, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
num_parallel_calls_(num_parallel_calls),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)),
use_inter_op_parallelism_(use_inter_op_parallelism),
deterministic_(deterministic),
preserve_cardinality_(preserve_cardinality) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix(), params);
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(ParallelMapDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelMapDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelMapDatasetOp::kNumParallelCalls);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"use_inter_op_parallelism", use_inter_op_parallelism_},
{"deterministic", deterministic_},
{"preserve_cardinality", preserve_cardinality_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelMapDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int num_parallel_calls_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
bool use_inter_op_parallelism_;
std::string deterministic_;
bool preserve_cardinality_;
};
class ParallelMapDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name,
const DataType& dtype) {
return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}});
}
ParallelMapDatasetParams ParallelMapDatasetParams1() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
1,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams2() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
2,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams3() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
3,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams4() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
4,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams5() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
model::kAutotune,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams6() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
4,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams7() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
2,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams8() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
model::kAutotune,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams9() {
return ParallelMapDatasetParams(
BatchDatasetParams(RangeDatasetParams(0, 4, 1),
3,
false,
false,
{DT_INT64},
{PartialTensorShape({-1})},
"batch_dataset"),
{},
1,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParamsWithInvalidNumParallelCalls() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
-4,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
std::vector<GetNextTestCase<ParallelMapDatasetParams>> GetNextTestCases() {
return {{ParallelMapDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
false},
{ParallelMapDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{ParallelMapDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
false},
{
ParallelMapDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{
ParallelMapDatasetParams9(),
{CreateTensor<int64_t>(TensorShape{3}, {0, 2, 4}),
CreateTensor<int64_t>(TensorShape{1}, {6})},
true}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
GetNextTestCases())
TEST_F(ParallelMapDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelMapDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelMapDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelMapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ParallelMapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeHasValue) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
auto element_size = dataset_->GetEstimatedElementSize();
ASSERT_TRUE(element_size.has_value());
EXPECT_GT(element_size.value(), 0);
}
TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeNoValue) {
auto dataset_params = ParallelMapDatasetParams9();
TF_ASSERT_OK(Initialize(dataset_params));
EXPECT_FALSE(dataset_->GetEstimatedElementSize().has_value());
}
std::vector<CardinalityTestCase<ParallelMapDatasetParams>>
CardinalityTestCases() {
return {{ParallelMapDatasetParams1(),
kUnknownCardinality},
{ParallelMapDatasetParams2(),
4},
{ParallelMapDatasetParams3(),
kUnknownCardinality},
{ParallelMapDatasetParams4(),
kUnknownCardinality},
{ParallelMapDatasetParams5(),
4},
{ParallelMapDatasetParams6(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
CardinalityTestCases())
TEST_F(ParallelMapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ParallelMapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ParallelMapDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelMapDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelMapDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
false},
{ParallelMapDatasetParams3(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{ParallelMapDatasetParams4(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams5(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
false},
{
ParallelMapDatasetParams6(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelMapDatasetOpTest,
ParallelMapDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelMapDatasetOpTest, InvalidNumParallelCalls) {
auto dataset_params = ParallelMapDatasetParamsWithInvalidNumParallelCalls();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} |
1,535 | cpp | tensorflow/tensorflow | repeat_dataset_op | tensorflow/core/kernels/data/repeat_dataset_op.cc | tensorflow/core/kernels/data/repeat_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_REPEAT_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_REPEAT_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class RepeatDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Repeat";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kCount = "count";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit RepeatDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const RepeatDatasetOp::kDatasetType;
constexpr const char* const RepeatDatasetOp::kInputDataset;
constexpr const char* const RepeatDatasetOp::kCount;
constexpr const char* const RepeatDatasetOp::kOutputTypes;
constexpr const char* const RepeatDatasetOp::kOutputShapes;
namespace {
constexpr char kForeverRepeat[] = "ForeverRepeat";
constexpr char kEmptyRepeat[] = "EmptyRepeat";
constexpr char kFiniteRepeat[] = "FiniteRepeat";
constexpr char kCurIteration[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kUninitialized[] = "uninitialized";
constexpr int64_t kKnownRatio = 1;
std::string nested_prefix(const std::string& prefix, int64_t epoch) {
return strings::StrCat(prefix, "[", epoch, "]");
}
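// Returns true if `dataset` or any of its transitive inputs is a tf.data
// service dataset (i.e. its type string starts with "DataServiceDataset").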
bool HasDataServiceInput(const DatasetBase* dataset) {
DCHECK(dataset != nullptr);
if (absl::StartsWith(dataset->type_string(), "DataServiceDataset")) {
return true;
}
std::vector<const DatasetBase*> inputs;
Status s = dataset->InputDatasets(&inputs);
if (!s.ok()) {
return false;
}
for (const DatasetBase* input : inputs) {
if (HasDataServiceInput(input)) {
return true;
}
}
return false;
}
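// Forwards split requests to the wrapped split provider while reporting a
// cardinality scaled by the repeat count.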
class RepeatedSplitProvider : public SplitProvider {
public:
explicit RepeatedSplitProvider(std::unique_ptr<SplitProvider> split_provider,
int64_t count)
: split_provider_(std::move(split_provider)), count_(count) {}
int64_t Cardinality() const override {
if (split_provider_->Cardinality() == 0 || count_ == 0) {
return 0;
}
if (count_ < 0) {
return kInfiniteCardinality;
}
if (split_provider_->Cardinality() < 0) {
return split_provider_->Cardinality();
}
return split_provider_->Cardinality() * count_;
}
absl::Status GetNext(Tensor* split, bool* end_of_splits) override {
return split_provider_->GetNext(split, end_of_splits);
}
absl::Status Reset() override { return split_provider_->Reset(); }
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override {
return split_provider_->Save(full_name, writer);
}
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override {
return split_provider_->Restore(full_name, reader);
}
void Cancel() override { split_provider_->Cancel(); }
private:
const std::unique_ptr<SplitProvider> split_provider_;
const int64_t count_;
};
}
class RepeatDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
if (input_ != nullptr && !input_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
} else if (count <= 0) {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("`repeat(", count,
")` does not support random access of tf.data "
"datasets."));
} else {
random_indexing_compatible_ = absl::OkStatus();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<ForeverIterator>(ForeverIterator::Params{
this, name_utils::IteratorPrefix(kForeverRepeat, prefix)});
} else if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyRepeat, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteRepeat, prefix)});
}
}
absl::Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
std::vector<std::unique_ptr<SplitProvider>> input_split_providers;
TF_RETURN_IF_ERROR(input_->MakeSplitProviders(&input_split_providers));
split_providers->clear();
for (auto& split_provider : input_split_providers) {
split_providers->push_back(std::make_unique<RepeatedSplitProvider>(
std::move(split_provider), count_));
}
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(RepeatDatasetOp::kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (count_ < 0) {
if (n == 0) {
return 0;
}
return kInfiniteCardinality;
}
if (count_ == 0) {
return 0;
}
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ * n;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
  Status Get(OpKernelContext* ctx, int64_t index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index % input_->Cardinality(), out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
private:
class EmptyIterator : public DatasetIterator<Dataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args), /*ratio=*/kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
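  // Iterates over the input dataset `count_` times, re-creating the input
  // iterator (and resetting any split providers) at each epoch boundary.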
class FiniteIterator : public DatasetIterator<Dataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (i_ < dataset()->count_) {
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
ctx_with_index_mapper.MergeCheckpoint();
if (!*end_of_sequence) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
++i_;
input_impl_.reset();
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
IndexMapperFn GetIndexMapper(IndexMapperFn parent_index_mapper)
const override TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t input_cardinality = dataset()->input_->Cardinality();
int64_t repeat_count = i_;
return [parent_index_mapper, input_cardinality,
repeat_count](size_t element_position) -> absl::StatusOr<size_t> {
if (element_position >= input_cardinality) {
return element_position;
}
size_t repeated_element_position =
repeat_count * input_cardinality + element_position;
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(repeated_element_position));
return shuffled_element_position % input_cardinality;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args), /*ratio=*/kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (ctx->restored_element_count().has_value()) {
CardinalityOptions options;
options.set_compute_level(
CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);
const int64_t input_cardinality =
dataset()->input_->Cardinality(std::move(options));
i_ = *ctx->restored_element_count() / input_cardinality;
IteratorContext::Params params(ctx);
params.restored_element_count =
*ctx->restored_element_count() % input_cardinality;
params.index_mapper = GetIndexMapper(ctx->index_mapper());
IteratorContext ctx_with_restored_element_count(params);
return RestoreInput(&ctx_with_restored_element_count, reader,
input_impl_);
}
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(!input_empty)) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
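  // Repeats the input dataset indefinitely. If a fresh pass over the input
  // immediately reports end of sequence (and there are no split providers or
  // tf.data service inputs), iteration ends instead of looping forever.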
class ForeverIterator : public DatasetIterator<Dataset> {
public:
explicit ForeverIterator(const Params& params)
: DatasetIterator<Dataset>(params),
has_data_service_input_(HasDataServiceInput(dataset())),
input_impl_(nullptr),
i_(0),
first_call_(true) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (!input_impl_) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
DCHECK(!*end_of_sequence || out_tensors->empty());
if (first_call_ && *end_of_sequence && ctx->split_providers().empty()) {
if (!has_data_service_input_) {
input_impl_.reset();
return absl::OkStatus();
}
}
first_call_ = false;
if (!*end_of_sequence) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
++i_;
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
input_impl_.reset();
first_call_ = true;
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args), /*ratio=*/kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(input_empty)) {
input_impl_.reset();
first_call_ = true;
} else {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
first_call_ = false;
}
return absl::OkStatus();
}
private:
const bool has_data_service_input_;
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t i_ TF_GUARDED_BY(mu_);
bool first_call_ TF_GUARDED_BY(mu_);
};
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
RepeatDatasetOp::RepeatDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RepeatDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RepeatDataset").Device(DEVICE_CPU),
RepeatDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "repeat_dataset";
class RepeatDatasetParams : public DatasetParams {
public:
template <typename T>
RepeatDatasetParams(T input_dataset_params, int64_t count,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
count_(count) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {count_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(RepeatDatasetOp::kInputDataset);
input_names->emplace_back(RepeatDatasetOp::kCount);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return RepeatDatasetOp::kDatasetType; }
private:
int64_t count_;
};
class RepeatDatasetOpTest : public DatasetOpsTestBase {};
RepeatDatasetParams FiniteRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
2,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
RepeatDatasetParams EmptyRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
0,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
RepeatDatasetParams ForeverRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 1}, {1, 2})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
-1,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<RepeatDatasetParams>> GetNextTestCases() {
return {{FiniteRepeatDatasetParams(),
{CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"}),
CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"})}},
{EmptyRepeatDatasetParams(),
{}},
{
ForeverRepeatDatasetParams(),
{CreateTensor<int64_t>(TensorShape{1}, {1}),
CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorGetNextOpTest
: public RepeatDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<RepeatDatasetParams>> {};
TEST_P(ParameterizedIteratorGetNextOpTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
auto expected_outputs_it = test_case.expected_outputs.begin();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
if (dataset_->Cardinality() == kInfiniteCardinality) {
for (int i = 0; i < 100; ++i) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
for (const auto& tensor : out_tensors) {
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
if (expected_outputs_it == test_case.expected_outputs.end()) {
expected_outputs_it = test_case.expected_outputs.begin();
}
}
}
EXPECT_FALSE(end_of_sequence);
} else {
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& tensor : out_tensors) {
EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
}
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
}
INSTANTIATE_TEST_SUITE_P(RepeatDatasetOpTest,
ParameterizedIteratorGetNextOpTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(RepeatDatasetOpTest, DatasetNodeName) {
auto dataset_params = FiniteRepeatDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(RepeatDatasetOpTest, DatasetTypeString) {
auto dataset_params = FiniteRepeatDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(RepeatDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<RepeatDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{FiniteRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{EmptyRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{ForeverRepeatDatasetParams(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<RepeatDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FiniteRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{EmptyRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{ForeverRepeatDatasetParams(),
{PartialTensorShape({1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<RepeatDatasetParams>>
DatasetCardinalityTestCases() {
return {{FiniteRepeatDatasetParams(), 4},
{EmptyRepeatDatasetParams(), 0},
{ForeverRepeatDatasetParams(),
kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<RepeatDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{FiniteRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{EmptyRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{ForeverRepeatDatasetParams(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<RepeatDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FiniteRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{EmptyRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{ForeverRepeatDatasetParams(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<RepeatDatasetParams>>
IteratorPrefixTestCases() {
return {
{FiniteRepeatDatasetParams(),
name_utils::IteratorPrefix(
"FiniteRepeat", FiniteRepeatDatasetParams().iterator_prefix())},
{EmptyRepeatDatasetParams(),
name_utils::IteratorPrefix(
"EmptyRepeat", EmptyRepeatDatasetParams().iterator_prefix())},
{ForeverRepeatDatasetParams(),
name_utils::IteratorPrefix(
"ForeverRepeat", ForeverRepeatDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<RepeatDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FiniteRepeatDatasetParams(),
{0, 1, 3},
{CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"}),
CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"})}},
{EmptyRepeatDatasetParams(),
{0, 1, 3},
{}},
{
ForeverRepeatDatasetParams(),
{0, 1, 3},
{CreateTensor<int64_t>(TensorShape{1}, {1}),
CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public RepeatDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<RepeatDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, Roundtrip) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
auto expected_outputs_it = test_case.expected_outputs.begin();
bool end_of_sequence = dataset_->Cardinality() == 0;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
std::vector<int> breakpoints = GetParam().breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration < breakpoint) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (auto& tensor : out_tensors) {
EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
}
}
cur_iteration++;
if (dataset_->Cardinality() == kInfiniteCardinality &&
expected_outputs_it == test_case.expected_outputs.end()) {
expected_outputs_it = test_case.expected_outputs.begin();
}
}
if (breakpoint >= dataset_->Cardinality()) {
if (dataset_->Cardinality() == kInfiniteCardinality) {
EXPECT_FALSE(end_of_sequence);
} else {
EXPECT_TRUE(end_of_sequence);
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
} else {
EXPECT_FALSE(end_of_sequence);
}
}
}
INSTANTIATE_TEST_SUITE_P(
RepeatDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} |
1,536 | cpp | tensorflow/tensorflow | optimize_dataset_op | tensorflow/core/kernels/data/optimize_dataset_op.cc | tensorflow/core/kernels/data/optimize_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_OPTIMIZE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_OPTIMIZE_DATASET_OP_H_
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/platform.h"
#if !defined(IS_MOBILE_PLATFORM)
namespace tensorflow {
namespace data {
class OptimizeDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Optimize";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOptimizations = "optimizations";
static constexpr const char* const kOptimizationsEnabled =
"optimizations_enabled";
static constexpr const char* const kOptimizationsDisabled =
"optimizations_disabled";
static constexpr const char* const kOptimizationsDefault =
"optimizations_default";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kOptimizationConfigs =
"optimization_configs";
static constexpr const char* const kOptimizeDatasetV1 = "OptimizeDataset";
static constexpr const char* const kOptimizeDatasetV2 = "OptimizeDatasetV2";
static void MakeDatasetFromOptions(
OpKernelContext* ctx, DatasetBase* input,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase** output);
explicit OptimizeDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
absl::flat_hash_set<tstring> optimization_configs_;
int op_version_ = 0;
};
}
}
#else
namespace tensorflow {
namespace data {
class OptimizeDatasetOp : public UnaryDatasetOpKernel {
public:
static void MakeDatasetFromOptions(
OpKernelContext* ctx, DatasetBase* input,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase** output);
explicit OptimizeDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
};
}
}
#endif
#endif
#include "tensorflow/core/kernels/data/optimize_dataset_op.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <map>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const OptimizeDatasetOp::kDatasetType;
constexpr const char* const OptimizeDatasetOp::kInputDataset;
constexpr const char* const OptimizeDatasetOp::kOptimizations;
constexpr const char* const
OptimizeDatasetOp::kOptimizationsEnabled;
constexpr const char* const
OptimizeDatasetOp::kOptimizationsDisabled;
constexpr const char* const
OptimizeDatasetOp::kOptimizationsDefault;
constexpr const char* const OptimizeDatasetOp::kOutputTypes;
constexpr const char* const OptimizeDatasetOp::kOutputShapes;
constexpr const char* const
OptimizeDatasetOp::kOptimizationConfigs;
constexpr const char* const OptimizeDatasetOp::kOptimizeDatasetV1;
constexpr const char* const OptimizeDatasetOp::kOptimizeDatasetV2;
namespace {
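// Rewrites `input` with the selected optimizations. Graduated experiments are
// always applied; if the rewrite exceeds its deadline, the input dataset is
// returned unmodified with a warning.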
void MakeDatasetHelper(OpKernelContext* ctx,
absl::flat_hash_set<tstring>& optimizations,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase* input, DatasetBase** output) {
std::vector<string> graduated_experiments = {
"disable_intra_op_parallelism",
"use_private_thread_pool"
};
for (auto& experiment : graduated_experiments) {
if (!optimizations.contains(experiment)) {
optimizations.insert(experiment);
}
VLOG(1) << "The graduated experiment \"" << experiment << "\" is applied.";
}
if (optimizations.empty()) {
*output = input;
input->Ref();
return;
}
auto config_factory = [&optimizations, &optimization_configs]() {
return CreateRewriterConfig(optimizations, optimization_configs);
};
core::RefCountPtr<DatasetBase> rewritten;
Status s = RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten);
*output = rewritten.release();
if (errors::IsDeadlineExceeded(s)) {
LOG(WARNING) << s.ToString();
*output = input;
input->Ref();
return;
}
OP_REQUIRES_OK(ctx, s);
}
}
void OptimizeDatasetOp::MakeDatasetFromOptions(
OpKernelContext* ctx, DatasetBase* input,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase** output) {
auto experiments = GetExperiments();
LogAndRecordExperiments(experiments);
auto optimizations =
SelectOptimizations(experiments, optimizations_enabled,
optimizations_disabled, optimizations_default);
MakeDatasetHelper(ctx, optimizations, optimization_configs, input, output);
}
OptimizeDatasetOp::OptimizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kOptimizeDatasetV1) {
op_version_ = 1;
} else if (op_name == kOptimizeDatasetV2) {
op_version_ = 2;
}
std::vector<tstring> optimization_configs;
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kOptimizationConfigs, &optimization_configs));
optimization_configs_.insert(optimization_configs.begin(),
optimization_configs.end());
}
void OptimizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
absl::flat_hash_set<tstring> optimizations;
if (op_version_ == 1) {
std::vector<tstring> optimizations_enabled;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kOptimizations,
&optimizations_enabled));
optimizations.insert(optimizations_enabled.begin(),
optimizations_enabled.end());
} else if (op_version_ == 2) {
std::vector<tstring> optimizations_enabled, optimizations_disabled,
optimizations_default;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kOptimizationsEnabled,
&optimizations_enabled));
OP_REQUIRES_OK(ctx,
ParseVectorArgument<tstring>(ctx, kOptimizationsDisabled,
&optimizations_disabled));
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kOptimizationsDefault,
&optimizations_default));
auto experiments = GetExperiments();
LogAndRecordExperiments(experiments);
optimizations = SelectOptimizations(
experiments,
{optimizations_enabled.begin(), optimizations_enabled.end()},
{optimizations_disabled.begin(), optimizations_disabled.end()},
{optimizations_default.begin(), optimizations_default.end()});
}
MakeDatasetHelper(
ctx, optimizations,
{optimization_configs_.begin(), optimization_configs_.end()}, input,
output);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptimizeDataset").Device(DEVICE_CPU),
OptimizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptimizeDatasetV2").Device(DEVICE_CPU),
OptimizeDatasetOp);
}
}
}
#else
namespace tensorflow {
namespace data {
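// Mobile build: dataset graph rewriting is compiled out, so these kernels
// simply forward the input dataset unchanged.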
void OptimizeDatasetOp::MakeDatasetFromOptions(
OpKernelContext* ctx, DatasetBase* input,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default,
const absl::flat_hash_set<tstring>& optimization_configs,
DatasetBase** output) {
input->Ref();
*output = input;
}
OptimizeDatasetOp::OptimizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void OptimizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
input->Ref();
*output = input;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptimizeDataset").Device(DEVICE_CPU),
OptimizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptimizeDatasetV2").Device(DEVICE_CPU),
OptimizeDatasetOp);
}
}
}
#endif | #include "tensorflow/core/kernels/data/optimize_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "optimize_dataset";
constexpr char kNoopElimination[] = "noop_elimination";
class OptimizeDatasetParams : public DatasetParams {
public:
template <typename T>
OptimizeDatasetParams(T input_dataset_params, string optimizations,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
std::vector<tstring> optimization_configs,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
optimizations_(std::move(optimizations)),
optimization_configs_(std::move(optimization_configs)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<tstring>(TensorShape({1}), {optimizations_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {OptimizeDatasetOp::kInputDataset,
OptimizeDatasetOp::kOptimizations};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{OptimizeDatasetOp::kOutputShapes, output_shapes_},
{OptimizeDatasetOp::kOutputTypes, output_dtypes_},
{OptimizeDatasetOp::kOptimizationConfigs, optimization_configs_}};
return absl::OkStatus();
}
string dataset_type() const override {
return OptimizeDatasetOp::kDatasetType;
}
private:
string optimizations_;
std::vector<tstring> optimization_configs_;
};
class OptimizeDatasetOpTest : public DatasetOpsTestBase {};
TEST_F(OptimizeDatasetOpTest, NoopElimination) {
  auto take_dataset_params =
TakeDatasetParams(RangeDatasetParams(-3, 3, 1),
-3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
auto optimize_dataset_params =
      OptimizeDatasetParams(std::move(take_dataset_params),
{kNoopElimination},
{DT_INT64},
{PartialTensorShape({})},
{},
kNodeName);
std::vector<Tensor> expected_outputs = CreateTensors<int64_t>(
TensorShape({}), {{-3}, {-2}, {-1}, {0}, {1}, {2}});
TF_ASSERT_OK(Initialize(optimize_dataset_params));
TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true));
}
}
}
} |
1,537 | cpp | tensorflow/tensorflow | iterator_ops | tensorflow/core/kernels/data/iterator_ops.cc | tensorflow/core/kernels/data/iterator_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_ITERATOR_OPS_H_
#define TENSORFLOW_CORE_KERNELS_DATA_ITERATOR_OPS_H_
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/metric_utils.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/data/unbounded_thread_pool.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function_handle_cache.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
namespace tensorflow {
namespace data {
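// Resource that owns a dataset iterator together with the function library
// state needed to run it; supports GetNext, model introspection, and
// checkpointing via Save/Restore.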
class IteratorResource : public ResourceBase {
public:
IteratorResource(Env* env, const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<DeviceMgr> device_mgr,
std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* flr);
~IteratorResource() override;
Status GetNext(OpKernelContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence);
absl::Status GetModelProto(std::string& model_proto);
Status Save(OpKernelContext* ctx, ExternalStatePolicy external_state_policy,
IteratorStateWriter* writer);
Status Restore(OpKernelContext* ctx, IteratorStateReader* reader);
Status SetIteratorFromDataset(OpKernelContext* ctx,
const DatasetBase* dataset);
string DebugString() const override { return "Iterator resource"; }
const DataTypeVector& output_dtypes() const { return output_dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const {
return output_shapes_;
}
private:
class State {
public:
State(std::shared_ptr<FunctionLibraryDefinition> flib_def,
std::shared_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* flr,
std::unique_ptr<DatasetBaseIterator> iterator)
: flib_def_(std::move(flib_def)),
flr_(flr),
pflr_(std::move(pflr)),
function_handle_cache_(std::make_unique<FunctionHandleCache>(flr)),
iterator_(std::move(iterator)),
id_registry_(std::make_shared<MemoryCheckpoint::IdRegistry>()),
checkpoint_(MemoryCheckpoint::CreateRootCheckpoint(id_registry_)) {}
~State() { cancellation_manager_.StartCancel(); }
std::shared_ptr<FunctionLibraryDefinition> flib_def() { return flib_def_; }
FunctionLibraryRuntime* flr() { return flr_; }
std::shared_ptr<ProcessFunctionLibraryRuntime> pflr() { return pflr_; }
FunctionHandleCache* function_handle_cache() {
return function_handle_cache_.get();
}
ResourceMgr* resource_mgr() { return &resource_mgr_; }
CancellationManager* cancellation_manager() {
return &cancellation_manager_;
}
DatasetBaseIterator* iterator() { return iterator_.get(); }
std::shared_ptr<model::Model> model() { return model_; }
const MemoryCheckpoint& checkpoint() const { return checkpoint_; }
DatasetBase* dataset() { return dataset_.get(); }
void DowncastAndSetIteratorAndDataset(std::unique_ptr<IteratorBase> it,
const DatasetBase* dataset);
void MergeCheckpoint(MemoryCheckpoint* other);
void SetModel(std::shared_ptr<model::Model> model);
std::shared_ptr<MemoryCheckpoint::IdRegistry> id_registry() {
return id_registry_;
}
private:
std::shared_ptr<FunctionLibraryDefinition> flib_def_;
FunctionLibraryRuntime* flr_ = nullptr;
std::shared_ptr<ProcessFunctionLibraryRuntime> pflr_;
std::unique_ptr<FunctionHandleCache> function_handle_cache_;
ResourceMgr resource_mgr_;
CancellationManager cancellation_manager_;
std::unique_ptr<DatasetBaseIterator> iterator_;
core::RefCountPtr<DatasetBase> dataset_;
std::shared_ptr<MemoryCheckpoint::IdRegistry> id_registry_;
MemoryCheckpoint checkpoint_;
std::shared_ptr<model::Model> model_;
};
IteratorMetricsCollector metrics_collector_;
std::shared_ptr<TfDatazMetricsCollector> tf_dataz_metrics_collector_;
UnboundedThreadPool unbounded_thread_pool_;
mutex mu_;
const Env& env_;
const std::unique_ptr<DeviceMgr> device_mgr_ TF_GUARDED_BY(mu_);
std::shared_ptr<State> iterator_state_ TF_GUARDED_BY(mu_);
const DataTypeVector output_dtypes_;
const std::vector<PartialTensorShape> output_shapes_;
};
class IteratorHandleOp : public OpKernel {
public:
explicit IteratorHandleOp(OpKernelConstruction* ctx);
~IteratorHandleOp() override;
void Compute(OpKernelContext* context) override TF_LOCKS_EXCLUDED(mu_);
private:
Status VerifyResource(IteratorResource* resource);
FunctionLibraryRuntime* CreatePrivateFLR(
OpKernelContext* ctx, std::unique_ptr<DeviceMgr>* device_mgr,
std::unique_ptr<FunctionLibraryDefinition>* flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime>* pflr);
mutex mu_;
ContainerInfo cinfo_;
IteratorResource* resource_ TF_GUARDED_BY(mu_) = nullptr;
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
const int graph_def_version_;
string name_;
};
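// Variant of IteratorHandleOp built on AnonymousResourceOp<IteratorResource>;
// it creates a fresh, unnamed iterator resource rather than looking up a
// shared one by name.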
class AnonymousIteratorHandleOp : public AnonymousResourceOp<IteratorResource> {
public:
explicit AnonymousIteratorHandleOp(OpKernelConstruction* context);
private:
string name() override;
Status CreateResource(OpKernelContext* ctx,
std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* lib,
IteratorResource** resource) override;
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
const int graph_def_version_;
};
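// Base class for tf.data kernels that can run either synchronously or
// asynchronously on a dedicated background worker thread; subclasses only
// implement DoCompute().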
class HybridAsyncOpKernel : public AsyncOpKernel {
public:
HybridAsyncOpKernel(OpKernelConstruction* ctx,
const char* background_worker_name);
void Compute(OpKernelContext* ctx) final;
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) final;
protected:
virtual Status DoCompute(OpKernelContext* ctx) = 0;
private:
BackgroundWorker background_worker_;
};
class MakeIteratorOp : public HybridAsyncOpKernel {
public:
explicit MakeIteratorOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_make_iterator") {}
protected:
Status DoCompute(OpKernelContext* ctx) override;
};
class IteratorGetNextOp : public HybridAsyncOpKernel {
public:
explicit IteratorGetNextOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_iterator_get_next") {
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
}
AsyncOpKernel* AsAsync() override;
protected:
Status DoCompute(OpKernelContext* ctx) override;
private:
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
class IteratorGetModelProtoOp : public HybridAsyncOpKernel {
public:
explicit IteratorGetModelProtoOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(
ctx,
"tf_data_iterator_get_model_proto") {}
protected:
Status DoCompute(OpKernelContext* ctx) override;
};
class DeleteIteratorOp : public HybridAsyncOpKernel {
public:
explicit DeleteIteratorOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_delete_iterator") {}
protected:
Status DoCompute(OpKernelContext* ctx) override;
};
class IteratorGetNextAsOptionalOp : public HybridAsyncOpKernel {
public:
explicit IteratorGetNextAsOptionalOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_iterator_get_next_as_optional") {
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
}
protected:
Status DoCompute(OpKernelContext* ctx) override;
private:
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
class IteratorToStringHandleOp : public OpKernel {
public:
explicit IteratorToStringHandleOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override;
};
class IteratorFromStringHandleOp : public OpKernel {
public:
explicit IteratorFromStringHandleOp(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
private:
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
};
class SerializeIteratorOp : public OpKernel {
public:
static constexpr const char* const kExternalStatePolicy =
"external_state_policy";
explicit SerializeIteratorOp(OpKernelConstruction* ctx);
void Compute(OpKernelContext* ctx) override;
private:
ExternalStatePolicy external_state_policy_ = ExternalStatePolicy::POLICY_WARN;
};
class DeserializeIteratorOp : public OpKernel {
public:
explicit DeserializeIteratorOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override;
};
}
}
#endif
#include "tensorflow/core/kernels/data/iterator_ops.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/activity_watcher/activity_utils.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/finalization_utils.h"
#include "tensorflow/core/data/metric_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/tf_data_memory_logger.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/model.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/kernels/data/optional_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
const char kAnonymousIterator[] = "AnonymousIterator";
const char kAnonymousIteratorV2[] = "AnonymousIteratorV2";
const char kAnonymousIteratorV3[] = "AnonymousIteratorV3";
const char kIteratorVariantTypeName[] = "tensorflow::Iterator";
const char kOutputShapes[] = "output_shapes";
const char kOutputTypes[] = "output_types";
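// Returns true iff the dataset options explicitly request symbolic
// checkpointing.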
bool SymbolicCheckpointEnabled(const Options& options) {
return options.optional_symbolic_checkpoint_case() ==
Options::kSymbolicCheckpoint &&
options.symbolic_checkpoint();
}
}
constexpr const char* const
SerializeIteratorOp::kExternalStatePolicy;
IteratorResource::IteratorResource(
Env* env, const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<DeviceMgr> device_mgr,
std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* flr)
: metrics_collector_(flr->device()->device_type(), *env),
unbounded_thread_pool_(env, "tf_data_iterator_resource"),
env_(*env),
device_mgr_(std::move(device_mgr)),
iterator_state_(std::make_shared<State>(std::move(flib_def),
std::move(pflr), flr,
nullptr)),
output_dtypes_(output_dtypes),
output_shapes_(output_shapes) {
VLOG(2) << "creating iterator resource";
}
IteratorResource::~IteratorResource() {
TfDatazMetricsRegistry::Deregister(tf_dataz_metrics_collector_);
VLOG(2) << "destroying iterator resource";
}
Status IteratorResource::GetNext(OpKernelContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"GetNext() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this iterator "
"before getting the next element.");
}
auto* dataset = captured_state->dataset();
IteratorContext::Params params(ctx);
params.cancellation_manager = captured_state->cancellation_manager();
params.flr = captured_state->flr();
params.function_handle_cache = captured_state->function_handle_cache();
params.resource_mgr = captured_state->resource_mgr();
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = captured_state->id_registry();
params.warm_start = dataset->options().warm_start();
params.model = captured_state->model();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(std::move(params));
const absl::Time start_time = metrics_collector_.RecordStart();
auto status = iterator->GetNext(&iter_ctx, out_tensors, end_of_sequence);
metrics_collector_.RecordStop(start_time, *out_tensors);
const int64_t get_next_latency_micros =
env_.NowMicros() - absl::ToUnixMicros(start_time);
tf_dataz_metrics_collector_->RecordGetNextLatency(get_next_latency_micros);
captured_state->MergeCheckpoint(iter_ctx.checkpoint());
return status;
}
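// Serializes the autotuning model of the currently active iterator into
// `model_proto`. Fails if the iterator has not been initialized or if no
// model is attached (e.g. autotuning was disabled for the input pipeline).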
absl::Status IteratorResource::GetModelProto(std::string& model_proto) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return absl::FailedPreconditionError(
"GetModelProto() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this iterator "
"before getting the next element.");
}
model::ModelProto proto;
if (auto model = captured_state->model(); model) {
TF_RETURN_IF_ERROR(model->ToProto(&proto));
} else {
return absl::NotFoundError(
"Cannot find this iterator's analytical model. Did you disable "
"autotune for the dataset used to create this iterator? See more "
"information at "
"https:
"AutotuneOptions .");
}
model_proto = proto.SerializeAsString();
return absl::OkStatus();
}
Status IteratorResource::Save(OpKernelContext* ctx,
ExternalStatePolicy external_state_policy,
IteratorStateWriter* writer) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"Save() failed because the iterator has not been initialized. Ensure "
"that you have run the initializer operation for this iterator before "
"saving it.");
}
auto* dataset = captured_state->dataset();
if (SymbolicCheckpointEnabled(dataset->options())) {
const auto& checkpoint = captured_state->checkpoint();
if (!checkpoint.GetStatus().ok()) {
LOG(WARNING) << "Symbolic checkpointing failed: "
<< checkpoint.GetStatus();
return checkpoint.GetStatus();
}
LOG(INFO) << "Saving symbolic checkpoint";
TF_RETURN_IF_ERROR(checkpoint.Save(writer));
return absl::OkStatus();
}
SerializationContext::Params params(ctx);
params.external_state_policy = external_state_policy;
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
SerializationContext serialization_ctx(params);
return iterator->Save(&serialization_ctx, writer);
}
Status IteratorResource::Restore(OpKernelContext* ctx,
IteratorStateReader* reader) {
const DatasetBase* dataset;
std::shared_ptr<State> new_state;
const DatasetBase* input_dataset;
{
tf_shared_lock l(mu_);
auto iterator = iterator_state_->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"Restore() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this "
"iterator before restoring it.");
}
dataset = iterator->dataset();
dataset->Ref();
new_state =
std::make_shared<State>(iterator_state_->flib_def(),
iterator_state_->pflr(), iterator_state_->flr(),
nullptr);
input_dataset = iterator_state_->dataset();
iterator_state_->cancellation_manager()->StartCancel();
}
core::ScopedUnref scoped_unref(dataset);
IteratorContext::Params params(ctx);
params.cancellation_manager = new_state->cancellation_manager();
params.flr = new_state->flr();
params.function_handle_cache = new_state->function_handle_cache();
params.resource_mgr = new_state->resource_mgr();
params.symbolic_checkpoint =
SymbolicCheckpointEnabled(input_dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = new_state->id_registry();
params.warm_start = dataset->options().warm_start();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(IteratorContext(std::move(params)));
std::unique_ptr<IteratorBase> iterator_base;
TF_RETURN_IF_ERROR(dataset->MakeIteratorFromCheckpoint(
&iter_ctx, "Iterator", reader, &iterator_base));
new_state->DowncastAndSetIteratorAndDataset(std::move(iterator_base),
input_dataset);
new_state->MergeCheckpoint(iter_ctx.checkpoint());
mutex_lock l(mu_);
std::swap(iterator_state_, new_state);
return absl::OkStatus();
}
Status IteratorResource::SetIteratorFromDataset(OpKernelContext* ctx,
const DatasetBase* dataset) {
std::shared_ptr<State> new_state;
{
tf_shared_lock l(mu_);
new_state =
std::make_shared<State>(iterator_state_->flib_def(),
iterator_state_->pflr(), iterator_state_->flr(),
nullptr);
}
IteratorContext::Params params(ctx);
params.cancellation_manager = new_state->cancellation_manager();
params.flr = new_state->flr();
params.function_handle_cache = new_state->function_handle_cache();
params.resource_mgr = new_state->resource_mgr();
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = new_state->id_registry();
params.warm_start = dataset->options().warm_start();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(IteratorContext(std::move(params)));
std::unique_ptr<IteratorBase> iterator;
if (ctx->function_library()->device()->device_type() == DEVICE_CPU) {
DatasetBase* finalized_dataset;
TF_ASSIGN_OR_RETURN(finalized_dataset, GetFinalizedDataset(ctx, dataset));
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(&iter_ctx,
nullptr,
"Iterator", &iterator));
} else {
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iter_ctx,
nullptr, "Iterator",
&iterator));
}
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, iterator->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, iterator->output_shapes()));
new_state->DowncastAndSetIteratorAndDataset(std::move(iterator), dataset);
new_state->SetModel(iter_ctx.model());
new_state->MergeCheckpoint(iter_ctx.checkpoint());
mutex_lock l(mu_);
std::swap(iterator_state_, new_state);
tf_dataz_metrics_collector_ = std::make_shared<TfDatazMetricsCollector>(
env_, iterator_state_->iterator(), iterator_state_->model());
EnsureIteratorMemoryLoggerStarted();
TfDatazMetricsRegistry::Register(tf_dataz_metrics_collector_);
return absl::OkStatus();
}
void IteratorResource::State::DowncastAndSetIteratorAndDataset(
std::unique_ptr<IteratorBase> it, const DatasetBase* dataset) {
iterator_.reset(static_cast<DatasetBaseIterator*>(it.release()));
if (dataset) {
dataset->Ref();
dataset_.reset(const_cast<DatasetBase*>(dataset));
}
}
void IteratorResource::State::MergeCheckpoint(MemoryCheckpoint* other) {
if (SymbolicCheckpointEnabled(dataset_->options())) {
checkpoint_.Merge(other);
}
}
void IteratorResource::State::SetModel(std::shared_ptr<model::Model> model) {
model_ = model;
}
namespace {
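// Converts between IteratorResource checkpoints and DT_VARIANT tensors:
// InitializeFromIterator()/Serialize() capture the iterator state as a vector
// of IteratorStateVariant values, while InitFromTensor()/GetReader() rebuild
// an IteratorStateReader from such a tensor.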
class IteratorVariantSerializer {
public:
IteratorVariantSerializer() = default;
Status InitializeFromIterator(OpKernelContext* ctx,
ExternalStatePolicy external_state_policy,
IteratorResource* iterator_resource) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(
iterator_resource->Save(ctx, external_state_policy, &writer));
std::vector<std::unique_ptr<VariantTensorData>> data;
writer.ReleaseData(&data);
variants_.clear();
variants_.reserve(data.size());
for (auto& it : data) {
IteratorStateVariant v;
TF_RETURN_IF_ERROR(v.InitializeFromVariantData(std::move(it)));
variants_.push_back(v);
}
num_tensors_ = variants_.size();
can_serialize_ = true;
return absl::OkStatus();
}
Status InitFromTensor(const Tensor* serialized_t) {
int64_t num_tensors = serialized_t->dim_size(0);
auto serialized_vec = serialized_t->vec<Variant>();
std::vector<const VariantTensorData*> data;
data.reserve(num_tensors);
for (int i = 0; i < num_tensors; ++i) {
auto* w = serialized_vec(i).get<IteratorStateVariant>();
if (!w) {
return errors::Internal(
"Cannot initialize an iterator from tensor ",
serialized_vec(i).DebugString(),
". Expected a variant tensor of type IteratorStateVariant");
}
data.push_back(w->GetData());
}
reader_ = std::make_unique<VariantTensorDataReader>(data);
num_tensors_ = data.size();
return absl::OkStatus();
}
int64_t NumTensors() { return num_tensors_; }
Status Serialize(Tensor* serialized) {
if (!can_serialize_) {
return errors::InvalidArgument(
"Please call InitializeFromIterator before calling Serialize.");
}
int64_t size = variants_.size();
for (int64_t i = 0; i < size; ++i) {
if (variants_[i].GetData() == nullptr) {
return errors::Internal(
"Cannot serialize an empty IteratorStateVariant");
}
serialized->vec<Variant>()(i) = variants_[i];
}
return absl::OkStatus();
}
IteratorStateReader* GetReader() { return reader_.get(); }
private:
bool can_serialize_ = false;
int64_t num_tensors_;
std::vector<IteratorStateVariant> variants_;
std::unique_ptr<IteratorStateReader> reader_;
};
}
IteratorHandleOp::IteratorHandleOp(OpKernelConstruction* ctx)
: OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &name_));
}
IteratorHandleOp::~IteratorHandleOp() {
if (resource_ != nullptr) {
resource_->Unref();
if (cinfo_.resource_is_private_to_kernel()) {
if (!cinfo_.resource_manager()
->template Delete<IteratorResource>(cinfo_.container(),
cinfo_.name())
.ok()) {
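        // Errors from Delete() are intentionally swallowed: the destructor
        // has no way to report them, and the resource may already have been
        // removed elsewhere.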
}
}
}
}
void IteratorHandleOp::Compute(OpKernelContext* context)
TF_LOCKS_EXCLUDED(mu_) {
{
mutex_lock l(mu_);
if (resource_ == nullptr) {
FunctionLibraryRuntime* flr;
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr); | #include "tensorflow/core/kernels/data/iterator_ops.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::monitoring::testing::Histogram;
class IteratorOpsTest : public DatasetOpsTestBase {
public:
absl::StatusOr<core::RefCountPtr<IteratorResource>> GetIteratorResource() {
FunctionLibraryRuntime* flr = nullptr;
std::unique_ptr<DeviceMgr> device_mgr;
std::unique_ptr<FunctionLibraryDefinition> flib_def;
std::unique_ptr<ProcessFunctionLibraryRuntime> plfr;
TF_RETURN_IF_ERROR(dataset_ctx_->function_library()->Clone(
&flib_def, &plfr, &flr, true));
core::RefCountPtr<IteratorResource> iter_resource(
new IteratorResource(dataset_ctx_->env(), dataset_->output_dtypes(),
dataset_->output_shapes(), std::move(device_mgr),
std::move(flib_def), std::move(plfr), flr));
TF_RETURN_IF_ERROR(
iter_resource->SetIteratorFromDataset(dataset_ctx_.get(), dataset_));
return iter_resource;
}
absl::StatusOr<std::vector<std::vector<Tensor>>> GetIteratorOutput(
IteratorResource& iterator) {
std::vector<std::vector<Tensor>> output;
for (bool end_of_sequence = false; !end_of_sequence;) {
std::vector<Tensor> tensors;
TF_RETURN_IF_ERROR(
iterator.GetNext(dataset_ctx_.get(), &tensors, &end_of_sequence));
if (end_of_sequence) {
break;
}
output.push_back(std::move(tensors));
}
return output;
}
};
TEST_F(IteratorOpsTest, CollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<Histogram> iterator_gap("/tensorflow/data/iterator_gap");
CellReader<int64_t> throughput("/tensorflow/data/bytes_fetched");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_FLOAT_EQ(iterator_gap.Delta().num(), 0.0);
EXPECT_EQ(throughput.Delta(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0.0);
EXPECT_EQ(iterator_busy.Delta(), 0.0);
RangeDatasetParams dataset_params = RangeDatasetParams(0, 10, 3);
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK_AND_ASSIGN(core::RefCountPtr<IteratorResource> iter_resource,
GetIteratorResource());
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::vector<Tensor>> output,
GetIteratorOutput(*iter_resource));
EXPECT_EQ(output.size(), 4);
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 5.0);
EXPECT_GT(latency_histogram.sum(), 0.0);
Histogram iterator_gap_histogram = iterator_gap.Delta();
EXPECT_FLOAT_EQ(iterator_gap_histogram.num(), 5.0);
EXPECT_GT(iterator_gap_histogram.sum(), 0.0);
EXPECT_GT(throughput.Delta(), 0);
EXPECT_GT(iterator_lifetime.Delta(), 0);
EXPECT_GT(iterator_busy.Delta(), 0.0);
}
}
}
} |
1,538 | cpp | tensorflow/tensorflow | finalize_dataset_op | tensorflow/core/kernels/data/finalize_dataset_op.cc | tensorflow/core/kernels/data/finalize_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_FINALIZE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_FINALIZE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
namespace data {
class FinalizeDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Finalize";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kHasCapturedRef = "has_captured_ref";
explicit FinalizeDatasetOp(OpKernelConstruction* ctx);
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
bool has_captured_ref_;
};
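// Fallback registered for non-CPU devices: dataset finalization only happens
// on CPU, so this kernel forwards the input dataset unchanged and logs a
// warning.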
class FinalizeDatasetNoopOp : public UnaryDatasetOpKernel {
public:
explicit FinalizeDatasetNoopOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override {
LOG(WARNING) << "FinalizeDataset is only supported on CPU. Using it on "
"devices other than CPU has no effect.";
input->Ref();
*output = input;
}
};
}
}
#endif
#include "tensorflow/core/kernels/data/finalize_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/experimental/threadpool_dataset_op.h"
#include "tensorflow/core/kernels/data/model_dataset_op.h"
#include "tensorflow/core/kernels/data/optimize_dataset_op.h"
namespace tensorflow {
namespace data {
constexpr const char* const FinalizeDatasetOp::kDatasetType;
constexpr const char* const FinalizeDatasetOp::kInputDataset;
constexpr const char* const FinalizeDatasetOp::kOutputTypes;
constexpr const char* const FinalizeDatasetOp::kOutputShapes;
constexpr const char* const FinalizeDatasetOp::kHasCapturedRef;
namespace {
void GetModelDatasetParams(const Options& options,
model::AutotuneAlgorithm* algorithm,
int64_t* cpu_budget, int64_t* ram_budget) {
*algorithm = model::AutotuneAlgorithm::HILL_CLIMB;
*cpu_budget = options.autotune_options().cpu_budget();
*ram_budget = options.autotune_options().ram_budget();
}
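// Wraps `input` with the internal datasets implied by its options, in order:
// MaxIntraOpParallelismDataset, PrivateThreadPoolDataset, ModelDataset (for
// autotuning), and OptimizeDataset (graph rewrites). Rewrites are skipped
// with a warning when the input pipeline captured reference variables.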
void MakeDatasetHelper(OpKernelContext* ctx, bool has_captured_ref,
DatasetBase* input, DatasetBase** output) {
*output = input;
input->Ref();
const Options& options = input->options();
if (ShouldConfigureMaxIntraOpParallelism(options)) {
experimental::MaxIntraOpParallelismDatasetOp::MakeDatasetFromOptions(
ctx, input, options.threading_options().max_intra_op_parallelism(),
output);
input->Unref();
input = *output;
}
if (ShouldUsePrivateThreadPool(options)) {
experimental::PrivateThreadPoolDatasetOp::MakeDatasetFromOptions(
ctx, input, options.threading_options().private_threadpool_size(),
output);
input->Unref();
input = *output;
}
if (ShouldUseAutotuning(options)) {
model::AutotuneAlgorithm algorithm;
int64_t cpu_budget;
int64_t ram_budget;
GetModelDatasetParams(options, &algorithm, &cpu_budget, &ram_budget);
ModelDatasetOp::MakeDatasetFromOptions(ctx, input, algorithm, cpu_budget,
ram_budget, output);
input->Unref();
input = *output;
}
absl::flat_hash_set<tstring> optimizations_enabled;
absl::flat_hash_set<tstring> optimizations_disabled;
absl::flat_hash_set<tstring> optimizations_default;
GetOptimizations(options, &optimizations_enabled, &optimizations_disabled,
&optimizations_default);
if (ShouldApplyOptimizations(options, optimizations_enabled,
optimizations_default)) {
if (has_captured_ref &&
(!optimizations_enabled.empty() || !optimizations_default.empty())) {
LOG(WARNING)
<< "tf.data graph rewrites are not compatible with reference "
"variables. The following rewrites will be disabled: "
<< absl::StrJoin(optimizations_enabled, ", ") << ", "
<< absl::StrJoin(optimizations_default, ", ") << ". "
<< "To enable rewrites, use resource variables instead by calling "
"`tf.enable_resource_variables()` at the start of the program.";
} else {
auto optimization_configs = CreateGraphRewriteConfigs(options);
OptimizeDatasetOp::MakeDatasetFromOptions(
ctx, input, optimizations_enabled, optimizations_disabled,
optimizations_default, optimization_configs, output);
input->Unref();
input = *output;
}
}
}
}
FinalizeDatasetOp::FinalizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kHasCapturedRef)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kHasCapturedRef, &has_captured_ref_));
} else {
has_captured_ref_ = false;
}
}
void FinalizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
MakeDatasetHelper(ctx, has_captured_ref_, input, output);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FinalizeDataset").Device(DEVICE_CPU).Priority(2),
FinalizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("FinalizeDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
FinalizeDatasetNoopOp);
}
}
} | #include "tensorflow/core/kernels/data/finalize_dataset_op.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
class FinalizeDatasetParams : public DatasetParams {
public:
template <typename T>
FinalizeDatasetParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
has_captured_ref_(false) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(FinalizeDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{FinalizeDatasetOp::kHasCapturedRef, has_captured_ref_},
{FinalizeDatasetOp::kOutputTypes, output_dtypes_},
{FinalizeDatasetOp::kOutputShapes, output_shapes_}};
return absl::OkStatus();
}
string dataset_type() const override { return "Finalize"; }
private:
bool has_captured_ref_;
};
class FinalizeDatasetOpTest : public DatasetOpsTestBase {
public:
void CheckDatasetPipelineTypeStrings(
const std::vector<std::string>& type_strings) {
CheckDatasetPipelineTypeString(dataset_, type_strings, 0);
}
void CheckDatasetPipelineTypeString(
const DatasetBase* dataset, const std::vector<std::string>& type_strings,
int index) {
EXPECT_GT(type_strings.size(), index);
EXPECT_EQ(dataset->type_string(), type_strings[index]);
std::vector<const DatasetBase*> input_datasets;
TF_ASSERT_OK(dataset->InputDatasets(&input_datasets));
if (input_datasets.empty()) {
return;
}
EXPECT_EQ(1, input_datasets.size());
CheckDatasetPipelineTypeString(input_datasets[0], type_strings, index + 1);
}
};
constexpr char kNoOptimizationOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
)pb";
constexpr char kMaxIntraOpParallelismOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
threading_options { max_intra_op_parallelism: 10 }
)pb";
constexpr char kPrivateThreadPoolOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
threading_options { private_threadpool_size: 10 }
)pb";
constexpr char kModelOptions[] = R"pb(
optimization_options { apply_default_optimizations: false }
)pb";
constexpr char kOptimizationsDefaultOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: true }
)pb";
constexpr char kAllChainedDatasetsOptions[] = R"pb(
autotune_options { enabled: true }
optimization_options { apply_default_optimizations: true }
threading_options { max_intra_op_parallelism: 10 private_threadpool_size: 10 }
)pb";
OptionsDatasetParams NoOptimizationOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kNoOptimizationOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams MaxIntraOpParallelismOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kMaxIntraOpParallelismOptions,
&options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams PrivateThreadPoolOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kPrivateThreadPoolOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams ModelOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kModelOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams OptimizationsDefaultOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kOptimizationsDefaultOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams AllChainedDatasetsOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kAllChainedDatasetsOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
FinalizeDatasetParams NoOptimizationFinalizeParams() {
return FinalizeDatasetParams(NoOptimizationOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
FinalizeDatasetParams MaxIntraOpParallelismParams() {
return FinalizeDatasetParams(MaxIntraOpParallelismOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"MaxIntraOpParallelismDatasetOp");
}
FinalizeDatasetParams PrivateThreadPoolParams() {
return FinalizeDatasetParams(PrivateThreadPoolOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"PrivateThreadPoolDatasetOp");
}
FinalizeDatasetParams ModelParams() {
return FinalizeDatasetParams(ModelOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"ModelDatasetOp");
}
FinalizeDatasetParams OptimizationsDefaultParams() {
return FinalizeDatasetParams(OptimizationsDefaultOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"private_thread_pool");
}
FinalizeDatasetParams AllChainedDatasetsParams() {
return FinalizeDatasetParams(AllChainedDatasetsOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"inject/prefetch_ModelDataset/_9");
}
TEST_F(FinalizeDatasetOpTest, NoOptimizationNodeName) {
auto test_case_params = NoOptimizationFinalizeParams();
TF_ASSERT_OK(Initialize(test_case_params));
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings({"OptionsDataset", "RangeDataset"});
}
std::vector<GetNextTestCase<FinalizeDatasetParams>> GetNextTestCases() {
return {{NoOptimizationFinalizeParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{MaxIntraOpParallelismParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{PrivateThreadPoolParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{ModelParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{OptimizationsDefaultParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{AllChainedDatasetsParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})}};
}
ITERATOR_GET_NEXT_TEST_P(FinalizeDatasetOpTest, FinalizeDatasetParams,
GetNextTestCases())
TEST_F(FinalizeDatasetOpTest, MaxIntraOpParallelismNodeName) {
auto test_case_params = MaxIntraOpParallelismParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"MaxIntraOpParallelismDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, PrivateThreadPoolNodeName) {
auto test_case_params = PrivateThreadPoolParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"PrivateThreadPoolDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, ModelNodeName) {
auto test_case_params = ModelParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"ModelDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, OptimizationsDefaultNodeName) {
auto test_case_params = OptimizationsDefaultParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings({"PrivateThreadPoolDataset",
"MaxIntraOpParallelismDataset",
"OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, AllChainedDatasetsNodeName) {
auto test_case_params = AllChainedDatasetsParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"PrefetchDataset", "ModelDataset", "PrivateThreadPoolDataset",
"MaxIntraOpParallelismDataset", "OptionsDataset", "RangeDataset"});
}
}
}
} |
1,539 | cpp | tensorflow/tensorflow | concatenate_dataset_op | tensorflow/core/kernels/data/concatenate_dataset_op.cc | tensorflow/core/kernels/data/concatenate_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_CONCATENATE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_CONCATENATE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ConcatenateDatasetOp : public BinaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Concatenate";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kAnotherDataset = "another_dataset";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit ConcatenateDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase* to_concatenate, DatasetBase** output) override;
private:
class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
constexpr const char* const ConcatenateDatasetOp::kDatasetType;
constexpr const char* const ConcatenateDatasetOp::kInputDataset;
constexpr const char* const ConcatenateDatasetOp::kAnotherDataset;
constexpr const char* const ConcatenateDatasetOp::kOutputTypes;
constexpr const char* const ConcatenateDatasetOp::kOutputShapes;
constexpr char kIndex[] = "i";
constexpr char kInputImplUninitialized[] = "input_impl_uninitialized";
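// Dataset that yields every element of `input_` followed by every element of
// `to_concatenate_`. Each output shape is the most specific shape compatible
// with both inputs (see MostSpecificCompatibleShape below), and the
// cardinality is the sum of the two input cardinalities when both are known
// and finite.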
class ConcatenateDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, const DatasetBase* input,
const DatasetBase* to_concatenate)
: DatasetBase(DatasetContext(ctx)),
input_(input),
to_concatenate_(to_concatenate),
input_cardinality_(input->Cardinality()),
to_concatenate_cardinality_(to_concatenate_->Cardinality()) {
input_->Ref();
to_concatenate_->Ref();
auto os_input = input->output_shapes();
auto os_concatenate = to_concatenate->output_shapes();
for (int i = 0; i < os_input.size(); i++) {
PartialTensorShape output_tensorshape({});
OP_REQUIRES_OK(ctx,
MostSpecificCompatibleShape(os_input[i], os_concatenate[i],
&output_tensorshape));
output_shapes_.push_back(output_tensorshape);
}
}
~Dataset() override {
input_->Unref();
to_concatenate_->Unref();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t input_cardinality = input_->Cardinality(options);
int64_t to_concatenate_cardinality = to_concatenate_->Cardinality(options);
if (input_cardinality == kInfiniteCardinality ||
to_concatenate_cardinality == kInfiniteCardinality) {
return kInfiniteCardinality;
}
if (input_cardinality == kUnknownCardinality ||
to_concatenate_cardinality == kUnknownCardinality) {
return kUnknownCardinality;
}
return input_cardinality + to_concatenate_cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
inputs->push_back(to_concatenate_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(input_->CheckExternalState());
return to_concatenate_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
if (index < input_cardinality_) {
TF_RETURN_IF_ERROR(input_->Get(ctx, index, out_tensors));
} else {
TF_RETURN_IF_ERROR(
to_concatenate_->Get(ctx, index - input_cardinality_, out_tensors));
}
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph));
Node* to_concatenate_graph = nullptr;
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, to_concatenate_, &to_concatenate_graph));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph, to_concatenate_graph}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&input_contexts_[0], this, strings::StrCat(prefix(), "[0]"),
&input_impl_));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (i_ < 2) {
TF_RETURN_IF_ERROR(input_impl_->GetNext(&input_contexts_[i_],
out_tensors, end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[i_].checkpoint());
if (!*end_of_sequence) {
return absl::OkStatus();
}
if (++i_ < 2) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[i_], this, strings::StrCat(prefix(), "[1]"),
&input_impl_));
}
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kIndex, i_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputImplUninitialized,
static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kIndex, &i_));
int64_t input_uninitialized;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputImplUninitialized,
&input_uninitialized));
if (static_cast<bool>(input_uninitialized)) {
input_impl_.reset();
return absl::OkStatus();
}
if (!TF_PREDICT_TRUE(i_ >= 0 && i_ <= 2))
return errors::InvalidArgument("i_ must be in range [0, 2].");
if (i_ == 1) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
ctx, this, strings::StrCat(prefix(), "[1]"), &input_impl_));
} else if (i_ == 2) {
input_impl_.reset();
}
if (input_impl_) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::vector<IteratorContext> input_contexts_;
};
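  // Merges two shapes of the same known rank dimension-by-dimension, keeping a
  // dimension where the inputs agree and using -1 (unknown) where they differ;
  // if the ranks differ or either rank is unknown, the output is left
  // unchanged. Illustrative example (values not taken from any particular
  // dataset): merging [2, 3] with [2, 2] produces [2, -1].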
Status MostSpecificCompatibleShape(const PartialTensorShape& ts1,
const PartialTensorShape& ts2,
PartialTensorShape* output_tensorshape) {
if (ts1.dims() != ts2.dims() || ts1.unknown_rank() || ts2.unknown_rank())
return absl::OkStatus();
auto dims1 = ts1.dim_sizes();
auto dims2 = ts2.dim_sizes();
for (int d = 0; d < ts1.dims(); d++) {
if (dims1[d] == dims2[d])
TF_RETURN_IF_ERROR(output_tensorshape->AddDimWithStatus(dims1[d]));
else
TF_RETURN_IF_ERROR(output_tensorshape->AddDimWithStatus(-1));
}
return absl::OkStatus();
}
const DatasetBase* input_;
const DatasetBase* to_concatenate_;
const int64_t input_cardinality_;
const int64_t to_concatenate_cardinality_;
std::vector<PartialTensorShape> output_shapes_;
};
ConcatenateDatasetOp::ConcatenateDatasetOp(OpKernelConstruction* ctx)
: BinaryDatasetOpKernel(ctx) {}
void ConcatenateDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase* to_concatenate,
DatasetBase** output) {
  OP_REQUIRES(ctx, input->output_dtypes() == to_concatenate->output_dtypes(),
              errors::InvalidArgument(
                  "input dataset and dataset to concatenate"
                  " have different output_types ",
                  DataTypeVectorString(input->output_dtypes()), " and ",
                  DataTypeVectorString(to_concatenate->output_dtypes())));
*output = new Dataset(ctx, input, to_concatenate);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ConcatenateDataset").Device(DEVICE_CPU),
ConcatenateDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "concatenate_dataset";
ConcatenateDatasetParams SameShapeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{2, 2},
{{1, 2, 3, 4}, {5, 6, 7, 8}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(
TensorShape{2, 2}, {{11, 12, 13, 14}, {15, 16, 17, 18}}),
"tensor_slice_1");
return ConcatenateDatasetParams(
std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64, DT_INT64},
{PartialTensorShape({2}), PartialTensorShape({2})},
kNodeName);
}
ConcatenateDatasetParams DifferentShapeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 3}, {1, 2, 3, 4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2, 2}, {7, 8, 9, 10})},
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {11, 12, 13, 14}),
CreateTensor<int64_t>(TensorShape{2, 1}, {15, 16})},
"tensor_slice_1");
return ConcatenateDatasetParams(
std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64, DT_INT64},
{PartialTensorShape({-1}), PartialTensorShape({-1})},
kNodeName);
}
ConcatenateDatasetParams DifferentDtypeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{2, 2}, {{1, 2, 3, 4}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<double>(TensorShape{2, 2}, {{1.0, 2.0, 3.0, 4.0}}),
"tensor_slice_1");
return ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
class ConcatenateDatasetOpTest : public DatasetOpsTestBase {};
std::vector<GetNextTestCase<ConcatenateDatasetParams>> GetNextTestCases() {
return {{SameShapeConcatenateDatasetParams(),
CreateTensors<int64_t>(TensorShape({2}), {{1, 2},
{5, 6},
{3, 4},
{7, 8},
{11, 12},
{15, 16},
{13, 14},
{17, 18}})},
{DifferentShapeConcatenateDatasetParams(),
{CreateTensor<int64_t>(TensorShape{3}, {1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2}, {7, 8}),
CreateTensor<int64_t>(TensorShape{3}, {4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2}, {9, 10}),
CreateTensor<int64_t>(TensorShape{2}, {11, 12}),
CreateTensor<int64_t>(TensorShape{1}, {15}),
CreateTensor<int64_t>(TensorShape{2}, {13, 14}),
CreateTensor<int64_t>(TensorShape{1}, {16})}}};
}
ITERATOR_GET_NEXT_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
GetNextTestCases())
TEST_F(ConcatenateDatasetOpTest, DifferentDtypes) {
auto dataset_params = DifferentDtypeConcatenateDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(ConcatenateDatasetOpTest, DatasetNodeName) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ConcatenateDatasetOpTest, DatasetTypeString) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ConcatenateDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ConcatenateDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_dtypes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<ConcatenateDatasetParams>>
DatasetOutputShapesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_shapes()},
{
DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ConcatenateDatasetParams>>
CardinalityTestCases() {
return {{SameShapeConcatenateDatasetParams(),
4},
{DifferentShapeConcatenateDatasetParams(),
4}};
}
DATASET_CARDINALITY_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ConcatenateDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_dtypes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<ConcatenateDatasetParams>>
IteratorOutputShapesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_shapes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ConcatenateDatasetOpTest, IteratorPrefix) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ConcatenateDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ConcatenateDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{SameShapeConcatenateDatasetParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({2}), {{1, 2},
{5, 6},
{3, 4},
{7, 8},
{11, 12},
{15, 16},
{13, 14},
{17, 18}})},
{DifferentShapeConcatenateDatasetParams(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{3}, {1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2}, {7, 8}),
CreateTensor<int64_t>(TensorShape{3}, {4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2}, {9, 10}),
CreateTensor<int64_t>(TensorShape{2}, {11, 12}),
CreateTensor<int64_t>(TensorShape{1}, {15}),
CreateTensor<int64_t>(TensorShape{2}, {13, 14}),
CreateTensor<int64_t>(TensorShape{1}, {16})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,540 | cpp | tensorflow/tensorflow | prefetch_autotuner | tensorflow/core/kernels/data/prefetch_autotuner.cc | tensorflow/core/kernels/data/prefetch_autotuner_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_AUTOTUNER_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_AUTOTUNER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
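// Grows the prefetch buffer size limit in response to observed buffer
// occupancy. With an explicit initial size the tuner is disabled; with
// model::kAutotune it alternates between an "upswing" phase (waiting for the
// buffer to fill to the current limit) and a "downswing" phase (waiting for
// it to drain to empty), raising the limit on each full drain, subject to the
// shared RamBudgetManager.
//
// Minimal usage sketch (illustrative values only):
//
//   auto budget = std::make_shared<model::RamBudgetManager>(100);
//   PrefetchAutotuner tuner(model::kAutotune, /*buffer_size_min=*/0, budget);
//   tuner.SetElementSize(1);     // limit starts at 1
//   tuner.RecordConsumption(1);  // buffer full  -> switch to downswing
//   tuner.RecordEmpty();         // buffer empty -> limit doubles to 2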
class PrefetchAutotuner {
public:
explicit PrefetchAutotuner(
int64_t initial_buffer_size, int64_t buffer_size_min,
std::shared_ptr<model::RamBudgetManager> ram_budget_manager);
int64_t buffer_limit() const { return buffer_limit_; }
bool HasElementSize() const { return element_size_bytes_.has_value(); }
void SetElementSize(int64_t element_size_bytes);
void RecordConsumption(size_t current_buffer_size);
void RecordEmpty() { RecordConsumption(0); }
private:
enum class Mode {
kDisabled,
kUpswing,
kDownswing,
};
int64_t buffer_limit_;
std::optional<int64_t> element_size_bytes_;
Mode mode_ = Mode::kDisabled;
std::shared_ptr<model::RamBudgetManager> ram_budget_manager_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include <cstdint>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/model.h"
namespace tensorflow {
namespace data {
PrefetchAutotuner::PrefetchAutotuner(
int64_t initial_buffer_size, int64_t buffer_size_min,
std::shared_ptr<model::RamBudgetManager> ram_budget_manager)
: buffer_limit_(initial_buffer_size),
ram_budget_manager_(ram_budget_manager) {
if (initial_buffer_size == model::kAutotune) {
mode_ = Mode::kUpswing;
buffer_limit_ = std::max(int64_t{1}, buffer_size_min);
}
}
namespace {
size_t kBufferLimitThreshold = 2048;
}
void PrefetchAutotuner::SetElementSize(int64_t element_size_bytes) {
if (ram_budget_manager_ && !ram_budget_manager_->RequestLegacyPrefetchBytes(
element_size_bytes * buffer_limit_)) {
LOG(WARNING)
<< "Prefetch autotuner tried to allocate "
<< element_size_bytes * buffer_limit_ << " bytes "
<< "after encountering the first element of size " << element_size_bytes
<< " bytes."
<< "This already causes the autotune ram budget to be exceeded. To "
<< "stay within the ram budget, either increase the ram budget or "
<< "reduce element size";
}
element_size_bytes_ = element_size_bytes;
}
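// Consumer-driven state machine: in kUpswing the tuner waits for the buffer
// to reach the current limit; in kDownswing it waits for the buffer to drain
// to zero and then raises the limit, doubling it below kBufferLimitThreshold
// and growing it by kBufferLimitThreshold per step above that, provided the
// RAM budget manager grants the extra bytes.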
void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) {
switch (mode_) {
case Mode::kDisabled:
return;
case Mode::kUpswing:
if (static_cast<int64_t>(current_buffer_size) == buffer_limit_) {
mode_ = Mode::kDownswing;
}
return;
case Mode::kDownswing:
if (current_buffer_size == 0) {
if (!element_size_bytes_.has_value()) {
return;
}
int64_t element_size_bytes = *element_size_bytes_;
int64_t attempt_new_buffer_limit;
if (buffer_limit_ >= static_cast<int64_t>(kBufferLimitThreshold)) {
attempt_new_buffer_limit = buffer_limit_ + kBufferLimitThreshold;
} else {
attempt_new_buffer_limit = buffer_limit_ * 2;
}
int64_t delta_bytes =
(attempt_new_buffer_limit - buffer_limit_) * element_size_bytes;
if (!ram_budget_manager_ ||
ram_budget_manager_->RequestLegacyPrefetchBytes(delta_bytes)) {
buffer_limit_ = attempt_new_buffer_limit;
}
mode_ = Mode::kUpswing;
}
return;
}
}
}
} | #include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include <vector>
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(PrefetchAutotuner, Disabled) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(2, 0, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
t.RecordConsumption(2);
t.RecordConsumption(0);
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
}
TEST(PrefetchAutotuner, Enabled) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(model::kAutotune, 0, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(1);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
}
TEST(PrefetchAutotuner, EnabledSteady) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(model::kAutotune, 0, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(1);
EXPECT_EQ(1, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
std::vector<size_t> consumption_values = {2, 3, 1, 4, 1, 2, 3, 1};
for (int i = 0; i < consumption_values.size(); ++i) {
t.RecordConsumption(consumption_values[i]);
EXPECT_EQ(4, t.buffer_limit())
<< "Failed at index " << i << " with value: " << consumption_values[i];
}
}
TEST(PrefetchAutotuner, StartWithMin) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(100);
PrefetchAutotuner t(model::kAutotune, 2, ram_manager);
t.SetElementSize(1);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
std::vector<size_t> consumption_values = {3, 5, 7, 1, 4, 6, 8, 3, 5, 1, 2, 4};
for (int i = 0; i < consumption_values.size(); ++i) {
t.RecordConsumption(consumption_values[i]);
EXPECT_EQ(8, t.buffer_limit())
<< "Failed at index " << i << " with value: " << consumption_values[i];
}
}
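// With a 200-byte budget and 50-byte elements, the limit can grow from 2 to 4
// (200 bytes in total), but a further doubling to 8 would exceed the budget
// and is therefore rejected.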
TEST(PrefetchAutotuner, RespectRamManager) {
auto ram_manager = std::make_shared<model::RamBudgetManager>(200);
PrefetchAutotuner t(model::kAutotune, 2, ram_manager);
t.SetElementSize(50);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
}
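// Releasing the model allocation back to the budget (RequestModelAllocation(0))
// frees enough headroom for the prefetch buffer limit to resume doubling.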
TEST(PrefetchAutotuner, RespectRamManagerWhenThereIsModelAllocation) {
int64_t model_allocation = 100000;
auto ram_manager = std::make_shared<model::RamBudgetManager>(
200 + model_allocation);
ASSERT_TRUE(ram_manager->RequestModelAllocation(model_allocation));
PrefetchAutotuner t(model::kAutotune, 2, ram_manager);
t.SetElementSize(50);
EXPECT_EQ(2, t.buffer_limit());
t.RecordConsumption(2);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
t.RecordConsumption(4);
t.RecordConsumption(0);
EXPECT_EQ(4, t.buffer_limit());
ASSERT_TRUE(ram_manager->RequestModelAllocation(0));
t.RecordConsumption(4);
t.RecordConsumption(0);
EXPECT_EQ(8, t.buffer_limit());
t.RecordConsumption(8);
t.RecordConsumption(0);
EXPECT_EQ(16, t.buffer_limit());
}
}
}
} |
1,541 | cpp | tensorflow/tensorflow | tensor_slice_dataset_op | tensorflow/core/kernels/data/tensor_slice_dataset_op.cc | tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_TENSOR_SLICE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_TENSOR_SLICE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class TensorSliceDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "TensorSlice";
static constexpr const char* const kComponents = "components";
static constexpr const char* const kToutputTypes = "Toutput_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kIsFiles = "is_files";
static constexpr const char* const kReplicateOnSplit = "replicate_on_split";
explicit TensorSliceDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
bool is_files_ = false;
bool replicate_on_split_ = false;
};
}
}
#endif
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const TensorSliceDatasetOp::kDatasetType;
constexpr const char* const TensorSliceDatasetOp::kComponents;
constexpr const char* const TensorSliceDatasetOp::kToutputTypes;
constexpr const char* const TensorSliceDatasetOp::kOutputShapes;
constexpr const char* const TensorSliceDatasetOp::kIsFiles;
constexpr const char* const TensorSliceDatasetOp::kReplicateOnSplit;
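// Emits one element per slice along the 0th dimension of every input component.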
class TensorSliceDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors,
bool is_files, bool replicate_on_split)
: DatasetBase(DatasetContext(ctx)),
tensors_(std::move(tensors)),
is_files_(is_files),
replicate_on_split_(replicate_on_split) {
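    // Record each component's dtype and per-element shape; dimension 0 is the
    // slice dimension and is dropped from the element shape.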
for (const Tensor& t : tensors_) {
dtypes_.push_back(t.dtype());
gtl::InlinedVector<int64_t, 4> element_dim_sizes;
for (int i = 1; i < t.dims(); ++i) {
element_dim_sizes.push_back(t.dim_size(i));
}
partial_shapes_.emplace_back(element_dim_sizes);
shapes_.emplace_back(std::move(element_dim_sizes));
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<IndexSplitProvider>(tensors_[0].dim_size(0)));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return partial_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return tensors_[0].dim_size(0);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->clear();
out_tensors->reserve(tensors_.size());
for (int i = 0; i < tensors_.size(); ++i) {
out_tensors->push_back(MaybeCopySubSlice(tensors_[i], index));
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> components;
components.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
if (is_files_) {
Node* file_node;
TF_RETURN_IF_ERROR(
b->AddIdentity(ctx, "FileIdentity", &node, &file_node));
}
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
components.emplace_back(node);
}
AttrValue dtypes;
b->BuildAttrValue(dtypes_, &dtypes);
AttrValue is_files;
b->BuildAttrValue(is_files_, &is_files);
AttrValue replicate_on_split;
b->BuildAttrValue(replicate_on_split_, &replicate_on_split);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},
{{kToutputTypes, dtypes},
{kIsFiles, is_files},
{kReplicateOnSplit, replicate_on_split}},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty() || dataset()->replicate_on_split_) {
split_provider_ = std::make_shared<IndexSplitProvider>(
dataset()->tensors_[0].dim_size(0));
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
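      // Under global shuffling, element selection is delegated to the shuffle
      // iterator instead of the split provider.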
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
int64_t index = split.scalar<int64_t>()();
out_tensors->reserve(dataset()->tensors_.size());
for (size_t i = 0; i < dataset()->tensors_.size(); ++i) {
out_tensors->push_back(
MaybeCopySubSlice(dataset()->tensors_[i], index));
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return split_provider_->Save(
[this](const std::string& key) { return full_name(key); }, writer);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(ctx);
}
return split_provider_->Restore(
[this](const std::string& key) { return full_name(key); }, reader);
}
private:
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
DataTypeVector dtypes_;
std::vector<TensorShape> shapes_;
std::vector<PartialTensorShape> partial_shapes_;
const bool is_files_;
const bool replicate_on_split_;
};
TensorSliceDatasetOp::TensorSliceDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
if (ctx->HasAttr(kIsFiles)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kIsFiles, &is_files_));
}
if (ctx->HasAttr(kReplicateOnSplit)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_));
}
}
void TensorSliceDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));
std::vector<Tensor> components;
components.reserve(inputs.size());
OP_REQUIRES(
ctx, inputs[0].dims() > 0,
errors::InvalidArgument("All components must be at least 1-dimensional"));
const int64_t num_slices = inputs[0].dim_size(0);
for (const Tensor& t : inputs) {
components.push_back(t);
OP_REQUIRES(ctx, t.dims() > 0,
errors::InvalidArgument(
"All components must be at least 1-dimensional"));
OP_REQUIRES(
ctx, t.dim_size(0) == num_slices,
errors::InvalidArgument(
"All components must have the same size in the 0th dimension"));
}
*output =
new Dataset(ctx, std::move(components), is_files_, replicate_on_split_);
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TensorSliceDataset").Device(DEVICE_CPU),
TensorSliceDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_slice_dataset";
class TensorSliceDatasetOpTest : public DatasetOpsTestBase {};
TensorSliceDatasetParams PlainTensorSliceDatasetParams() {
std::vector<Tensor> components = {
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<int64_t>(TensorShape({2, 2}), {1, 2, 3, 4}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint32>(TensorShape({2, 2}), {2, 3, 4, 5}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<uint64>(TensorShape({2, 2}), {3, 4, 5, 6}),
CreateTensor<double>(TensorShape({2, 1}), {37.0, 38.0}),
CreateTensor<tstring>(TensorShape({2, 1}), {"a", "b"})};
return {std::move(components), kNodeName};
}
TensorSliceDatasetParams NestedTensorSliceDatasetParams() {
std::vector<Tensor> components = {
CreateTensor<Variant>(
TensorShape({2, 1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0}),
CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({2, 1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"}),
CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6})};
return {std::move(components), kNodeName};
}
std::vector<GetNextTestCase<TensorSliceDatasetParams>> GetNextTestCases() {
return {
{PlainTensorSliceDatasetParams(),
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedTensorSliceDatasetParams(),
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedGetNextTest
: public TensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<TensorSliceDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::vector<string> input_names;
TF_ASSERT_OK(test_case.dataset_params.GetInputNames(&input_names));
size_t num_tensors_per_slice = input_names.size();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_slice = 0;
while (true) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (end_of_sequence) {
EXPECT_TRUE(out_tensors.empty());
break;
}
for (int i = 0; i < out_tensors.size(); ++i) {
EXPECT_LT(i + num_tensors_per_slice * cur_slice,
test_case.expected_outputs.size());
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]));
}
}
cur_slice++;
}
}
INSTANTIATE_TEST_SUITE_P(TensorSliceDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(TensorSliceDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TensorSliceDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TensorSliceDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<TensorSliceDatasetParams>>
DatasetOutputTypesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_dtypes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetOutputTypesTestCases())
std::vector<DatasetOutputShapesTestCase<TensorSliceDatasetParams>>
DatasetOutputShapesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_shapes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<TensorSliceDatasetParams>>
DatasetCardinalityTestCases() {
return {{PlainTensorSliceDatasetParams(), 2},
{NestedTensorSliceDatasetParams(), 2}};
}
DATASET_CARDINALITY_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<TensorSliceDatasetParams>>
IteratorOutputTypesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_dtypes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest,
TensorSliceDatasetParams,
IteratorOutputTypesTestCases())
std::vector<IteratorOutputShapesTestCase<TensorSliceDatasetParams>>
IteratorOutputShapesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_shapes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest,
TensorSliceDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(TensorSliceDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TensorSliceDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PlainTensorSliceDatasetParams(),
{0, 1, 2},
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedTensorSliceDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public TensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_context;
TF_ASSERT_OK(CreateSerializationContext(&serialization_context));
int cur_iteration = 0;
bool end_of_sequence = false;
auto params =
static_cast<TensorSliceDatasetParams&>(test_case.dataset_params);
int64_t num_slices = params.num_slices();
size_t num_tensors_per_slice = params.num_tensors_per_slice();
std::vector<Tensor> out_tensors;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_slices) {
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output =
out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i +
num_tensors_per_slice * (cur_iteration - 1)]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_slice *
(cur_iteration - 1)]));
}
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator",
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_SUITE_P(
TensorSliceDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
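// Sharding into 3 shards and reading shard index 1 visits input indices 1 and
// 4, i.e. the values 2 and 7.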
TEST_F(TensorSliceDatasetOpTest, SplitProvider) {
auto params = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape({7}), {{6, 2, 3, 8, 7, 0, 10}}),
kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}),
{{6}, {2}, {3}, {8}, {7}, {0}, {10}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {{2}, {7}})));
}
TEST_F(TensorSliceDatasetOpTest, SplitProviderEmpty) {
auto params = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape({0}), {{}}), kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {})));
}
}
}
} |
1,542 | cpp | tensorflow/tensorflow | tf_record_dataset_op | tensorflow/core/kernels/data/tf_record_dataset_op.cc | tensorflow/core/kernels/data/tf_record_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_TF_RECORD_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_TF_RECORD_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class TFRecordDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "TFRecord";
static constexpr const char* const kFileNames = "filenames";
static constexpr const char* const kCompressionType = "compression_type";
static constexpr const char* const kBufferSize = "buffer_size";
static constexpr const char* const kByteOffsets = "byte_offsets";
explicit TFRecordDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
int op_version_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/tf_record_dataset_op.h"
#include <cstdint>
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace data {
constexpr const char* const TFRecordDatasetOp::kDatasetType;
constexpr const char* const TFRecordDatasetOp::kFileNames;
constexpr const char* const TFRecordDatasetOp::kCompressionType;
constexpr const char* const TFRecordDatasetOp::kBufferSize;
constexpr const char* const TFRecordDatasetOp::kByteOffsets;
constexpr char kTFRecordDataset[] = "TFRecordDataset";
constexpr char kCurrentFileIndex[] = "current_file_index";
constexpr char kOffset[] = "offset";
constexpr char kGcsFsPrefix[] = "gs:
constexpr char kS3FsPrefix[] = "s3:
constexpr int64_t kUnspecifiedBufferSize = -1;
constexpr int64_t kDefaultBufferSize = 256LL << 10;
constexpr int64_t kCloudTpuBlockSize = 127LL << 20;
constexpr int64_t kS3BlockSize = kCloudTpuBlockSize;
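// Returns true when built for a Cloud TPU environment that reads from GCS.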
bool is_cloud_tpu_gcs_fs() {
#if (defined(PLATFORM_CLOUD_TPU) && defined(TPU_GCS_FS)) || \
defined(LIBTPU_ON_GCE)
return true;
#endif
return false;
}
class TFRecordDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<string> filenames,
const string& compression_type, int64_t buffer_size,
std::vector<int64_t> byte_offsets, int op_version)
: DatasetBase(DatasetContext(ctx)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
options_(io::RecordReaderOptions::CreateRecordReaderOptions(
compression_type)),
byte_offsets_(std::move(byte_offsets)),
op_version_(op_version) {
if (buffer_size > 0) {
options_.buffer_size = buffer_size;
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* filenames = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
Node* compression_type = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(options_.buffer_size, &buffer_size));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {filenames, compression_type, buffer_size}, output));
Node* byte_offsets = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(byte_offsets_, &byte_offsets));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
out_tensors->reserve(1);
mutex_lock l(mu_);
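    // Read records from the current file until it is exhausted (OutOfRange),
    // then advance to the next file; stop once every file has been consumed.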
do {
if (reader_) {
out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
TensorShape({}));
Status s =
reader_->ReadRecord(&out_tensors->back().scalar<tstring>()());
if (s.ok()) {
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(kDatasetType);
bytes_counter->IncrementBy(
out_tensors->back().scalar<tstring>()().size());
*end_of_sequence = false;
return absl::OkStatus();
}
out_tensors->pop_back();
if (!errors::IsOutOfRange(s)) {
ResetStreamsLocked();
++current_file_index_;
return s;
}
ResetStreamsLocked();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
} while (true);
}
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
*num_skipped = 0;
mutex_lock l(mu_);
do {
if (reader_) {
int last_num_skipped;
Status s = reader_->SkipRecords(num_to_skip - *num_skipped,
&last_num_skipped);
*num_skipped += last_num_skipped;
if (s.ok()) {
*end_of_sequence = false;
return absl::OkStatus();
}
if (!errors::IsOutOfRange(s)) {
ResetStreamsLocked();
++current_file_index_;
return s;
}
ResetStreamsLocked();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
if (reader_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kOffset, reader_->TellOffset()));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
ResetStreamsLocked();
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, ¤t_file_index));
current_file_index_ = size_t(current_file_index);
if (reader->Contains(prefix(), kOffset)) {
int64_t offset;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kOffset, &offset));
TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
TF_RETURN_IF_ERROR(reader_->SeekOffset(offset));
}
return absl::OkStatus();
}
private:
Status SetupStreamsLocked(Env* env) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (current_file_index_ >= dataset()->filenames_.size()) {
return errors::InvalidArgument(
"current_file_index_:", current_file_index_,
" >= filenames_.size():", dataset()->filenames_.size());
}
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
reader_ = std::make_unique<io::SequentialRecordReader>(
file_.get(), dataset()->options_);
if (!dataset()->byte_offsets_.empty()) {
TF_RETURN_IF_ERROR(
reader_->SeekOffset(dataset()->byte_offsets_[current_file_index_]));
}
return absl::OkStatus();
}
void ResetStreamsLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
reader_.reset();
file_.reset();
}
mutex mu_;
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_ TF_GUARDED_BY(mu_);
std::unique_ptr<io::SequentialRecordReader> reader_ TF_GUARDED_BY(mu_);
};
const std::vector<string> filenames_;
const tstring compression_type_;
io::RecordReaderOptions options_;
const std::vector<int64_t> byte_offsets_;
const int op_version_;
};
TFRecordDatasetOp::TFRecordDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx),
op_version_(ctx->def().op() == kTFRecordDataset ? 1 : 2) {}
void TFRecordDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
const Tensor* filenames_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor));
OP_REQUIRES(
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
bool is_gcs_fs = true;
bool is_s3_fs = true;
std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
VLOG(2) << "Reading file: " << filenames_tensor->flat<tstring>()(i);
filenames.push_back(filenames_tensor->flat<tstring>()(i));
is_gcs_fs &= absl::StartsWith(filenames[i], kGcsFsPrefix);
is_s3_fs &= absl::StartsWith(filenames[i], kS3FsPrefix);
metrics::RecordTFDataFilename(kDatasetType, filenames[i]);
}
LogFilenames(filenames);
tstring compression_type;
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType,
&compression_type));
int64_t buffer_size = kUnspecifiedBufferSize;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx,
(buffer_size == kUnspecifiedBufferSize) || (buffer_size >= 0),
errors::InvalidArgument(
"`buffer_size` must be >= 0 (0 == no buffering)"));
std::vector<int64_t> byte_offsets;
if (op_version_ > 1) {
const Tensor* byte_offsets_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kByteOffsets, &byte_offsets_tensor));
OP_REQUIRES(ctx, byte_offsets_tensor->dims() <= 1,
absl::InvalidArgumentError(
"`byte_offsets` must be a scalar or a vector."));
OP_REQUIRES(ctx, byte_offsets_tensor->dims() == filenames_tensor->dims(),
absl::InvalidArgumentError(
"`byte_offsets` must be of same size as `filenames`"));
byte_offsets.reserve(byte_offsets_tensor->NumElements());
for (int i = 0; i < byte_offsets_tensor->NumElements(); ++i) {
byte_offsets.push_back(byte_offsets_tensor->flat<int64_t>()(i));
}
}
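  // When no buffer size is given, pick a default, preferring the larger block
  // sizes recommended for reading from GCS on Cloud TPU or from S3.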
if (buffer_size == kUnspecifiedBufferSize) {
if (is_gcs_fs && is_cloud_tpu_gcs_fs() &&
buffer_size < kCloudTpuBlockSize) {
LOG_FIRST_N(WARNING, 1)
<< "User buffer size is too small for reading Cloud TPU "
<< "TFRecords stored in GCS. Overriding " << buffer_size
<< " to the minimum recommended buffer_size = " << kCloudTpuBlockSize;
buffer_size = kCloudTpuBlockSize;
} else if (is_s3_fs && buffer_size < kS3BlockSize) {
LOG_FIRST_N(WARNING, 1)
<< "User buffer size is too small for reading "
<< "TFRecords stored in S3. Overriding " << buffer_size
<< " to the minimum recommended buffer_size = " << kS3BlockSize;
buffer_size = kS3BlockSize;
} else {
LOG_FIRST_N(INFO, 1)
<< "TFRecordDataset `buffer_size` is unspecified, default to "
<< kDefaultBufferSize;
buffer_size = kDefaultBufferSize;
}
} else {
LOG_FIRST_N(INFO, 1)
<< "The default buffer size is " << kDefaultBufferSize
<< ", which is overridden by the user specified `buffer_size` of "
<< buffer_size;
}
*output = new Dataset(ctx, std::move(filenames), compression_type,
buffer_size, std::move(byte_offsets), op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TFRecordDataset").Device(DEVICE_CPU),
TFRecordDatasetOp);
REGISTER_KERNEL_BUILDER(Name("TFRecordDatasetV2").Device(DEVICE_CPU),
TFRecordDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tf_record_dataset_op.h"
#include <memory>
#include <string>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tf_record_dataset";
constexpr char kOpVersion = 2;
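// Reads `index` records from `filename` and returns the byte offset just past
// them.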
int64_t GetOffset(const std::string& filename, int64_t index) {
Env* env_ = Env::Default();
std::unique_ptr<RandomAccessFile> file_;
std::unique_ptr<io::SequentialRecordReader> reader;
Status s1 = env_->NewRandomAccessFile(filename, &file_);
TF_CHECK_OK(s1) << s1;
reader = std::make_unique<io::SequentialRecordReader>(file_.get());
for (int i = 0; i < index; ++i) {
tstring record;
Status s2 = reader->ReadRecord(&record);
TF_CHECK_OK(s2) << s2;
}
return reader->TellOffset();
}
class TFRecordDatasetParams : public DatasetParams {
public:
TFRecordDatasetParams(std::vector<tstring> filenames,
CompressionType compression_type, int64_t buffer_size,
std::vector<int64_t> byte_offsets, string node_name)
: DatasetParams({DT_STRING}, {PartialTensorShape({})},
std::move(node_name)),
filenames_(std::move(filenames)),
compression_type_(compression_type),
buffer_size_(buffer_size),
byte_offsets_(std::move(byte_offsets)) {
op_version_ = 2;
}
std::vector<Tensor> GetInputTensors() const override {
int num_files = filenames_.size();
int num_byte_offsets = byte_offsets_.size();
return {
CreateTensor<tstring>(TensorShape({num_files}), filenames_),
CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)}),
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<int64_t>(TensorShape({num_byte_offsets}), byte_offsets_)};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
*input_names = {
TFRecordDatasetOp::kFileNames,
TFRecordDatasetOp::kCompressionType,
TFRecordDatasetOp::kBufferSize,
TFRecordDatasetOp::kByteOffsets,
};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return TFRecordDatasetOp::kDatasetType;
}
private:
std::vector<tstring> filenames_;
CompressionType compression_type_;
int64_t buffer_size_;
std::vector<int64_t> byte_offsets_;
};
class TFRecordDatasetOpTest : public DatasetOpsTestBase {};
Status CreateTestFiles(const std::vector<tstring>& filenames,
const std::vector<std::vector<string>>& contents,
CompressionType compression_type) {
if (filenames.size() != contents.size()) {
return tensorflow::errors::InvalidArgument(
"The number of files does not match with the contents");
}
for (int i = 0; i < filenames.size(); ++i) {
CompressionParams params;
params.output_buffer_size = 10;
params.compression_type = compression_type;
std::vector<absl::string_view> records(contents[i].begin(),
contents[i].end());
TF_RETURN_IF_ERROR(WriteDataToTFRecordFile(filenames[i], records, params));
}
return absl::OkStatus();
}
TFRecordDatasetParams TFRecordDatasetParams1() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_ZLIB_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_ZLIB_2")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"},
{"a", "bb", "ccc"}};
CompressionType compression_type = CompressionType::ZLIB;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TFRecordDatasetParams(filenames,
compression_type,
10,
{},
kNodeName);
}
TFRecordDatasetParams TFRecordDatasetParams2() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_GZIP_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_GZIP_2")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"},
{"a", "bb", "ccc"}};
CompressionType compression_type = CompressionType::GZIP;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TFRecordDatasetParams(filenames,
compression_type,
10,
{},
kNodeName);
}
TFRecordDatasetParams TFRecordDatasetParams3() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_2")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"},
{"a", "bb", "ccc"}};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return TFRecordDatasetParams(filenames,
compression_type,
10,
{},
kNodeName);
}
TFRecordDatasetParams TFRecordDatasetParams4() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_1"),
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_2"),
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_3")};
std::vector<std::vector<string>> contents = {
{"1", "22", "333"}, {"a", "bb", "ccc"}, {"x", "yy", "zzz"}};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
absl::Status status = CreateTestFiles(filenames, contents, compression_type);
TF_CHECK_OK(status) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ") << ": " << status;
std::vector<int64_t> byte_offsets = {};
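  // Start file 1 at the beginning, file 2 after one record, and file 3 after
  // two records; record sizes are identical across the files, so offsets
  // measured on file 2 are also valid for file 3.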
byte_offsets.push_back(GetOffset(filenames[0], 0));
byte_offsets.push_back(GetOffset(filenames[1], 1));
byte_offsets.push_back(GetOffset(filenames[1], 2));
return TFRecordDatasetParams(filenames,
compression_type,
10, byte_offsets,
kNodeName);
}
TFRecordDatasetParams InvalidByteOffsets() {
std::vector<tstring> filenames = {
absl::StrCat(testing::TmpDir(), "/tf_record_UNCOMPRESSED_1")};
std::vector<std::vector<string>> contents = {{"1", "22", "333"}};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
absl::Status status = CreateTestFiles(filenames, contents, compression_type);
TF_CHECK_OK(status) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ") << ": " << status;
return TFRecordDatasetParams(filenames,
compression_type,
10, {1},
kNodeName);
}
std::vector<GetNextTestCase<TFRecordDatasetParams>> GetNextTestCases() {
return {
{TFRecordDatasetParams1(),
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams2(),
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams3(),
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams4(),
CreateTensors<tstring>(
TensorShape({}),
{{"1"}, {"22"}, {"333"}, {"bb"}, {"ccc"}, {"zzz"}})}};
}
ITERATOR_GET_NEXT_TEST_P(TFRecordDatasetOpTest, TFRecordDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<TFRecordDatasetParams>> SkipTestCases() {
return {{TFRecordDatasetParams1(),
2, 2, true,
CreateTensors<tstring>(TensorShape({}), {{"333"}})},
{TFRecordDatasetParams1(),
4, 4, true,
CreateTensors<tstring>(TensorShape({}), {{"bb"}})},
{TFRecordDatasetParams1(),
7, 6},
{TFRecordDatasetParams2(),
2, 2, true,
CreateTensors<tstring>(TensorShape({}), {{"333"}})},
{TFRecordDatasetParams2(),
4, 4, true,
CreateTensors<tstring>(TensorShape({}), {{"bb"}})},
{TFRecordDatasetParams2(),
7, 6},
{TFRecordDatasetParams3(),
2, 2, true,
CreateTensors<tstring>(TensorShape({}), {{"333"}})},
{TFRecordDatasetParams3(),
4, 4, true,
CreateTensors<tstring>(TensorShape({}), {{"bb"}})},
{TFRecordDatasetParams3(),
7, 6}};
}
ITERATOR_SKIP_TEST_P(TFRecordDatasetOpTest, TFRecordDatasetParams,
SkipTestCases())
TEST_F(TFRecordDatasetOpTest, DatasetNodeName) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TFRecordDatasetOpTest, DatasetTypeString) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = kOpVersion;
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TFRecordDatasetOp::kDatasetType, params)));
}
TEST_F(TFRecordDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING}));
}
TEST_F(TFRecordDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(TFRecordDatasetOpTest, Cardinality) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(TFRecordDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING}));
}
TEST_F(TFRecordDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(TFRecordDatasetOpTest, IteratorPrefix) {
auto dataset_params = TFRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams iterator_prefix_params;
iterator_prefix_params.op_version = kOpVersion;
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TFRecordDatasetOp::kDatasetType, dataset_params.iterator_prefix(),
iterator_prefix_params)));
}
TEST_F(TFRecordDatasetOpTest, InvalidByteOffsetsToSeek) {
auto dataset_params = InvalidByteOffsets();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kDataLoss);
}
std::vector<IteratorSaveAndRestoreTestCase<TFRecordDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{TFRecordDatasetParams1(),
{0, 2, 7},
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams2(),
{0, 2, 7},
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})},
{TFRecordDatasetParams3(),
{0, 2, 7},
CreateTensors<tstring>(
TensorShape({}), {{"1"}, {"22"}, {"333"}, {"a"}, {"bb"}, {"ccc"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TFRecordDatasetOpTest, TFRecordDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,543 | cpp | tensorflow/tensorflow | take_dataset_op | tensorflow/core/kernels/data/take_dataset_op.cc | tensorflow/core/kernels/data/take_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_TAKE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_TAKE_DATASET_OP_H_
#include <cstdlib>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
class TakeDataset : public DatasetBase {
public:
TakeDataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input);
TakeDataset(DatasetContext::Params params, int64_t count,
const DatasetBase* input);
~TakeDataset() override;
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override;
const DataTypeVector& output_dtypes() const override;
const std::vector<PartialTensorShape>& output_shapes() const override;
string DebugString() const override;
int64_t CardinalityInternal(CardinalityOptions options) const override;
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override;
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override;
Status CheckExternalState() const override;
absl::Status RandomIndexingCompatible() const override;
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override;
private:
class EmptyIterator;
class FiniteIterator;
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
class TakeDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Take";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kCount = "count";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit TakeDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
};
}
}
#endif
#include "tensorflow/core/kernels/data/take_dataset_op.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
constexpr const char* const TakeDatasetOp::kDatasetType;
constexpr const char* const TakeDatasetOp::kInputDataset;
constexpr const char* const TakeDatasetOp::kCount;
constexpr const char* const TakeDatasetOp::kOutputTypes;
constexpr const char* const TakeDatasetOp::kOutputShapes;
constexpr char kCurIndex[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kEmptyTake[] = "EmptyTake";
constexpr char kFiniteTake[] = "FiniteTake";
TakeDataset::TakeDataset(OpKernelContext* ctx, int64_t count,
const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
}
TakeDataset::TakeDataset(DatasetContext::Params params, int64_t count,
const DatasetBase* input)
: DatasetBase(DatasetContext(std::move(params))),
count_(count),
input_(input) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
TakeDataset::~TakeDataset() { input_->Unref(); }
const DataTypeVector& TakeDataset::output_dtypes() const {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& TakeDataset::output_shapes() const {
return input_->output_shapes();
}
string TakeDataset::DebugString() const {
return name_utils::DatasetDebugString(TakeDatasetOp::kDatasetType);
}
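// Unknown input cardinality stays unknown; an infinite input yields count_; an
// infinite (negative) count yields the input's cardinality; otherwise the
// result is the minimum of the two.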
int64_t TakeDataset::CardinalityInternal(CardinalityOptions options) const {
int64_t n = input_->Cardinality(options);
if (n == kUnknownCardinality) {
return kUnknownCardinality;
}
if (n == kInfiniteCardinality) {
return count_;
} else if (count_ == kInfiniteCardinality) {
return n;
}
return std::min(n, count_);
}
Status TakeDataset::InputDatasets(
std::vector<const DatasetBase*>* inputs) const {
inputs->push_back(input_);
return absl::OkStatus();
}
Status TakeDataset::CheckExternalState() const {
return input_->CheckExternalState();
}
Status TakeDataset::Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index, out_tensors);
}
absl::Status TakeDataset::RandomIndexingCompatible() const {
return random_indexing_compatible_;
}
class TakeDataset::EmptyIterator : public DatasetIterator<TakeDataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<TakeDataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
class TakeDataset::FiniteIterator : public DatasetIterator<TakeDataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<TakeDataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
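    // A negative count means "take everything"; otherwise stop after count_
    // elements have been produced.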
while (dataset()->count_ < 0 || i_ < dataset()->count_) {
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (!*end_of_sequence) {
++i_;
return absl::OkStatus();
}
break;
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIndex, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputImplEmpty,
static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
mutex_lock l(mu_);
i_ = *ctx->restored_element_count();
return RestoreInput(ctx, reader, input_impl_);
}
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
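// A count of zero yields an always-empty iterator; any other count (including
// the negative "take all" case) uses the finite iterator.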
std::unique_ptr<IteratorBase> TakeDataset::MakeIteratorInternal(
const string& prefix) const {
if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyTake, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteTake, prefix)});
}
}
Status TakeDataset::AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
TakeDatasetOp::TakeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void TakeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new TakeDataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TakeDataset").Device(DEVICE_CPU), TakeDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/take_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "take_dataset";
class TakeDatasetOpTest : public DatasetOpsTestBase {};
TakeDatasetParams TakeLessTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
4,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
TakeDatasetParams TakeMoreTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
25,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
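// A count of -1 takes every element of the input.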
TakeDatasetParams TakeAllTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
-1,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
TakeDatasetParams TakeNothingTakeDatasetParams() {
return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
0,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<TakeDatasetParams>> GetNextTestCases() {
return {{TakeLessTakeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}})},
{TakeMoreTakeDatasetParams(),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeAllTakeDatasetParams(),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeNothingTakeDatasetParams(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
GetNextTestCases())
TEST_F(TakeDatasetOpTest, DatasetNodeName) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TakeDatasetOpTest, DatasetTypeString) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(TakeDatasetOp::kDatasetType)));
}
TEST_F(TakeDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<TakeDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TakeLessTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeMoreTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeAllTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeNothingTakeDatasetParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<TakeDatasetParams>> CardinalityTestCases() {
return {{TakeLessTakeDatasetParams(),
4},
{TakeMoreTakeDatasetParams(),
10},
{TakeAllTakeDatasetParams(),
10},
{TakeNothingTakeDatasetParams(),
0}};
}
DATASET_CARDINALITY_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
CardinalityTestCases())
TEST_F(TakeDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<TakeDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TakeLessTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeMoreTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeAllTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeNothingTakeDatasetParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<TakeDatasetParams>>
IteratorPrefixTestCases() {
return {{TakeLessTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeLessTakeDatasetParams().iterator_prefix())},
{TakeMoreTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeMoreTakeDatasetParams().iterator_prefix())},
{TakeAllTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeAllTakeDatasetParams().iterator_prefix())},
{TakeNothingTakeDatasetParams(),
name_utils::IteratorPrefix(
"EmptyTake", TakeNothingTakeDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<TakeDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{TakeLessTakeDatasetParams(),
{0, 2, 5, 11},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}})},
{TakeMoreTakeDatasetParams(),
{0, 2, 5, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeAllTakeDatasetParams(),
{0, 2, 5, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeNothingTakeDatasetParams(),
{0, 2, 5, 11},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,544 | cpp | tensorflow/tensorflow | cache_dataset_ops | tensorflow/core/kernels/data/cache_dataset_ops.cc | tensorflow/core/kernels/data/cache_dataset_ops_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_CACHE_DATASET_OPS_H_
#define TENSORFLOW_CORE_KERNELS_DATA_CACHE_DATASET_OPS_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class CacheDatasetOp : public UnaryDatasetOpKernel {
public:
class FileDatasetBase;
class MemoryDatasetBase;
static constexpr const char* const kDatasetType = "Cache";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kFileName = "filename";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit CacheDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class FileDataset;
class FileDatasetV2;
class MemoryDataset;
class MemoryDatasetV2;
const int op_version_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/cache_dataset_ops.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/cache_ops.h"
#include "tensorflow/core/kernels/data/iterator_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace data {
constexpr const char* const CacheDatasetOp::kDatasetType;
constexpr const char* const CacheDatasetOp::kInputDataset;
constexpr const char* const CacheDatasetOp::kFileName;
constexpr const char* const CacheDatasetOp::kOutputTypes;
constexpr const char* const CacheDatasetOp::kOutputShapes;
namespace {
constexpr char kKeyStrFormat[] = "%%%zuzu_%%%zuzu";
constexpr char kPaddingSizeStrFormat[] = "%zu";
constexpr char kFileDatasetPrefix[] = "File";
constexpr char kMode[] = "Mode";
constexpr char kLockFileSuffix[] = ".lockfile";
constexpr char kIterationCompleted[] = "iteration_completed";
constexpr char kCurIndex[] = "cur_index";
constexpr char kShardId[] = "shard_id";
constexpr char kCreatedAt[] = "Created at";
constexpr char kMemoryDatasetPrefix[] = "Memory";
constexpr char kMemoryCache[] = "MemoryCache";
constexpr char kCacheCompleted[] = "cache_completed";
constexpr char kIndex[] = "index";
constexpr char kImpl[] = "Impl";
constexpr char kCacheDataset[] = "CacheDataset";
constexpr char kIncompleteCacheErrorMessage[] =
"The calling iterator did not fully read the dataset being cached. In "
"order to avoid unexpected truncation of the dataset, the partially cached "
"contents of the dataset will be discarded. This can happen if you have "
"an input pipeline similar to `dataset.cache().take(k).repeat()`. You "
"should use `dataset.take(k).cache().repeat()` instead.";
}
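// Materializes the input dataset into an in-memory cache on demand so that
// Get(ctx, index) can serve random-access lookups; the cache grows by
// iterating the input until the requested index is available.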
class DatasetRandomAccessCache {
public:
explicit DatasetRandomAccessCache(const DatasetBase* dataset)
: input_(dataset) {}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) {
if (!iter_resource_) {
TF_ASSIGN_OR_RETURN(iter_resource_,
GetIteratorResourceFromDataset(ctx, input_));
TF_RETURN_IF_ERROR(iter_resource_->SetIteratorFromDataset(ctx, input_));
}
if (index >= cache_.size()) {
TF_RETURN_IF_ERROR(ExtendTempCacheToIndex(index, ctx));
}
*out_tensors = cache_.at(index);
return absl::OkStatus();
}
std::vector<std::vector<Tensor>> GetCacheData() { return cache_; }
private:
Status ExtendTempCacheToIndex(int64 index, OpKernelContext* ctx) {
bool end_of_sequence;
while (cache_.size() <= index) {
std::vector<Tensor> out_tensors;
TF_RETURN_IF_ERROR(
iter_resource_->GetNext(ctx, &out_tensors, &end_of_sequence));
if (end_of_sequence) {
return tensorflow::errors::OutOfRange("Index out of range [0, ",
cache_.size(), "):", index);
}
cache_.push_back(out_tensors);
}
return absl::OkStatus();
}
absl::StatusOr<core::RefCountPtr<IteratorResource>>
GetIteratorResourceFromDataset(OpKernelContext* ctx,
const DatasetBase* dataset) {
FunctionLibraryRuntime* flr;
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> plfr(nullptr);
TF_RETURN_IF_ERROR(
ctx->function_library()->Clone(&flib_def, &plfr, &flr, true));
core::RefCountPtr<IteratorResource> iter_resource(new IteratorResource(
ctx->env(), dataset->output_dtypes(), dataset->output_shapes(),
std::move(device_mgr), std::move(flib_def), std::move(plfr), flr));
return iter_resource;
}
const DatasetBase* input_;
core::RefCountPtr<IteratorResource> iter_resource_;
std::vector<std::vector<Tensor>> cache_;
};
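// Memoizes results of the input dataset's random-access Get() so repeated
// reads of the same element position are served from memory.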
class IteratorRandomAccessCache {
public:
explicit IteratorRandomAccessCache(const DatasetBase* input)
: input_(input) {}
absl::Status Get(AnyContext ctx, size_t element_position,
std::vector<Tensor>* out_tensors) {
if (element_position < cache_.size() && !cache_[element_position].empty()) {
*out_tensors = cache_[element_position];
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(input_->Get(ctx, element_position, out_tensors));
if (element_position >= cache_.size()) {
cache_.resize(element_position + 1);
}
cache_[element_position] = *out_tensors;
return absl::OkStatus();
}
private:
const DatasetBase* input_ = nullptr;
std::vector<std::vector<Tensor>> cache_;
};
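// Common implementation for the file-backed cache: elements are keyed by
// (item index, tensor index) and stored in a TensorBundle rooted at filename_.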
class CacheDatasetOp::FileDatasetBase : public DatasetBase {
public:
FileDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
string filename, Env* env)
: DatasetBase(DatasetContext(ctx)),
input_(input),
filename_(std::move(filename)),
env_(env),
num_tensors_(input->output_dtypes().size()),
tensor_index_padding_size_(StringPaddingSize(num_tensors_)),
item_index_padding_size_(StringPaddingSize(kMaxItems)),
tensor_format_string_(strings::Printf(kKeyStrFormat,
item_index_padding_size_,
tensor_index_padding_size_)) {
input_->Ref();
DCHECK_EQ(item_index_padding_size_, 7);
}
~FileDatasetBase() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.dataset_prefix = kFileDatasetPrefix;
return std::make_unique<FileIterator>(FileIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.dataset_prefix = kFileDatasetPrefix;
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
const DatasetBase* const input_;
const tstring filename_;
private:
static size_t StringPaddingSize(size_t num_tensors) {
return strings::Printf(kPaddingSizeStrFormat, num_tensors - 1).size();
}
string FormatName(size_t item_index, size_t tensor_index) const {
return strings::Printf(tensor_format_string_.c_str(), item_index,
tensor_index);
}
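  // Dispatches to a writer or reader iterator depending on whether the
  // bundle's metadata file already exists; on restore, a cache that was
  // completed after the checkpoint flips a writer back to read mode.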
class FileIterator : public DatasetIterator<FileDatasetBase> {
public:
explicit FileIterator(const Params& params)
: DatasetIterator<FileDatasetBase>(params) {
if (params.dataset->env_
->FileExists(MetaFilename(params.dataset->filename_))
.ok()) {
mode_ = Mode::read;
} else {
mode_ = Mode::write;
}
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return InitializeIterator(ctx);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
return iterator_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kMode, mode_));
return SaveInput(ctx, writer, iterator_);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kMode, &temp));
mode_ = static_cast<Mode>(temp);
}
if (mode_ == Mode::write &&
dataset()
->env_->FileExists(MetaFilename(dataset()->filename_))
.ok()) {
LOG(WARNING)
<< "It looks like the cache was already completely written("
<< MetaFilename(dataset()->filename_)
<< ") after the last checkpoint was saved. Attempting to read "
<< "the cache instead of continuing to write. If this is a "
<< "mistake, please remove the above file and try running again.";
mode_ = Mode::read;
}
TF_RETURN_IF_ERROR(InitializeIterator(ctx));
return RestoreInput(ctx, reader, iterator_);
}
private:
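    // First pass over the input: writes each element's tensors into a sharded
    // bundle, protects the shard with a lockfile, and deletes partial cache
    // files if the iteration never completed.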
class FileWriterIterator : public DatasetIterator<FileDatasetBase> {
public:
explicit FileWriterIterator(const Params& params)
: DatasetIterator<FileDatasetBase>(params),
cur_index_(0),
shard_id_(0),
filename_(
strings::StrCat(params.dataset->filename_, "_", shard_id_)),
lockfile_(strings::StrCat(filename_, kLockFileSuffix)),
lockfile_created_(false),
iteration_completed_(false) {}
~FileWriterIterator() override {
if (!dataset()->env_->FileExists(MetaFilename(filename_)).ok()) {
LOG(WARNING) << kIncompleteCacheErrorMessage;
std::vector<string> cache_files;
Status s = dataset()->env_->GetMatchingPaths(
strings::StrCat(filename_, "*"), &cache_files);
if (!s.ok()) {
LOG(WARNING) << "Failed to get matching files on " << filename_
<< "* : " << s.ToString();
}
for (const string& path : cache_files) {
s = dataset()->env_->DeleteFile(path);
if (!s.ok()) {
LOG(WARNING) << "Failed to delete " << path << " : "
<< s.ToString();
}
}
}
}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(),
&input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
TF_RETURN_IF_ERROR(EnsureLockFileExists(end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(writer_->status());
if (cur_index_ >= kMaxItems) {
Status s = Finish();
if (!s.ok()) {
LOG(ERROR) << s;
}
return errors::InvalidArgument(
"Upstream iterator is producing more than ", kMaxItems,
" items, which is more than the cache limit.");
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence && out_tensors->empty()) {
TF_RETURN_IF_ERROR(Finish());
cur_index_++;
return absl::OkStatus();
}
if (out_tensors->size() != dataset()->num_tensors_) {
return errors::Internal(
"Upstream iterator returned invalid number of tensors. "
"Expected ",
dataset()->num_tensors_, " got: ", out_tensors->size());
}
size_t tensor_index = 0;
for (const Tensor& t : *out_tensors) {
DCHECK_LT(tensor_index, dataset()->num_tensors_);
string key = dataset()->FormatName(cur_index_, tensor_index++);
TF_RETURN_IF_ERROR(writer_->Add(key, t));
}
if (*end_of_sequence) {
TF_RETURN_IF_ERROR(Finish());
}
cur_index_++;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurIndex, cur_index_));
if (iteration_completed_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kIterationCompleted, ""));
return absl::OkStatus();
}
if (lockfile_created_) {
TF_RETURN_IF_ERROR(writer_->Finish());
shard_id_++;
filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_);
lockfile_ = strings::StrCat(filename_, kLockFileSuffix);
lockfile_created_ = false;
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kShardId, shard_id_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t temp;
{
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &temp));
cur_index_ = static_cast<size_t>(temp);
if (cur_index_ != temp) {
return errors::Internal("Invalid value for cur_index ", temp);
}
}
if (reader->Contains(prefix(), kIterationCompleted)) {
iteration_completed_ = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
{
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kShardId, &temp));
shard_id_ = static_cast<size_t>(temp);
if (shard_id_ != temp) {
return errors::Internal("Invalid value for shard_id ", temp);
}
}
filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_);
lockfile_ = strings::StrCat(filename_, kLockFileSuffix);
writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_);
return absl::OkStatus();
}
private:
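      // Refuses to proceed if the cache is already complete or another writer
      // holds the lockfile; otherwise creates the lockfile and lazily opens
      // the BundleWriter.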
Status EnsureLockFileExists(bool* end_of_sequence)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (iteration_completed_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (lockfile_created_) {
return absl::OkStatus();
}
if (dataset()->env_->FileExists(MetaFilename(filename_)).ok()) {
return errors::AlreadyExists("Existing cache files found: \n",
MetaFilename(filename_), "\n",
DataFilename(filename_, 0, 1), "\n",
"To continue delete the above files.");
}
if (dataset()->env_->FileExists(lockfile_).ok()) {
char contents_scratch[151] = {0};
StringPiece contents;
std::unique_ptr<RandomAccessFile> file;
if (dataset()->env_->NewRandomAccessFile(lockfile_, &file).ok()) {
file->Read(0, 150, &contents, contents_scratch).IgnoreError();
}
return errors::AlreadyExists(
"There appears to be a concurrent caching iterator running - "
"cache lockfile already exists ('",
lockfile_,
"'). If you are sure no other running TF computations are "
"using this cache prefix, delete the lockfile and "
"re-initialize the iterator. Lockfile contents: ",
contents);
}
std::unique_ptr<WritableFile> lockfile;
TF_RETURN_IF_ERROR(
dataset()->env_->NewWritableFile(lockfile_, &lockfile));
TF_RETURN_IF_ERROR(lockfile->Append(
strings::StrCat(kCreatedAt, ": ", EnvTime::NowSeconds())));
writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_);
lockfile_created_ = true;
return absl::OkStatus();
}
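      // Marks the iteration complete, merges all shard bundles into the final
      // cache file, and removes the per-shard lockfiles.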
Status Finish() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
iteration_completed_ = true;
TF_RETURN_IF_ERROR(writer_->Finish());
{
std::vector<tstring> prefixes;
prefixes.reserve(shard_id_ + 1);
for (size_t i = 0; i <= shard_id_; ++i) {
prefixes.emplace_back(
strings::StrCat(dataset()->filename_, "_", i));
}
TF_RETURN_IF_ERROR(
MergeBundles(dataset()->env_, prefixes, dataset()->filename_));
}
for (size_t i = 0; i <= shard_id_; ++i) {
TF_RETURN_IF_ERROR(dataset()->env_->DeleteFile(
strings::StrCat(dataset()->filename_, "_", i, kLockFileSuffix)));
}
return absl::OkStatus();
}
mutex mu_;
size_t cur_index_ TF_GUARDED_BY(mu_);
size_t shard_id_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
string filename_;
std::unique_ptr<BundleWriter> writer_ TF_GUARDED_BY(mu_);
string lockfile_ TF_GUARDED_BY(mu_);
bool lockfile_created_ TF_GUARDED_BY(mu_);
bool iteration_completed_ TF_GUARDED_BY(mu_);
};
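    // Second pass: streams tensors back out of the finalized bundle,
    // expecting keys in (item index, tensor index) order.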
class FileReaderIterator : public DatasetIterator<FileDatasetBase> {
public:
explicit FileReaderIterator(const Params& params)
: DatasetIterator<FileDatasetBase>(params),
cur_index_(0),
reader_(dataset()->env_, dataset()->filename_),
iterator_restored_(false) {}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
TF_RETURN_IF_ERROR(reader_.status());
if (!reader_.Valid()) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->resize(dataset()->num_tensors_);
for (size_t i = 0; i < dataset()->num_tensors_; ++i) {
if (!iterator_restored_) {
reader_.Next();
} else {
iterator_restored_ = false;
}
if (!reader_.Valid()) {
out_tensors->clear();
*end_of_sequence = true;
return absl::OkStatus();
}
StringPiece key = reader_.key();
DCHECK_EQ(key, dataset()->FormatName(cur_index_, i));
TF_RETURN_IF_ERROR(reader_.ReadCurrent(&(*out_tensors)[i]));
TF_RETURN_IF_ERROR(reader_.status());
}
cur_index_++;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurIndex, cur_index_));
return absl::OkStatus();
}
Status RestoreInternal(
IteratorContext* ctx,
IteratorStateReader* iterator_state_reader) override {
mutex_lock l(mu_);
{
int64_t temp;
TF_RETURN_IF_ERROR(
iterator_state_reader->ReadScalar(prefix(), kCurIndex, &temp));
cur_index_ = static_cast<size_t>(temp);
if (cur_index_ != temp) {
return errors::Internal("Invalid value for cur_index ", temp);
}
}
if (!reader_.Valid()) {
return errors::Internal("Error initializing BundleReader.");
}
reader_.Seek(dataset()->FormatName(cur_index_, 0));
iterator_restored_ = true;
return absl::OkStatus();
}
private:
mutex mu_;
size_t cur_index_ TF_GUARDED_BY(mu_);
BundleReader reader_ TF_GUARDED_BY(mu_);
bool iterator_restored_ TF_GUARDED_BY(mu_);
};
Status InitializeIterator(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
switch (mode_) {
case Mode::read:
iterator_ =
std::make_unique<FileReaderIterator>(FileReaderIterator::Params{
dataset(), strings::StrCat(prefix(), kImpl)});
break;
case Mode::write:
iterator_ =
std::make_unique<FileWriterIterator>(FileWriterIterator::Params{
dataset(), strings::StrCat(prefix(), kImpl)});
}
TF_RETURN_IF_ERROR(iterator_->InitializeBase(ctx, this));
return iterator_->Initialize(ctx);
}
mutex mu_;
enum Mode { read, write };
Mode mode_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> iterator_ TF_GUARDED_BY(mu_);
};
Env* const env_;
const size_t num_tensors_;
const size_t tensor_index_padding_size_;
static constexpr size_t kMaxItems = 10000000;
const size_t item_index_padding_size_;
const string tensor_format_string_;
};
class CacheDatasetOp::FileDataset : public CacheDatasetOp::FileDatasetBase {
public:
using FileDatasetBase::FileDatasetBase;
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph));
Node* filename = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph, filename}, output));
return absl::OkStatus();
}
};
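// Variant that additionally carries the cache resource handle tensor so it
// can be re-serialized into the dataset graph.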
class CacheDatasetOp::FileDatasetV2 : public CacheDatasetOp::FileDatasetBase {
public:
explicit FileDatasetV2(OpKernelContext* ctx, const DatasetBase* input,
string filename, Env* env,
const Tensor& resource_handle)
: FileDatasetBase(ctx, input, filename, env),
resource_handle_(resource_handle) {}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
Node* filename_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename_node));
Node* resource_handle_node = nullptr;
TF_RETURN_IF_ERROR(b->AddTensor(resource_handle_, &resource_handle_node));
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_node, filename_node, resource_handle_node}, output));
return absl::OkStatus();
}
private:
const Tensor resource_handle_;
};
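// Common implementation for the in-memory cache: elements are held in a
// shared MemoryCache resource instead of being serialized to disk.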
class CacheDatasetOp::MemoryDatasetBase : public DatasetBase {
public:
explicit MemoryDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
std::shared_ptr<MemoryCache> cache)
: DatasetBase(DatasetContext(ctx)),
input_(input),
cache_(std::move(cache)) {
input_->Ref();
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
~MemoryDatasetBase() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.dataset_prefix = kMemoryDatasetPrefix;
return std::make_unique<MemoryIterator>(
MemoryIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)},
cache_.get());
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->outp | #include "tensorflow/core/kernels/data/cache_dataset_ops.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/platform/path.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "cache_dataset";
constexpr char kFileDatasetPrefix[] = "File";
constexpr char kMemoryDatasetPrefix[] = "Memory";
class CacheDatasetParams : public DatasetParams {
public:
template <typename T>
CacheDatasetParams(T input_dataset_params, string filename,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
filename_(filename) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
Tensor filename_tensor =
CreateTensor<tstring>(TensorShape({}), {filename_});
return {filename_tensor};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {CacheDatasetOp::kInputDataset, CacheDatasetOp::kFileName};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return CacheDatasetOp::kDatasetType; }
string filename() const { return filename_; }
private:
string filename_;
};
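// Test fixture that remembers the cache filename so any cache files left on
// disk are cleaned up in the destructor.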
class CacheDatasetOpTest : public DatasetOpsTestBase {
public:
Status Initialize(const DatasetParams& dataset_params) {
TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
auto params = static_cast<const CacheDatasetParams&>(dataset_params);
cache_filename_ = params.filename();
return absl::OkStatus();
}
~CacheDatasetOpTest() override {
if (!cache_filename_.empty()) {
std::vector<string> cache_files;
Status s = device_->env()->GetMatchingPaths(
strings::StrCat(cache_filename_, "*"), &cache_files);
if (!s.ok()) {
LOG(WARNING) << "Failed to get matching files on " << cache_filename_
<< "* : " << s.ToString();
}
for (const string& path : cache_files) {
s = device_->env()->DeleteFile(path);
if (!s.ok()) {
LOG(WARNING) << "Failed to delete " << path << " : " << s.ToString();
}
}
}
}
protected:
tstring cache_filename_;
};
CacheDatasetParams CacheDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return CacheDatasetParams(
std::move(tensor_slice_dataset_params),
io::JoinPath(testing::TmpDir(), "cache_data"),
{DT_INT64},
{PartialTensorShape({3, 1})}, kNodeName);
}
CacheDatasetParams CacheDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice");
return CacheDatasetParams(
std::move(tensor_slice_dataset_params),
io::JoinPath(testing::TmpDir(), "cache_data"),
{DT_INT64},
{PartialTensorShape({})}, kNodeName);
}
CacheDatasetParams CacheDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return CacheDatasetParams(std::move(tensor_slice_dataset_params),
"",
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
CacheDatasetParams CacheDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice");
return CacheDatasetParams(std::move(tensor_slice_dataset_params),
"",
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<CacheDatasetParams>> GetNextTestCases() {
return {{CacheDatasetParams1(),
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams2(),
{}},
{CacheDatasetParams3(),
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams4(),
{}}};
}
class ParameterizedGetNextTest : public CacheDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<CacheDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
end_of_sequence = false;
out_tensors.clear();
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_SUITE_P(CacheDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(CacheDatasetOpTest, DatasetNodeName) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(CacheDatasetOpTest, DatasetTypeString) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(CacheDatasetOp::kDatasetType)));
}
TEST_F(CacheDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<CacheDatasetParams>>
DatasetOutputShapesTestCases() {
return {{CacheDatasetParams1(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams2(),
{PartialTensorShape({})}},
{CacheDatasetParams3(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams4(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<CacheDatasetParams>> CardinalityTestCases() {
return {{CacheDatasetParams1(),
3},
{CacheDatasetParams2(),
0},
{CacheDatasetParams3(),
3},
{CacheDatasetParams4(),
0}};
}
DATASET_CARDINALITY_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
CardinalityTestCases())
TEST_F(CacheDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<CacheDatasetParams>>
IteratorOutputShapesTestCases() {
return {{CacheDatasetParams1(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams2(),
{PartialTensorShape({})}},
{CacheDatasetParams3(),
{PartialTensorShape({3, 1})}},
{CacheDatasetParams4(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(CacheDatasetOpTest, IteratorPrefix) {
auto dataset_params = CacheDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams iterator_prefix_params;
iterator_prefix_params.dataset_prefix =
cache_filename_.empty() ? kMemoryDatasetPrefix : kFileDatasetPrefix;
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
CacheDatasetOp::kDatasetType, dataset_params.iterator_prefix(),
iterator_prefix_params)));
}
std::vector<IteratorSaveAndRestoreTestCase<CacheDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{CacheDatasetParams1(),
{0, 2, 4, 11},
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams2(),
{0, 2, 4, 11},
{}},
{CacheDatasetParams3(),
{0, 2, 4, 11},
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams4(),
{0, 2, 4, 11},
{}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public CacheDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<CacheDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
if (cache_filename_.empty()) {
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
}
end_of_sequence = false;
out_tensors.clear();
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
}
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
int cur_iteration = 0;
auto expected_outputs_it = test_case.expected_outputs.begin();
for (int breakpoint : test_case.breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(out_tensors.back(), *expected_outputs_it));
expected_outputs_it++;
}
cur_iteration++;
}
if (breakpoint >= dataset_->Cardinality()) {
EXPECT_TRUE(end_of_sequence);
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
} else {
EXPECT_FALSE(end_of_sequence);
}
}
}
INSTANTIATE_TEST_SUITE_P(
    CacheDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
    ::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} |
1,545 | cpp | tensorflow/tensorflow | skip_dataset_op | tensorflow/core/kernels/data/skip_dataset_op.cc | tensorflow/core/kernels/data/skip_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_SKIP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_SKIP_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class SkipDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Skip";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kCount = "count";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit SkipDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/skip_dataset_op.h"
#include <cstddef>
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
constexpr const char* const SkipDatasetOp::kDatasetType;
constexpr const char* const SkipDatasetOp::kInputDataset;
constexpr const char* const SkipDatasetOp::kCount;
constexpr const char* const SkipDatasetOp::kOutputTypes;
constexpr const char* const SkipDatasetOp::kOutputShapes;
constexpr char kEmptySkip[] = "EmptySkip";
constexpr char kFiniteSkip[] = "FiniteSkip";
constexpr char kCurIndex[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
class SkipDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
if (input_ != nullptr && count >= 0) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
} else {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("Global shuffling does not support empty dataset or "
"skipping the entire dataset. Got skip(",
count, ")."));
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptySkip, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteSkip, prefix)});
}
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ < 0 ? 0 : std::max(int64_t{0}, n - count_);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index + count_, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
private:
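  // Used when count < 0 ("skip the entire dataset"): always reports end of
  // sequence.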
class EmptyIterator : public DatasetIterator<Dataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
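  // Skips the first count_ elements via Skip() on the first call, then
  // forwards the remaining input elements unchanged.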
class FiniteIterator : public DatasetIterator<Dataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return Get(ctx, out_tensors, end_of_sequence);
}
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (i_ < dataset()->count_) {
int num_skipped;
TF_RETURN_IF_ERROR(input_impl_->Skip(ctx, dataset()->count_ - i_,
end_of_sequence, &num_skipped));
i_ += num_skipped;
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
input_impl_.reset();
}
return absl::OkStatus();
}
absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
ctx_with_index_mapper.MergeCheckpoint();
return absl::OkStatus();
}
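    // Under global shuffling, shift every shuffled position forward by the
    // skip count so lookups land past the skipped prefix.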
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t skip_count = dataset()->count_;
return [parent_index_mapper,
skip_count](size_t element_position) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(element_position));
return shuffled_element_position + skip_count;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIndex, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
mutex_lock l(mu_);
return RestoreInput(ctx, reader, input_impl_);
}
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
SkipDatasetOp::SkipDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void SkipDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SkipDataset").Device(DEVICE_CPU), SkipDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/skip_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "skip_dataset";
class SkipDatasetParams : public DatasetParams {
public:
template <typename T>
SkipDatasetParams(T input_dataset_params, int64_t count,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
count_(count) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {count_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(SkipDatasetOp::kInputDataset);
input_names->emplace_back(SkipDatasetOp::kCount);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return SkipDatasetOp::kDatasetType; }
private:
int64_t count_;
};
class SkipDatasetOpTest : public DatasetOpsTestBase {};
SkipDatasetParams SkipDatasetParams1() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
4,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams2() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
25,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams3() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
10,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams4() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
0,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
SkipDatasetParams SkipDatasetParams5() {
return SkipDatasetParams(
RangeDatasetParams(0, 10, 1),
-1,
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<SkipDatasetParams>> GetNextTestCases() {
return {
{SkipDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams2(),
{}},
{SkipDatasetParams3(),
{}},
{SkipDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams5(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
GetNextTestCases())
TEST_F(SkipDatasetOpTest, DatasetNodeName) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SkipDatasetOpTest, DatasetTypeString) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(SkipDatasetOp::kDatasetType)));
}
TEST_F(SkipDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(SkipDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<SkipDatasetParams>> CardinalityTestCases() {
return {{SkipDatasetParams1(),
6},
{SkipDatasetParams2(),
0},
{SkipDatasetParams3(),
0},
{SkipDatasetParams4(),
10},
{SkipDatasetParams5(),
0}};
}
DATASET_CARDINALITY_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
CardinalityTestCases())
TEST_F(SkipDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(SkipDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
std::vector<IteratorPrefixTestCase<SkipDatasetParams>>
IteratorPrefixTestCases() {
return {{SkipDatasetParams1(),
name_utils::IteratorPrefix("FiniteSkip",
SkipDatasetParams1().iterator_prefix())},
{SkipDatasetParams2(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams2().iterator_prefix())},
{SkipDatasetParams3(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams3().iterator_prefix())},
{SkipDatasetParams4(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams4().iterator_prefix())},
{SkipDatasetParams5(),
name_utils::IteratorPrefix(
"EmptySkip", SkipDatasetParams5().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<SkipDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{SkipDatasetParams1(),
{0, 2, 7},
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams2(),
{0, 2, 5},
{}},
{SkipDatasetParams3(),
{0, 2, 5},
{}},
{SkipDatasetParams4(),
{0, 2, 5, 11},
CreateTensors<int64_t>(
TensorShape{}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{SkipDatasetParams5(),
{0, 2, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,546 | cpp | tensorflow/tensorflow | tensor_dataset_op | tensorflow/core/kernels/data/tensor_dataset_op.cc | tensorflow/core/kernels/data/tensor_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_TENSOR_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_TENSOR_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class TensorDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Tensor";
static constexpr const char* const kComponents = "components";
static constexpr const char* const kToutput_types = "Toutput_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit TensorDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/tensor_dataset_op.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const TensorDatasetOp::kDatasetType;
constexpr const char* const TensorDatasetOp::kComponents;
constexpr const char* const TensorDatasetOp::kToutput_types;
constexpr const char* const TensorDatasetOp::kOutputShapes;
constexpr char kFromTensor[] = "FromTensor";
constexpr char kProduced[] = "produced";
class TensorDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors)
: DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)) {
dtypes_.reserve(tensors_.size());
shapes_.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
dtypes_.push_back(t.dtype());
shapes_.emplace_back(t.shape().dim_sizes());
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kFromTensor, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(std::make_unique<IndexSplitProvider>(1));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return 1LL;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
*out_tensors = tensors_;
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> components;
components.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
components.emplace_back(node);
}
AttrValue dtypes;
b->BuildAttrValue(dtypes_, &dtypes);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},
{{kToutput_types, dtypes}}, output));
return absl::OkStatus();
}
private:
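  // Emits the stored tensors exactly once; when a split provider is present,
  // the single element is claimed as one split before being produced.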
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
produced_(false),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (!ctx->split_providers().empty()) {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
mutex_lock l(mu_);
if (split_provider_) {
bool end_of_splits;
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, &end_of_splits));
if (end_of_splits) {
produced_ = true;
}
}
if (!produced_) {
*out_tensors = dataset()->tensors_;
produced_ = true;
*end_of_sequence = false;
return absl::OkStatus();
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kProduced,
static_cast<int64_t>(produced_)));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(ctx);
}
mutex_lock l(mu_);
int64_t produced;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kProduced, &produced));
produced_ = static_cast<bool>(produced);
return absl::OkStatus();
}
private:
mutex mu_;
std::shared_ptr<SplitProvider> split_provider_;
bool produced_ TF_GUARDED_BY(mu_);
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
DataTypeVector dtypes_;
std::vector<PartialTensorShape> shapes_;
};
TensorDatasetOp::TensorDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutput_types, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void TensorDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));
std::vector<Tensor> components(inputs.begin(), inputs.end());
*output = new Dataset(ctx, std::move(components));
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TensorDataset").Device(DEVICE_CPU),
TensorDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tensor_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_dataset";
class TensorDatasetParams : public DatasetParams {
public:
TensorDatasetParams(std::vector<Tensor> components, string node_name)
: DatasetParams(TensorDtypes(components), TensorShapes(components),
std::move(node_name)),
components_(std::move(components)) {}
std::vector<Tensor> GetInputTensors() const override { return components_; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(components_.size());
for (int i = 0; i < components_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(TensorDatasetOp::kComponents, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"Toutput_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return TensorDatasetOp::kDatasetType; }
private:
DataTypeVector TensorDtypes(const std::vector<Tensor>& input_components) {
DataTypeVector dtypes;
for (const auto& component : input_components) {
dtypes.emplace_back(component.dtype());
}
return dtypes;
}
std::vector<PartialTensorShape> TensorShapes(
const std::vector<Tensor>& input_components) {
std::vector<PartialTensorShape> shapes;
for (const auto& component : input_components) {
shapes.emplace_back(component.shape());
}
return shapes;
}
public:
std::vector<Tensor> components_;
};
class TensorDatasetOpTest : public DatasetOpsTestBase {};
std::vector<Tensor> PlainTensors() {
return {CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3}),
CreateTensor<double>(TensorShape({}), {37.0}),
CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})};
}
TensorDatasetParams PlainTensorDatasetParams() {
return {PlainTensors(),
kNodeName};
}
TensorDatasetParams NestedTensorDatasetParams() {
return {
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})},
kNodeName};
}
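// The nested params store whole tensors inside DT_VARIANT scalars, so the
// checks below unwrap them via `scalar<Variant>()().get<Tensor>()` instead of
// comparing the variants directly.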
std::vector<GetNextTestCase<TensorDatasetParams>> GetNextTestCases() {
return {{PlainTensorDatasetParams(),
PlainTensors()},
{NestedTensorDatasetParams(),
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})}}};
}
class ParameterizedGetNextTest : public TensorDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<TensorDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
EXPECT_EQ(out_tensors.size(), test_case.expected_outputs.size());
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i].scalar<Variant>()().get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(out_tensors[i], test_case.expected_outputs[i]));
}
}
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence));
EXPECT_TRUE(end_of_sequence);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_CASE_P(TensorDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(TensorDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TensorDatasetOp::kDatasetType)));
}
TEST_F(TensorDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TensorDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(TensorDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
TEST_F(TensorDatasetOpTest, Cardinality) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(1));
}
TEST_F(TensorDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(TensorDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(TensorDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainTensorDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
"FromTensor", dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TensorDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PlainTensorDatasetParams(),
{0, 1, 2},
PlainTensors()},
{NestedTensorDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(TensorShape({}),
{CreateTensor<double>(TensorShape({2, 2}),
{1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({1, 3}), {1, 2, 3})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public TensorDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<TensorDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
int cardinality = 1;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
if (breakpoint >= cardinality) {
EXPECT_TRUE(end_of_sequence);
} else {
EXPECT_FALSE(end_of_sequence);
}
}
EXPECT_EQ(out_tensors.size(), test_case.expected_outputs.size());
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i].scalar<Variant>()().get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(out_tensors[i], test_case.expected_outputs[i]));
}
}
}
INSTANTIATE_TEST_CASE_P(TensorDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(TensorDatasetOpTest, Splitting) {
auto params = PlainTensorDatasetParams();
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, PlainTensors()));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 2,
CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 0,
PlainTensors()));
}
}
}
} |
1,547 | cpp | tensorflow/tensorflow | interleave_dataset_op | tensorflow/core/kernels/data/interleave_dataset_op.cc | tensorflow/core/kernels/data/interleave_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_INTERLEAVE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_INTERLEAVE_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class InterleaveDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Interleave";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kCycleLength = "cycle_length";
static constexpr const char* const kBlockLength = "block_length";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit InterleaveDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
const int graph_def_version_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
};
}
}
#endif
#include "tensorflow/core/kernels/data/interleave_dataset_op.h"
#include <algorithm>
#include <memory>
#include <optional>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const InterleaveDatasetOp::kDatasetType;
constexpr const char* const InterleaveDatasetOp::kInputDataset;
constexpr const char* const InterleaveDatasetOp::kOtherArguments;
constexpr const char* const InterleaveDatasetOp::kCycleLength;
constexpr const char* const InterleaveDatasetOp::kBlockLength;
constexpr const char* const InterleaveDatasetOp::kFunc;
constexpr const char* const InterleaveDatasetOp::kTarguments;
constexpr const char* const InterleaveDatasetOp::kOutputTypes;
constexpr const char* const InterleaveDatasetOp::kOutputShapes;
constexpr char kCycleIndex[] = "cycle_index";
constexpr char kBlockIndex[] = "block_index";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumOpen[] = "num_open";
constexpr char kArgsSize[] = "args_size";
constexpr char kArgsList[] = "args_list_";
constexpr char kCurrentElementsUninitialized[] =
"current_elements_uninitialized";
constexpr char kNextInputElementIndex[] = "next_input_element_index";
constexpr char kLastCheckpointedInputElementIndex[] =
"last_checkpointed_input_element_index";
constexpr char kInputElementIndices[] = "input_element_indices";
class InterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func, int64_t cycle_length,
int64_t block_length, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
cycle_length_(cycle_length),
block_length_(block_length),
output_types_(output_types),
output_shapes_(output_shapes),
traceme_metadata_(
{{"block_length",
strings::Printf("%lld", static_cast<long long>(block_length))},
{"cycle_length",
strings::Printf("%lld", static_cast<long long>(cycle_length))}}) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
Node* cycle_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(cycle_length_, &cycle_length_node));
Node* block_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(block_length_, &block_length_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_node}, {2, cycle_length_node}, {3, block_length_node}},
{{1, other_arguments}},
{{kFunc, f}, {kTarguments, other_arguments_types_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
current_elements_(params.dataset->cycle_length_) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
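    // The iterator keeps up to `cycle_length_` element iterators open and
    // pulls `block_length_` outputs from each before rotating to the next
    // slot. For example, interleaving three expanded inputs {0,1,2}, {3,4,5},
    // {6,7,8} with cycle_length = 2 and block_length = 1 yields
    // 0, 3, 1, 4, 2, 5, 6, 7, 8 (the third input is only opened once one of
    // the first two is exhausted).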
void AdvanceToNextInCycle() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
block_index_ = 0;
cycle_index_ = (cycle_index_ + 1) % dataset()->cycle_length_;
}
Status AdvancePosition(int num_elements) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
block_index_ += num_elements;
if (block_index_ == dataset()->block_length_) {
AdvanceToNextInCycle();
return absl::OkStatus();
} else if (block_index_ < dataset()->block_length_) {
return absl::OkStatus();
}
return absl::InternalError(
"Something went wrong as `block_index_` should never be larger than "
"`dataset()->block_length_`");
}
void AdvancePosition() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
++block_index_;
if (block_index_ == dataset()->block_length_) {
AdvanceToNextInCycle();
}
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
while (!end_of_input_ || num_open_ > 0) {
if (current_elements_[cycle_index_]) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
CurrentElement& current_element = *current_elements_[cycle_index_];
TF_RETURN_IF_ERROR(current_element.iterator->GetNext(
&nested_ctx, out_tensors, &end_of_element));
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
AdvancePosition();
*end_of_sequence = false;
return absl::OkStatus();
} else {
ctx->PurgeCheckpoint(current_element.iterator->prefix());
UpdateSymbolicCheckpointAfterCurrentElementFinished(
*ctx, *current_elements_[cycle_index_]);
current_elements_[cycle_index_].reset();
--num_open_;
AdvanceToNextInCycle();
}
} else {
TF_RETURN_IF_ERROR(MoveToNextElement(ctx));
}
}
ctx->MergeCheckpoint(input_ckpt_.get());
*end_of_sequence = true;
return absl::OkStatus();
}
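    // Skip follows the same cycle/block rotation as GetNext: at most the
    // remaining block budget is skipped from the current element before
    // advancing, so skipping N elements lands on the same position that N
    // calls to GetNext would have reached.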
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
mutex_lock l(mu_);
*num_skipped = 0;
while (!end_of_input_ || num_open_ > 0) {
if (current_elements_[cycle_index_]) {
CurrentElement& current_element = *current_elements_[cycle_index_];
int element_num_to_skip = num_to_skip - *num_skipped;
if (element_num_to_skip > dataset()->block_length_ - block_index_) {
element_num_to_skip = dataset()->block_length_ - block_index_;
}
bool end_of_element = false;
int element_num_skipped = 0;
auto nested_ctx = MakeNestedIteratorContext(ctx);
TF_RETURN_IF_ERROR(current_element.iterator->Skip(
&nested_ctx, element_num_to_skip, &end_of_element,
&element_num_skipped));
*num_skipped += element_num_skipped;
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
TF_RETURN_IF_ERROR(AdvancePosition(element_num_skipped));
} else {
ctx->PurgeCheckpoint(current_element.iterator->prefix());
UpdateSymbolicCheckpointAfterCurrentElementFinished(
*ctx, *current_elements_[cycle_index_]);
current_elements_[cycle_index_].reset();
--num_open_;
AdvanceToNextInCycle();
}
if (num_to_skip == *num_skipped) {
*end_of_sequence = false;
return absl::OkStatus();
}
} else {
TF_RETURN_IF_ERROR(MoveToNextElement(ctx));
}
}
ctx->MergeCheckpoint(input_ckpt_.get());
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args), {model::MakeNonTunableParameter(
kCycleLength, dataset()->cycle_length_)});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCycleIndex, cycle_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBlockIndex, block_index_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kEndOfInput, static_cast<int64_t>(end_of_input_)));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNumOpen, num_open_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNextInputElementIndex,
next_input_element_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kLastCheckpointedInputElementIndex,
last_checkpointed_input_element_index_));
TF_RETURN_IF_ERROR(SaveCurrentElements(ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t cycle_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCycleIndex, &cycle_index));
cycle_index_ = size_t(cycle_index);
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBlockIndex, &block_index_));
int64_t end_of_input;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kEndOfInput, &end_of_input));
end_of_input_ = static_cast<bool>(end_of_input);
int64_t num_open;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNumOpen, &num_open));
num_open_ = size_t(num_open);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNextInputElementIndex,
&next_input_element_index_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kLastCheckpointedInputElementIndex,
&last_checkpointed_input_element_index_));
int64_t cycle_length = dataset()->cycle_length_;
std::vector<InputOffset> input_element_indices(cycle_length, -1);
std::vector<std::optional<MemoryCheckpoint>> checkpoints(cycle_length);
std::vector<std::vector<Tensor>> args(cycle_length);
if (ctx->symbolic_checkpoint()) {
auto status_or = RestoreInputOffsets(*reader);
if (!status_or.ok()) {
return status_or.status();
}
auto& input_offset_w_cycle_idxs = status_or.value();
TF_RETURN_IF_ERROR(RestoreArgsListAndInputOffsetCycleIdxMap(
*ctx, input_element_indices, checkpoints, args,
input_offset_w_cycle_idxs));
}
TF_RETURN_IF_ERROR(
RestoreCurrentElements(ctx, reader, input_element_indices,
std::move(checkpoints), std::move(args)));
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
using InputOffset = int64_t;
using CycleIdx = int;
struct CurrentElement;
struct InputOffsetWithCycleIdx;
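    // With symbolic checkpointing, nested iterator prefixes are keyed by the
    // input element offset (unique across the run) rather than by the cycle
    // slot, which is reused as elements finish.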
int64_t GetSubIteratorIndexForPrefix(bool symbolic_checkpoint)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return GetSubIteratorIndexForPrefix(symbolic_checkpoint, cycle_index_,
next_input_element_index_);
}
int64_t GetSubIteratorIndexForPrefix(
bool symbolic_checkpoint, int64_t cycle_index,
std::optional<int64_t> input_element_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (symbolic_checkpoint) ? (input_element_index.value())
: (cycle_index);
}
Status SaveCurrentElements(SerializationContext* ctx,
IteratorStateWriter* writer)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (int idx = 0; idx < current_elements_.size(); idx++) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(),
strings::StrCat(kCurrentElementsUninitialized, "[", idx, "]"),
!current_elements_[idx]));
if (!current_elements_[idx]) {
continue;
}
if (!ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(
SaveInput(ctx, writer, current_elements_[idx]->iterator));
const auto& args = current_elements_[idx]->args;
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kArgsSize, "[", idx, "]"),
args.size()));
for (int i = 0; i < args.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kArgsList, "[", idx, "][", i, "]"),
args[i]));
}
} else {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kInputElementIndices, "[", idx, "]"),
current_elements_[idx]->input_element_index));
}
}
return absl::OkStatus();
}
absl::StatusOr<std::vector<InputOffsetWithCycleIdx>> RestoreInputOffsets(
IteratorStateReader& reader) {
std::vector<InputOffsetWithCycleIdx> input_offsets;
int64_t cycle_length = dataset()->cycle_length_;
for (int cycle_idx = 0; cycle_idx < cycle_length; cycle_idx++) {
int64_t current_element_uninitialized;
TF_RETURN_IF_ERROR(reader.ReadScalar(
prefix(),
strings::StrCat(kCurrentElementsUninitialized, "[", cycle_idx, "]"),
            &current_element_uninitialized));
if (!current_element_uninitialized) {
int64_t input_element_index;
TF_RETURN_IF_ERROR(reader.ReadScalar(
prefix(),
strings::StrCat(kInputElementIndices, "[", cycle_idx, "]"),
&input_element_index));
input_offsets.push_back(
InputOffsetWithCycleIdx{input_element_index, cycle_idx});
}
}
return std::move(input_offsets);
}
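    // Symbolic checkpoints do not serialize the arguments of the open
    // elements, so restore regenerates them: starting from the last
    // checkpointed input offset, the input is skipped forward to each saved
    // offset (in ascending order) and GetNext re-produces the args. This only
    // works if the input yields elements deterministically, which is why the
    // errors below ask the user to verify determinism.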
Status RestoreArgsListAndInputOffsetCycleIdxMap(
IteratorContext& ctx, std::vector<InputOffset>& input_element_indices,
std::vector<std::optional<MemoryCheckpoint>>& checkpoints,
std::vector<std::vector<Tensor>>& args,
std::vector<InputOffsetWithCycleIdx>& input_offset_w_cycle_idxs)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (input_element_indices.size() != dataset()->cycle_length_ ||
checkpoints.size() != dataset()->cycle_length_ ||
args.size() != dataset()->cycle_length_) {
return absl::FailedPreconditionError(
"input_element_indices, checkpoints and args should be of same "
"length");
}
std::sort(input_offset_w_cycle_idxs.begin(),
input_offset_w_cycle_idxs.end(),
[](const InputOffsetWithCycleIdx& lhs,
const InputOffsetWithCycleIdx& rhs) {
return lhs.input_element_index < rhs.input_element_index;
});
bool end_of_sequence = false;
int num_to_skip;
int num_actually_skip;
InputOffset prev_input_element_index =
last_checkpointed_input_element_index_;
auto input_ctx = std::make_unique<IteratorContext>(ctx);
for (const auto& input_offset_w_cycle_idx : input_offset_w_cycle_idxs) {
InputOffset input_element_index =
input_offset_w_cycle_idx.input_element_index;
CycleIdx cycle_idx = input_offset_w_cycle_idx.cycle_idx;
if (input_element_index >= next_input_element_index_) {
return absl::FailedPreconditionError(
"input_element_index < next_input_element_index_ must be "
"met.");
}
num_to_skip = input_element_index - prev_input_element_index - 1;
TF_RETURN_IF_ERROR(input_impl_->Skip(input_ctx.get(), num_to_skip,
&end_of_sequence,
&num_actually_skip));
if (end_of_sequence || num_actually_skip != num_to_skip) {
return absl::InternalError(
"Unexpected end of sequence while symbolically restoring "
"InterleaveDataset. Please verify that the input produces data "
"deterministically.");
}
std::vector<Tensor> current_element_args;
TF_RETURN_IF_ERROR(input_impl_->GetNext(
            input_ctx.get(), &current_element_args, &end_of_sequence));
prev_input_element_index = input_element_index;
checkpoints[cycle_idx].emplace(*input_ctx->checkpoint());
args[cycle_idx] = std::move(current_element_args);
input_element_indices[cycle_idx] = input_element_index;
}
num_to_skip = next_input_element_index_ - prev_input_element_index - 1;
TF_RETURN_IF_ERROR(input_impl_->Skip(
input_ctx.get(), num_to_skip, &end_of_sequence, &num_actually_skip));
if (end_of_sequence || num_actually_skip != num_to_skip) {
return absl::InternalError(
"Unexpected end of sequence while symbolically restoring "
"InterleaveDataset. Please verify that the input produces data "
"deterministically.");
}
input_ckpt_->Merge(input_ctx->checkpoint());
return absl::OkStatus();
}
Status RestoreCurrentElements(
IteratorContext* ctx, IteratorStateReader* reader,
std::vector<InputOffset>& input_element_indices,
std::vector<std::optional<MemoryCheckpoint>>&& checkpoints,
std::vector<std::vector<Tensor>>&& args)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (int idx = 0; idx < current_elements_.size(); idx++) {
int64_t current_element_uninitialized;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(),
strings::StrCat(kCurrentElementsUninitialized, "[", idx, "]"),
            &current_element_uninitialized));
if (!current_element_uninitialized) {
if (!ctx->symbolic_checkpoint()) {
int64_t args_size;
std::vector<Tensor> current_element_args;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kArgsSize, "[", idx, "]"),
&args_size));
current_element_args.resize(args_size);
for (int i = 0; i < args_size; i++) {
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix(),
strings::StrCat(kArgsList, "[", idx, "][", i, "]"),
                &current_element_args[i]));
}
args[idx] = std::move(current_element_args);
}
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, this, args[idx],
GetSubIteratorIndexForPrefix(ctx->symbolic_checkpoint(), idx,
input_element_indices[idx]),
*instantiated_captured_func_, prefix(), &iterator,
nullptr));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, iterator));
current_elements_[idx].emplace(
std::move(checkpoints[idx]), std::move(args[idx]),
input_element_indices[idx], std::move(iterator));
} else {
current_elements_[idx].reset();
}
}
return absl::OkStatus();
}
Status MoveToNextElement(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!end_of_input_) {
IteratorContext input_ctx = MakeNestedIteratorContext(ctx);
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(
input_impl_->GetNext(&input_ctx, &args, &end_of_input_));
input_ckpt_->Merge(input_ctx.checkpoint());
if (!end_of_input_) {
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, this, args,
GetSubIteratorIndexForPrefix(ctx->symbolic_checkpoint()),
*instantiated_captured_func_, prefix(), &iterator, model_node()));
++num_open_;
std::optional<MemoryCheckpoint> checkpoint;
if (ctx->symbolic_checkpoint()) {
checkpoint.emplace(*input_ckpt_);
}
current_elements_[cycle_index_].emplace(
std::move(checkpoint), std::move(args), next_input_element_index_,
std::move(iterator));
next_input_element_index_++;
}
} else {
AdvanceToNextInCycle();
}
return absl::OkStatus();
}
    bool IsEarliestInputElementIndex(InputOffset input_element_index) {
InputOffset min_input_element_index = input_element_index;
for (int i = 0; i < current_elements_.size(); ++i) {
if (!current_elements_[i]) continue;
if (current_elements_[i]->input_element_index <
min_input_element_index) {
min_input_element_index = current_elements_[i]->input_element_index;
}
}
return (min_input_element_index == input_element_index);
}
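    // When an element finishes under symbolic checkpointing, its buffered
    // input checkpoint is merged into the outer context only if no element
    // with a smaller input offset is still open. This keeps
    // `last_checkpointed_input_element_index_` monotonic, so restore never
    // has to rewind the input.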
void UpdateSymbolicCheckpointAfterCurrentElementFinished(
IteratorContext& ctx, CurrentElement& current_element)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!ctx.symbolic_checkpoint()) {
return;
}
InputOffset input_element_index = current_element.input_element_index;
if (IsEarliestInputElementIndex(input_element_index)) {
MemoryCheckpoint& checkpoint =
const_cast<MemoryCheckpoint&>(current_element.checkpoint.value());
ctx.MergeCheckpoint(&checkpoint);
last_checkpointed_input_element_index_ = input_element_index;
}
}
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
struct CurrentElement {
const std::optional<MemoryCheckpoint> checkpoint = std::nullopt;
const InputOffset input_element_index = -1;
const std::vector<Tensor> args;
std::unique_ptr<IteratorBase> iterator = nullptr;
explicit CurrentElement(std::optional<MemoryCheckpoint>&& checkpoint,
std::vector<Tensor>&& args,
InputOffset input_element_index,
std::unique_ptr<IteratorBase> iterator)
: checkpoint(std::move(checkpoint)),
input_element_index(input_element_index),
args(std::move(args)),
iterator(std::move(iterator)) {}
CurrentElement(CurrentElement&& other) = default;
};
struct InputOffsetWithCycleIdx {
InputOffset input_element_index;
CycleIdx cycle_idx;
};
std::vector<std::optional<CurrentElement>> current_elements_; | #include "tensorflow/core/kernels/data/interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "interleave_dataset";
class InterleaveDatasetParams : public DatasetParams {
public:
template <typename T>
InterleaveDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
int64_t cycle_length, int64_t block_length,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
cycle_length_(cycle_length),
block_length_(block_length),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {cycle_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {block_length_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size() + 2);
input_names->emplace_back(InterleaveDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(InterleaveDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(InterleaveDatasetOp::kCycleLength);
input_names->emplace_back(InterleaveDatasetOp::kBlockLength);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override {
return InterleaveDatasetOp::kDatasetType;
}
private:
std::vector<Tensor> other_arguments_;
int64_t cycle_length_;
int64_t block_length_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class InterleaveDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MakeTensorSliceDatasetFunc(
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
return FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{"Toutput_types", output_types},
{"output_shapes", output_shapes}});
}
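// Each params below slices a 3x3x1 tensor into three 3x1 elements and maps
// every element to another TensorSliceDataset, so each input expands to three
// rows; the cases vary cycle_length, block_length and the element dtype.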
InterleaveDatasetParams InterleaveDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
3,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
5,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(TensorShape{3, 3, 1},
{"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(TensorShape{3, 3, 1},
{"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
3,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParams7() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(TensorShape{3, 3, 1},
{"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
5,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParamsWithInvalidCycleLength() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
0,
5,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
InterleaveDatasetParams InterleaveDatasetParamsWithInvalidBlockLength() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return InterleaveDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
-1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<InterleaveDatasetParams>> GetNextTestCases() {
return {
{InterleaveDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({1}), {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams3(),
CreateTensors<int64_t>(
TensorShape({1}), {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams4(),
CreateTensors<int64_t>(
TensorShape({1}), {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams5(),
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams6(),
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams7(),
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})}};
}
ITERATOR_GET_NEXT_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<InterleaveDatasetParams>> SkipTestCases() {
return {{InterleaveDatasetParams1(),
0, 0, true,
CreateTensors<int64_t>(TensorShape({1}), {{0}})},
{InterleaveDatasetParams1(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{5}})},
{InterleaveDatasetParams1(),
10, 9},
{InterleaveDatasetParams2(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{5}})},
{InterleaveDatasetParams2(),
10, 9},
{InterleaveDatasetParams3(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{7}})},
{InterleaveDatasetParams3(),
10, 9},
{InterleaveDatasetParams4(),
5, 5, true,
CreateTensors<int64_t>(TensorShape({1}), {{7}})},
{InterleaveDatasetParams4(),
10, 9},
{InterleaveDatasetParams5(),
3, 3, true,
CreateTensors<tstring>(TensorShape({1}), {{"e"}})},
{InterleaveDatasetParams5(),
10, 9},
{InterleaveDatasetParams6(),
3, 3, true,
CreateTensors<tstring>(TensorShape({1}), {{"d"}})},
{InterleaveDatasetParams6(),
10, 9},
{InterleaveDatasetParams7(),
3, 3, true,
CreateTensors<tstring>(TensorShape({1}), {{"d"}})},
{InterleaveDatasetParams7(),
10, 9}};
}
ITERATOR_SKIP_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
SkipTestCases())
TEST_F(InterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = InterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(InterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = InterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(InterleaveDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<InterleaveDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{InterleaveDatasetParams1(),
{DT_INT64}},
{InterleaveDatasetParams2(),
{DT_INT64}},
{InterleaveDatasetParams3(),
{DT_INT64}},
{InterleaveDatasetParams4(),
{DT_INT64}},
{InterleaveDatasetParams5(),
{DT_STRING}},
{InterleaveDatasetParams6(),
{DT_STRING}},
{InterleaveDatasetParams7(),
{DT_STRING}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<InterleaveDatasetParams>>
DatasetOutputShapesTestCases() {
return {{InterleaveDatasetParams1(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams2(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams3(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams4(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams5(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams6(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams7(),
{PartialTensorShape({1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<InterleaveDatasetParams>>
CardinalityTestCases() {
return {{InterleaveDatasetParams1(),
kUnknownCardinality},
{InterleaveDatasetParams2(),
kUnknownCardinality},
{InterleaveDatasetParams3(),
kUnknownCardinality},
{InterleaveDatasetParams4(),
kUnknownCardinality},
{InterleaveDatasetParams5(),
kUnknownCardinality},
{InterleaveDatasetParams6(),
kUnknownCardinality},
{InterleaveDatasetParams7(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<InterleaveDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{InterleaveDatasetParams1(),
{DT_INT64}},
{InterleaveDatasetParams2(),
{DT_INT64}},
{InterleaveDatasetParams3(),
{DT_INT64}},
{InterleaveDatasetParams4(),
{DT_INT64}},
{InterleaveDatasetParams5(),
{DT_STRING}},
{InterleaveDatasetParams6(),
{DT_STRING}},
{InterleaveDatasetParams7(),
{DT_STRING}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<InterleaveDatasetParams>>
IteratorOutputShapesTestCases() {
return {{InterleaveDatasetParams1(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams2(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams3(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams4(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams5(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams6(),
{PartialTensorShape({1})}},
{InterleaveDatasetParams7(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(InterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = InterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
InterleaveDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<InterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{InterleaveDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams5(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams6(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams7(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(InterleaveDatasetOpTest,
InterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(InterleaveDatasetOpTest, InvalidCycleLength) {
auto dataset_params = InterleaveDatasetParamsWithInvalidCycleLength();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(InterleaveDatasetOpTest, InvalidBlockLength) {
auto dataset_params = InterleaveDatasetParamsWithInvalidBlockLength();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} |
1,548 | cpp | tensorflow/tensorflow | flat_map_dataset_op | tensorflow/core/kernels/data/flat_map_dataset_op.cc | tensorflow/core/kernels/data/flat_map_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_FLAT_MAP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_FLAT_MAP_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class FlatMapDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "FlatMap";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit FlatMapDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
const int graph_def_version_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
};
}
}
#endif
#include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/flat_map_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const FlatMapDatasetOp::kDatasetType;
constexpr const char* const FlatMapDatasetOp::kInputDataset;
constexpr const char* const FlatMapDatasetOp::kOtherArguments;
constexpr const char* const FlatMapDatasetOp::kFunc;
constexpr const char* const FlatMapDatasetOp::kTarguments;
constexpr const char* const FlatMapDatasetOp::kOutputTypes;
constexpr const char* const FlatMapDatasetOp::kOutputShapes;
constexpr int64_t kMaxRandomIndexingCardinality = 100;
constexpr char kCycleLength[] = "cycle_length";
constexpr char kElementIndex[] = "element_index";
constexpr char kInputsSize[] = "inputs_size";
constexpr char kInputs[] = "inputs";
constexpr char kCurrentElementIteratorUninitialized[] =
"current_element_iterator_uninitialized";
constexpr char kExhausted[] = "exhausted";
class FlatMapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
output_types_(output_types),
output_shapes_(output_shapes),
random_access_handler_(ctx, input, *captured_func_) {
input_->Ref();
random_indexing_compatible_ = input_->RandomIndexingCompatible();
if (random_indexing_compatible_.ok() &&
input_->Cardinality() > kMaxRandomIndexingCardinality) {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("The cardinality of the input to ", type_string(),
" is too large to support global shuffling. It is ",
input_->Cardinality(), ", which is greater than ",
kMaxRandomIndexingCardinality));
}
}
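  // Global shuffling (random indexing) is only supported when the upstream
  // input is itself random-indexing compatible and has at most
  // kMaxRandomIndexingCardinality elements; the random-access handler keeps
  // per-input-element state, so the limit keeps that bounded.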
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
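  // Cardinality is only computed at CARDINALITY_COMPUTE_MODERATE or higher:
  // the random-access handler has to materialize the mapped datasets to sum
  // their sizes, so lower compute levels report kUnknownCardinality.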
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (options.compute_level() <
CardinalityOptions::CARDINALITY_COMPUTE_MODERATE) {
return kUnknownCardinality;
}
absl::StatusOr<int64_t> cardinality = random_access_handler_.Cardinality();
if (!cardinality.ok()) {
LOG(ERROR) << "Unable to compute cardinality for dataset "
<< DebugString() << " due to error: " << cardinality.status();
return kUnknownCardinality;
}
return *cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {std::make_pair(0, input_graph_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f),
std::make_pair(kTarguments, other_arguments_types_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
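    // The main loop drains `current_element_iterator_` (the dataset produced
    // by mapping the most recent input element) and, once it is exhausted,
    // pulls the next input element and builds a new one. Input checkpoint
    // state is buffered in `input_ckpt_` and only merged into the outer
    // checkpoint after the current element finishes, so a restore resumes
    // with the element that was still in progress.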
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper()) {
return Get(ctx, out_tensors, end_of_sequence);
}
mutex_lock l(mu_);
do {
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (current_element_iterator_) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
TF_RETURN_IF_ERROR(current_element_iterator_->GetNext(
&nested_ctx, out_tensors, &end_of_element));
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
*end_of_sequence = false;
return absl::OkStatus();
}
ctx->MergeCheckpoint(input_ckpt_.get());
ctx->PurgeCheckpoint(current_element_iterator_->prefix());
current_element_iterator_.reset();
}
inputs_.clear();
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
input_ckpt_->Merge(input_ctx->checkpoint());
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, true));
} while (true);
}
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
mutex_lock l(mu_);
*num_skipped = 0;
while (*num_skipped < num_to_skip) {
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (current_element_iterator_) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
int last_num_skipped;
TF_RETURN_IF_ERROR(current_element_iterator_->Skip(
&nested_ctx, num_to_skip - *num_skipped, &end_of_element,
&last_num_skipped));
*num_skipped += last_num_skipped;
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
if (*num_skipped != num_to_skip) {
return absl::InternalError(absl::StrFormat(
"Expected `num_skipped` and `num_to_skip` to be the same. Got"
" %d(num_skipped) and %d(num_to_skip)",
*num_skipped, num_to_skip));
}
continue;
}
ctx->MergeCheckpoint(input_ckpt_.get());
ctx->PurgeCheckpoint(current_element_iterator_->prefix());
current_element_iterator_.reset();
}
inputs_.clear();
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
input_ckpt_->Merge(input_ctx->checkpoint());
if (*end_of_sequence) {
input_impl_.reset();
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, false));
}
*end_of_sequence = false;
return absl::OkStatus();
}
absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(size_t parent_index,
ctx->index_mapper()(element_count_));
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
absl::StatusOr<int64_t> dataset_index =
random_access.GetDatasetIndex(parent_index);
if (absl::IsOutOfRange(dataset_index.status())) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(dataset_index.status());
if (dataset_iterators_.empty()) {
TF_ASSIGN_OR_RETURN(
dataset_iterators_,
random_access.MakeInputIterators(ctx, this, prefix()));
next_positions_.resize(dataset_iterators_.size(), 0);
input_element_counts_.resize(dataset_iterators_.size(), 0);
}
IteratorContext::Params params(ctx);
params.index_mapper =
GetFlatMapIndexMapper(ctx->index_mapper(), *dataset_index);
IteratorContext global_shuffle_ctx(std::move(params));
TF_RETURN_IF_ERROR(dataset_iterators_[*dataset_index]->GetNext(
&global_shuffle_ctx, out_tensors, end_of_sequence));
ctx->MergeCheckpoint(global_shuffle_ctx.checkpoint());
++element_count_;
++input_element_counts_[*dataset_index];
return absl::OkStatus();
}
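    // Builds the index mapper handed to input dataset `input_dataset_index`
    // under global shuffling. The mapper walks global positions with the
    // per-dataset cursor `next_positions_`, applies the parent (shuffle)
    // mapper, and returns the first shuffled index that falls inside this
    // input dataset, converted to a local offset by subtracting the
    // cumulative cardinality of the preceding datasets. Returning the total
    // cardinality signals that no further positions remain.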
IndexMapperFn GetFlatMapIndexMapper(IndexMapperFn parent_index_mapper,
size_t input_dataset_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
absl::StatusOr<int64_t> cardinality =
dataset()->random_access_handler_.Cardinality();
return [this, parent_index_mapper = std::move(parent_index_mapper),
input_dataset_index, cardinality = std::move(cardinality)](
size_t element_position) -> absl::StatusOr<size_t> {
if (!cardinality.ok() || *cardinality < 0) {
return absl::FailedPreconditionError(
"Global shuffling requires finite cardinalities.");
}
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
while (next_positions_[input_dataset_index] < *cardinality) {
size_t index = next_positions_[input_dataset_index];
if (parent_index_mapper != nullptr) {
TF_ASSIGN_OR_RETURN(index, parent_index_mapper(index));
}
++next_positions_[input_dataset_index];
TF_ASSIGN_OR_RETURN(int64_t shuffled_dataset_index,
random_access.GetDatasetIndex(index));
if (input_dataset_index == shuffled_dataset_index) {
if (input_dataset_index > 0) {
TF_ASSIGN_OR_RETURN(
int64_t cumulative_cardinality,
random_access.CumulativeCardinality(input_dataset_index - 1));
index -= cumulative_cardinality;
}
return index;
}
}
return *cardinality;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args),
{model::MakeNonTunableParameter(kCycleLength, 1)});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override
TF_LOCKS_EXCLUDED(mu_) {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kExhausted, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kElementIndex, element_index_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kCurrentElementIteratorUninitialized,
static_cast<int64_t>(!current_element_iterator_)));
if (current_element_iterator_ && !ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputsSize, inputs_.size()));
for (int i = 0; i < inputs_.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kInputs, "[", i, "]"), inputs_[i]));
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, current_element_iterator_));
}
}
return absl::OkStatus();
}
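// Restores the plain (non-globally-shuffled) iterator state written above.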
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override
TF_LOCKS_EXCLUDED(mu_) {
if (ctx->restored_element_count().has_value()) {
return RestoreForGlobalShuffle(ctx, reader);
}
mutex_lock l(mu_);
input_impl_.reset();
element_index_ = 0;
current_element_iterator_.reset();
inputs_.clear();
int64_t input_exhausted;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kExhausted, &input_exhausted));
if (!static_cast<bool>(input_exhausted)) {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kElementIndex, &temp));
element_index_ = temp;
}
int64_t current_element_iterator_uninitialized;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentElementIteratorUninitialized,
&current_element_iterator_uninitialized));
if (!static_cast<bool>(current_element_iterator_uninitialized)) {
TF_RETURN_IF_ERROR(RestoreCurrentElementIterator(ctx, reader));
}
}
return absl::OkStatus();
}
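// Restore path used under global shuffling: replays the index mapping to
// recover how many elements each input dataset has produced, then restores
// every input iterator with its own restored element count.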
Status RestoreForGlobalShuffle(IteratorContext* ctx,
IteratorStateReader* reader)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
element_count_ = *ctx->restored_element_count();
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
TF_ASSIGN_OR_RETURN(int64_t cardinality, random_access.Cardinality());
if (dataset_iterators_.empty()) {
TF_ASSIGN_OR_RETURN(
dataset_iterators_,
random_access.MakeInputIterators(ctx, this, prefix()));
}
input_element_counts_.resize(dataset_iterators_.size(), 0);
next_positions_.resize(dataset_iterators_.size(), 0);
std::fill(input_element_counts_.begin(), input_element_counts_.end(), 0);
std::fill(next_positions_.begin(), next_positions_.end(), 0);
for (size_t count = 0; count < element_count_ && count < cardinality;
++count) {
TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(count));
absl::StatusOr<size_t> dataset_index =
random_access.GetDatasetIndex(parent_index);
if (absl::IsOutOfRange(dataset_index.status())) {
break;
}
TF_RETURN_IF_ERROR(dataset_index.status());
++input_element_counts_[*dataset_index];
next_positions_[*dataset_index] = count + 1;
}
for (size_t i = 0; i < dataset_iterators_.size(); ++i) {
IteratorContext::Params params(ctx);
params.restored_element_count = input_element_counts_[i];
IteratorContext ctx_copy(std::move(params));
TF_RETURN_IF_ERROR(
RestoreInput(&ctx_copy, reader, dataset_iterators_[i]));
ctx->MergeCheckpoint(ctx_copy.checkpoint());
}
return absl::OkStatus();
}
private:
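// Builds an iterator over the dataset produced by applying the captured
// function to the most recently fetched input element.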
Status BuildCurrentElementIteratorLocked(IteratorContext* ctx,
bool is_get_next)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<model::Node> node = is_get_next ? model_node() : nullptr;
return MakeIteratorFromInputElement(
ctx, this, inputs_, element_index_++, *instantiated_captured_func_,
prefix(), &current_element_iterator_, node);
}
Status RestoreCurrentElementIterator(IteratorContext* ctx,
IteratorStateReader* reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (ctx->symbolic_checkpoint()) {
return RestoreCurrentElementIteratorSymbolic(ctx, reader);
}
size_t inputs_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputsSize, &temp));
inputs_size = static_cast<size_t>(temp);
}
inputs_.reserve(inputs_size);
for (int i = 0; i < inputs_size; i++) {
inputs_.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix(), strings::StrCat(kInputs, "[", i, "]"),
&inputs_.back()));
}
element_index_--;
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, false));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
return absl::OkStatus();
}
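// With symbolic checkpoints the cached inputs are not serialized, so they are
// recovered by advancing the input iterator by one element.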
Status RestoreCurrentElementIteratorSymbolic(IteratorContext* ctx,
IteratorStateReader* reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
bool end_of_sequence;
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, &end_of_sequence));
if (end_of_sequence) {
return absl::FailedPreconditionError(
"Unexpected end of sequence while symbolically restoring "
"FlatMapDataset. Please verify that the input produces data "
"deterministically.");
}
input_ckpt_->Merge(input_ctx->checkpoint());
element_index_--;
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, false));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
return absl::OkStatus();
}
mutex mu_;
size_t element_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_);
std::vector<Tensor> inputs_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
size_t element_count_ TF_GUARDED_BY(mu_) = 0;
std::vector<int64_t> input_element_counts_ TF_GUARDED_BY(mu_);
std::vector<size_t> next_positions_;
std::vector<std::unique_ptr<IteratorBase>> dataset_iterators_
TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> current_element_iterator_ TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
mutable FlatMapRandomAccessHandler random_access_handler_;
};
FlatMapDatasetOp::FlatMapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void FlatMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func), output_types_,
output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FlatMapDataset").Device(DEVICE_CPU),
FlatMapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FlatMapDataset");
}
}
} | #include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "flat_map_dataset";
class FlatMapDatasetParams : public DatasetParams {
public:
template <typename T>
FlatMapDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return other_arguments_;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(FlatMapDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(FlatMapDatasetOp::kOtherArguments, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return FlatMapDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class FlatMapDatasetOpTest : public DatasetOpsTestBase {};
FlatMapDatasetParams FlatMapDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
auto func = FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{"Toutput_types", DataTypeVector({DT_INT64})},
{"output_shapes",
std::vector<PartialTensorShape>({PartialTensorShape({1})})}});
return FlatMapDatasetParams(
std::move(tensor_slice_dataset_params),
{},
func,
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
FlatMapDatasetParams InvalidFlatMapDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
auto func = FunctionDefHelper::FunctionRef( "NonZero",
{{"T", DT_INT64}});
return FlatMapDatasetParams(std::move(tensor_slice_dataset_params),
{},
func,
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<FlatMapDatasetParams>> GetNextTestCases() {
return {
{FlatMapDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_GET_NEXT_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<FlatMapDatasetParams>> SkipTestCases() {
return {{FlatMapDatasetParams1(),
2, 2, true,
CreateTensors<int64_t>(TensorShape({1}), {{2}})},
{FlatMapDatasetParams1(),
4, 4, true,
CreateTensors<int64_t>(TensorShape({1}), {{4}})},
{FlatMapDatasetParams1(),
9, 9, false},
{FlatMapDatasetParams1(),
10, 9, false}};
}
ITERATOR_SKIP_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
SkipTestCases())
TEST_F(FlatMapDatasetOpTest, DatasetNodeName) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FlatMapDatasetOpTest, DatasetTypeString) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FlatMapDatasetOp::kDatasetType)));
}
TEST_F(FlatMapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(FlatMapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
TEST_F(FlatMapDatasetOpTest, Cardinality) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(FlatMapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(FlatMapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(FlatMapDatasetOpTest, IteratorPrefix) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FlatMapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<FlatMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{FlatMapDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(FlatMapDatasetOpTest, InvalidMapFunc) {
auto dataset_params = InvalidFlatMapDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} |
1,549 | cpp | tensorflow/tensorflow | reduce_dataset_op | tensorflow/core/kernels/data/reduce_dataset_op.cc | tensorflow/core/kernels/data/reduce_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_REDUCE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_REDUCE_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/data/iterator_ops.h"
namespace tensorflow {
namespace data {
class ReduceDatasetOp : public HybridAsyncOpKernel {
public:
explicit ReduceDatasetOp(OpKernelConstruction* ctx);
protected:
Status DoCompute(OpKernelContext* ctx) override;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/reduce_dataset_op.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
namespace {
const char kOutputShapes[] = "output_shapes";
const char kOutputTypes[] = "output_types";
}
ReduceDatasetOp::ReduceDatasetOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_reduce_dataset") {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr("use_inter_op_parallelism",
&params.use_inter_op_parallelism));
params.use_default_device = false;
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, "f", params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
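// Runs the reduction eagerly: iterates over the (finalized) input dataset and
// folds each element into the running state by calling the captured reduce
// function, then emits the final state as the op's outputs.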
Status ReduceDatasetOp::DoCompute(OpKernelContext* ctx) {
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode("ReduceDatasetOp::DoCompute",
{{"id", ctx->step_id()}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("ReduceDatasetOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
OpInputList inputs;
TF_RETURN_IF_ERROR(ctx->input_list("initial_state", &inputs));
std::vector<Tensor> state(inputs.begin(), inputs.end());
std::unique_ptr<CapturedFunction> captured_func;
TF_RETURN_IF_ERROR(CapturedFunction::Create(
ctx, func_metadata_, "other_arguments", &captured_func));
IteratorContext::Params params(ctx);
auto function_handle_cache =
std::make_unique<FunctionHandleCache>(params.flr);
params.function_handle_cache = function_handle_cache.get();
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
TF_RETURN_IF_ERROR(
captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
std::unique_ptr<IteratorBase> iterator;
if (ctx->function_library()->device()->device_type() == DEVICE_CPU) {
DatasetBase* finalized_dataset = nullptr;
TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
core::ScopedUnref unref(finalized_dataset);
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
&iter_ctx, nullptr, "ReduceIterator", &iterator));
} else {
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iter_ctx, nullptr,
"ReduceIterator", &iterator));
}
while (true) {
if (ctx->cancellation_manager()->IsCancelled()) {
return errors::Cancelled("Operation was cancelled");
}
std::vector<Tensor> next_input_element;
bool end_of_input;
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &next_input_element, &end_of_input));
if (end_of_input) {
break;
}
std::vector<Tensor> args;
args.reserve(state.size() + next_input_element.size());
std::copy(state.begin(), state.end(), std::back_inserter(args));
std::copy(next_input_element.begin(), next_input_element.end(),
std::back_inserter(args));
std::vector<Tensor> reduce_func_output;
TF_RETURN_IF_ERROR(instantiated_captured_func->Run(
&iter_ctx, std::move(args), &reduce_func_output, nullptr));
if (reduce_func_output.size() != state.size()) {
return errors::InvalidArgument(
"The number of components of the initial state and the "
"reduce "
"function output does not match. (initial_state=",
state.size(), ", output=", reduce_func_output.size(), ").");
}
std::swap(reduce_func_output, state);
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, state));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, state));
for (size_t i = 0; i < state.size(); ++i) {
ctx->set_output(i, state[i]);
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ReduceDataset").Device(DEVICE_CPU),
ReduceDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ReduceDataset");
}
}
} | #include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "reduce_dataset";
class ReduceDatasetParams : public DatasetParams {
public:
template <typename T>
ReduceDatasetParams(T input_dataset_params, std::vector<Tensor> initial_state,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_state, DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
bool use_inter_op_parallelism, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
initial_state_(std::move(initial_state)),
other_arguments_(std::move(other_arguments)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_state_(std::move(type_state)),
type_arguments_(std::move(type_arguments)),
use_inter_op_parallelism_(use_inter_op_parallelism) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = initial_state_;
input_tensors.insert(input_tensors.end(), other_arguments_.begin(),
other_arguments_.end());
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back("input_dataset");
for (int i = 0; i < initial_state_.size(); ++i) {
input_names->emplace_back(strings::StrCat("initial_state_", i));
}
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(strings::StrCat("other_arguments_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
*attr_vector = {{"f", func_},
{"Tstate", type_state_},
{"Targuments", type_arguments_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"use_inter_op_parallelism", use_inter_op_parallelism_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return "Reduce"; }
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> initial_state_;
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_state_;
DataTypeVector type_arguments_;
bool use_inter_op_parallelism_;
};
class ReduceDatasetOpTest : public DatasetOpsTestBase {};
ReduceDatasetParams ReduceDatasetParams1() {
return ReduceDatasetParams(
RangeDatasetParams(0, 10, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}}),
{},
FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}}),
{test::function::XAddY()},
{DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
kNodeName);
}
ReduceDatasetParams ReduceDatasetParams2() {
return ReduceDatasetParams(
RangeDatasetParams(1, 10, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}, {1}}),
{},
FunctionDefHelper::FunctionRef("XPlusOneXTimesY", {{"T", DT_INT64}}),
{test::function::XPlusOneXTimesY()},
{DT_INT64, DT_INT64},
{},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
true,
kNodeName);
}
ReduceDatasetParams ReduceDatasetParams3() {
return ReduceDatasetParams(
RangeDatasetParams(0, 0, 1),
CreateTensors<int64_t>(TensorShape({}), {{1}, {3}}),
{},
FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}}),
{test::function::XAddY()},
{DT_INT64, DT_INT64},
{},
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
true,
kNodeName);
}
std::vector<GetNextTestCase<ReduceDatasetParams>> GetNextTestCases() {
return {{
ReduceDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{46}})},
{ReduceDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}),
{{10}, {1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9}})},
{
ReduceDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{1}, {3}})}};
}
class ParameterizedReduceDatasetOpTest
: public ReduceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<ReduceDatasetParams>> {};
TEST_P(ParameterizedReduceDatasetOpTest, Compute) {
auto test_case = GetParam();
TF_ASSERT_OK(InitializeRuntime(test_case.dataset_params));
std::vector<Tensor> output;
TF_ASSERT_OK(RunDatasetOp(test_case.dataset_params, &output));
TF_EXPECT_OK(
ExpectEqual(test_case.expected_outputs, output, true));
}
INSTANTIATE_TEST_SUITE_P(ReduceDatasetOpTest, ParameterizedReduceDatasetOpTest,
::testing::ValuesIn(GetNextTestCases()));
}
}
} |
1,550 | cpp | tensorflow/tensorflow | fixed_length_record_dataset_op | tensorflow/core/kernels/data/fixed_length_record_dataset_op.cc | tensorflow/core/kernels/data/fixed_length_record_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_FIXED_LENGTH_RECORD_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_FIXED_LENGTH_RECORD_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class FixedLengthRecordDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "FixedLengthRecord";
static constexpr const char* const kFileNames = "filenames";
static constexpr const char* const kHeaderBytes = "header_bytes";
static constexpr const char* const kRecordBytes = "record_bytes";
static constexpr const char* const kFooterBytes = "footer_bytes";
static constexpr const char* const kBufferSize = "buffer_size";
static constexpr const char* const kCompressionType = "compression_type";
explicit FixedLengthRecordDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
const int op_version_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/fixed_length_record_dataset_op.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
namespace data {
constexpr const char* const
FixedLengthRecordDatasetOp::kDatasetType;
constexpr const char* const FixedLengthRecordDatasetOp::kFileNames;
constexpr const char* const
FixedLengthRecordDatasetOp::kHeaderBytes;
constexpr const char* const
FixedLengthRecordDatasetOp::kRecordBytes;
constexpr const char* const
FixedLengthRecordDatasetOp::kFooterBytes;
constexpr const char* const
FixedLengthRecordDatasetOp::kBufferSize;
constexpr const char* const
FixedLengthRecordDatasetOp::kCompressionType;
constexpr char kFixedLengthRecordDataset[] = "FixedLengthRecordDataset";
constexpr char kCurrentFileIndex[] = "current_file_index";
constexpr char kCurrentPos[] = "current_pos";
constexpr char kZLIB[] = "ZLIB";
constexpr char kGZIP[] = "GZIP";
class FixedLengthRecordDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<string> filenames,
int64_t header_bytes, int64_t record_bytes,
int64_t footer_bytes, int64_t buffer_size,
const string& compression_type, int op_version)
: DatasetBase(DatasetContext(ctx)),
filenames_(std::move(filenames)),
header_bytes_(header_bytes),
record_bytes_(record_bytes),
footer_bytes_(footer_bytes),
buffer_size_(buffer_size),
compression_type_(compression_type),
op_version_(op_version) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
if (compression_type_.empty()) {
return std::make_unique<UncompressedIterator>(
UncompressedIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
} else {
return std::make_unique<CompressedIterator>(CompressedIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* filenames = nullptr;
Node* header_bytes = nullptr;
Node* record_bytes = nullptr;
Node* footer_bytes = nullptr;
Node* buffer_size = nullptr;
Node* compression_type = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
TF_RETURN_IF_ERROR(b->AddScalar(header_bytes_, &header_bytes));
TF_RETURN_IF_ERROR(b->AddScalar(record_bytes_, &record_bytes));
TF_RETURN_IF_ERROR(b->AddScalar(footer_bytes_, &footer_bytes));
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{filenames, header_bytes, record_bytes, footer_bytes,
buffer_size, compression_type},
output));
return absl::OkStatus();
}
private:
class UncompressedIterator : public DatasetIterator<Dataset> {
public:
explicit UncompressedIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
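// Reads fixed-size records sequentially from each file, skipping the header
// bytes and stopping before the footer.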
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (input_buffer_) {
const int64_t current_pos = input_buffer_->Tell();
DCHECK_GE(file_pos_limit_, 0);
if (current_pos < file_pos_limit_) {
string record;
TF_RETURN_IF_ERROR(
input_buffer_->ReadNBytes(dataset()->record_bytes_, &record));
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(kDatasetType);
bytes_counter->IncrementBy(dataset()->record_bytes_);
Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<tstring>()() = record;
out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false;
return absl::OkStatus();
}
input_buffer_.reset();
file_.reset();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
uint64 file_size;
const std::string& next_filename =
dataset()->filenames_[current_file_index_];
TF_RETURN_IF_ERROR(ctx->env()->GetFileSize(next_filename, &file_size));
file_pos_limit_ = file_size - dataset()->footer_bytes_;
uint64 body_size =
file_size - (dataset()->header_bytes_ + dataset()->footer_bytes_);
if (body_size % dataset()->record_bytes_ != 0) {
return errors::InvalidArgument(
"Excluding the header (", dataset()->header_bytes_,
" bytes) and footer (", dataset()->footer_bytes_,
" bytes), input file \"", next_filename, "\" has body length ",
body_size,
" bytes, which is not an exact multiple of the record length (",
dataset()->record_bytes_, " bytes).");
}
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(next_filename), &file_));
input_buffer_ = std::make_unique<io::InputBuffer>(
file_.get(), dataset()->buffer_size_);
TF_RETURN_IF_ERROR(input_buffer_->SkipNBytes(dataset()->header_bytes_));
} while (true);
}
protected:
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
int64_t current_pos = input_buffer_ ? input_buffer_->Tell() : -1;
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurrentPos, current_pos));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, &current_file_index));
current_file_index_ = size_t(current_file_index);
int64_t current_pos;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentPos, &current_pos));
input_buffer_.reset();
file_.reset();
if (current_pos >= 0) {
uint64 file_size;
const std::string& current_filename =
dataset()->filenames_[current_file_index_];
TF_RETURN_IF_ERROR(
ctx->env()->GetFileSize(current_filename, &file_size));
file_pos_limit_ = file_size - dataset()->footer_bytes_;
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(current_filename), &file_));
input_buffer_ = std::make_unique<io::InputBuffer>(
file_.get(), dataset()->buffer_size_);
TF_RETURN_IF_ERROR(input_buffer_->Seek(current_pos));
}
return absl::OkStatus();
}
private:
mutex mu_;
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_
TF_GUARDED_BY(mu_);
std::unique_ptr<io::InputBuffer> input_buffer_ TF_GUARDED_BY(mu_);
int64_t file_pos_limit_ TF_GUARDED_BY(mu_) = -1;
};
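// Iterator used when a compression type is specified. Compressed streams have
// no known uncompressed size, so a footer-sized lookahead cache is kept to
// detect where the record body ends.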
class CompressedIterator : public DatasetIterator<Dataset> {
public:
explicit CompressedIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
static monitoring::CounterCell* bytes_counter =
metrics::GetTFDataBytesReadCounter(kDatasetType);
mutex_lock l(mu_);
do {
if (buffered_input_stream_) {
const int64_t current_pos = buffered_input_stream_->Tell();
if (dataset()->compression_type_.empty()) {
DCHECK_GE(file_pos_limit_, 0);
if (current_pos < file_pos_limit_) {
tstring record;
TF_RETURN_IF_ERROR(buffered_input_stream_->ReadNBytes(
dataset()->record_bytes_, &record));
bytes_counter->IncrementBy(dataset()->record_bytes_);
Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<tstring>()() = std::move(record);
out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false;
return absl::OkStatus();
}
} else {
tstring record;
Status s = buffered_input_stream_->ReadNBytes(
dataset()->record_bytes_, &record);
if (s.ok()) {
bytes_counter->IncrementBy(dataset()->record_bytes_);
lookahead_cache_.append(record);
StringPiece lookahead_cache_view(lookahead_cache_);
record = tstring(
lookahead_cache_view.substr(0, dataset()->record_bytes_));
lookahead_cache_ = tstring(
lookahead_cache_view.substr(dataset()->record_bytes_));
Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<tstring>()() = std::move(record);
out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(s) && !record.empty()) {
uint64 body_size =
current_pos + record.size() -
(dataset()->header_bytes_ + dataset()->footer_bytes_);
return errors::DataLoss(
"Excluding the header (", dataset()->header_bytes_,
" bytes) and footer (", dataset()->footer_bytes_,
" bytes), input file \"",
dataset()->filenames_[current_file_index_],
"\" has body length ", body_size,
" bytes, which is not an exact multiple of the record "
"length (",
dataset()->record_bytes_, " bytes).");
}
}
buffered_input_stream_.reset();
file_.reset();
++current_file_index_;
}
if (current_file_index_ == dataset()->filenames_.size()) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (dataset()->compression_type_.empty()) {
uint64 file_size;
TF_RETURN_IF_ERROR(ctx->env()->GetFileSize(
dataset()->filenames_[current_file_index_], &file_size));
file_pos_limit_ = file_size - dataset()->footer_bytes_;
uint64 body_size =
file_size - (dataset()->header_bytes_ + dataset()->footer_bytes_);
if (body_size % dataset()->record_bytes_ != 0) {
return errors::InvalidArgument(
"Excluding the header (", dataset()->header_bytes_,
" bytes) and footer (", dataset()->footer_bytes_,
" bytes), input file \"",
dataset()->filenames_[current_file_index_],
"\" has body length ", body_size,
" bytes, which is not an exact multiple of the record length "
"(",
dataset()->record_bytes_, " bytes).");
}
}
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
if (!dataset()->compression_type_.empty()) {
const io::ZlibCompressionOptions zlib_options =
dataset()->compression_type_ == kZLIB
? io::ZlibCompressionOptions::DEFAULT()
: io::ZlibCompressionOptions::GZIP();
file_stream_ =
std::make_unique<io::RandomAccessInputStream>(file_.get());
buffered_input_stream_ = std::make_unique<io::ZlibInputStream>(
file_stream_.get(), dataset()->buffer_size_,
dataset()->buffer_size_, zlib_options);
} else {
buffered_input_stream_ = std::make_unique<io::BufferedInputStream>(
file_.get(), dataset()->buffer_size_);
}
TF_RETURN_IF_ERROR(
buffered_input_stream_->SkipNBytes(dataset()->header_bytes_));
lookahead_cache_.clear();
if (!dataset()->compression_type_.empty()) {
TF_RETURN_IF_ERROR(buffered_input_stream_->ReadNBytes(
dataset()->footer_bytes_, &lookahead_cache_));
}
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex,
current_file_index_));
int64_t current_pos =
buffered_input_stream_ ? buffered_input_stream_->Tell() : -1;
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCurrentPos, current_pos));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t current_file_index;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentFileIndex, &current_file_index));
current_file_index_ = size_t(current_file_index);
int64_t current_pos;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentPos, &current_pos));
buffered_input_stream_.reset();
file_.reset();
if (current_pos >= 0) {
TF_RETURN_IF_ERROR(ctx->env()->NewRandomAccessFile(
TranslateFileName(dataset()->filenames_[current_file_index_]),
&file_));
const io::ZlibCompressionOptions zlib_options =
dataset()->compression_type_ == kZLIB
? io::ZlibCompressionOptions::DEFAULT()
: io::ZlibCompressionOptions::GZIP();
file_stream_ =
std::make_unique<io::RandomAccessInputStream>(file_.get());
buffered_input_stream_ = std::make_unique<io::ZlibInputStream>(
file_stream_.get(), dataset()->buffer_size_,
dataset()->buffer_size_, zlib_options);
lookahead_cache_.clear();
TF_RETURN_IF_ERROR(buffered_input_stream_->SkipNBytes(
current_pos - dataset()->footer_bytes_));
TF_RETURN_IF_ERROR(buffered_input_stream_->ReadNBytes(
dataset()->footer_bytes_, &lookahead_cache_));
}
return absl::OkStatus();
}
private:
mutex mu_;
size_t current_file_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_
TF_GUARDED_BY(mu_);
std::unique_ptr<io::RandomAccessInputStream>
file_stream_;
std::unique_ptr<io::InputStreamInterface> buffered_input_stream_
TF_GUARDED_BY(mu_);
int64_t file_pos_limit_ TF_GUARDED_BY(mu_) = -1;
tstring lookahead_cache_ TF_GUARDED_BY(mu_);
};
const std::vector<string> filenames_;
const int64_t header_bytes_;
const int64_t record_bytes_;
const int64_t footer_bytes_;
const int64_t buffer_size_;
const tstring compression_type_;
const int op_version_;
};
FixedLengthRecordDatasetOp::FixedLengthRecordDatasetOp(
OpKernelConstruction* ctx)
: DatasetOpKernel(ctx),
op_version_(ctx->def().op() == kFixedLengthRecordDataset ? 1 : 2) {}
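// Validates the scalar arguments (header/record/footer/buffer sizes and, for
// the v2 op, the compression type) and falls back to a 256KB buffer when
// buffer_size is 0.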
void FixedLengthRecordDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
const Tensor* filenames_tensor;
OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor));
OP_REQUIRES(
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<tstring>()(i));
metrics::RecordTFDataFilename(kDatasetType, filenames[i]);
}
LogFilenames(filenames);
int64_t header_bytes = -1;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64_t>(ctx, kHeaderBytes, &header_bytes));
OP_REQUIRES(ctx, header_bytes >= 0,
errors::InvalidArgument("`header_bytes` must be >= 0"));
int64_t record_bytes = -1;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64_t>(ctx, kRecordBytes, &record_bytes));
OP_REQUIRES(ctx, record_bytes > 0,
errors::InvalidArgument("`record_bytes` must be > 0"));
int64_t footer_bytes = -1;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64_t>(ctx, kFooterBytes, &footer_bytes));
OP_REQUIRES(ctx, footer_bytes >= 0,
errors::InvalidArgument("`footer_bytes` must be >= 0"));
int64_t buffer_size = -1;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx, buffer_size >= 0,
errors::InvalidArgument("`buffer_size` must be >= 0"));
if (buffer_size == 0) {
buffer_size = 256 << 10;
}
tstring compression_type;
if (op_version_ > 1) {
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType,
&compression_type));
OP_REQUIRES(ctx,
compression_type.empty() || compression_type == kZLIB ||
compression_type == kGZIP,
errors::InvalidArgument("Unsupported compression_type."));
}
*output =
new Dataset(ctx, std::move(filenames), header_bytes, record_bytes,
footer_bytes, buffer_size, compression_type, op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FixedLengthRecordDataset").Device(DEVICE_CPU),
FixedLengthRecordDatasetOp);
REGISTER_KERNEL_BUILDER(Name("FixedLengthRecordDatasetV2").Device(DEVICE_CPU),
FixedLengthRecordDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/fixed_length_record_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "fixed_length_record_dataset";
constexpr int kOpVersion = 2;
tstring LocalTempFilename() {
std::string path;
CHECK(Env::Default()->LocalTempFilename(&path));
return tstring(path);
}
class FixedLengthRecordDatasetParams : public DatasetParams {
public:
FixedLengthRecordDatasetParams(const std::vector<tstring>& filenames,
int64_t header_bytes, int64_t record_bytes,
int64_t footer_bytes, int64_t buffer_size,
CompressionType compression_type,
string node_name)
: DatasetParams({DT_STRING}, {PartialTensorShape({})},
std::move(node_name)),
filenames_(filenames),
header_bytes_(header_bytes),
record_bytes_(record_bytes),
footer_bytes_(footer_bytes),
buffer_size_(buffer_size),
compression_type_(compression_type) {
op_version_ = 2;
}
std::vector<Tensor> GetInputTensors() const override {
int num_files = filenames_.size();
return {
CreateTensor<tstring>(TensorShape({num_files}), filenames_),
CreateTensor<int64_t>(TensorShape({}), {header_bytes_}),
CreateTensor<int64_t>(TensorShape({}), {record_bytes_}),
CreateTensor<int64_t>(TensorShape({}), {footer_bytes_}),
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
*input_names = {FixedLengthRecordDatasetOp::kFileNames,
FixedLengthRecordDatasetOp::kHeaderBytes,
FixedLengthRecordDatasetOp::kRecordBytes,
FixedLengthRecordDatasetOp::kFooterBytes,
FixedLengthRecordDatasetOp::kBufferSize,
FixedLengthRecordDatasetOp::kCompressionType};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return FixedLengthRecordDatasetOp::kDatasetType;
}
private:
std::vector<tstring> filenames_;
int64_t header_bytes_;
int64_t record_bytes_;
int64_t footer_bytes_;
int64_t buffer_size_;
CompressionType compression_type_;
};
class FixedLengthRecordDatasetOpTest : public DatasetOpsTestBase {};
Status CreateTestFiles(const std::vector<tstring>& filenames,
const std::vector<string>& contents,
CompressionType compression_type) {
if (filenames.size() != contents.size()) {
return tensorflow::errors::InvalidArgument(
"The number of files does not match with the contents");
}
if (compression_type == CompressionType::UNCOMPRESSED) {
for (int i = 0; i < filenames.size(); ++i) {
TF_RETURN_IF_ERROR(WriteDataToFile(filenames[i], contents[i].data()));
}
} else {
CompressionParams params;
params.output_buffer_size = 10;
params.compression_type = compression_type;
for (int i = 0; i < filenames.size(); ++i) {
TF_RETURN_IF_ERROR(
WriteDataToFile(filenames[i], contents[i].data(), params));
}
}
return absl::OkStatus();
}
FixedLengthRecordDatasetParams FixedLengthRecordDatasetParams1() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<string> contents = {
absl::StrCat("HHHHH", "111", "222", "333", "FF"),
absl::StrCat("HHHHH", "aaa", "bbb", "FF")};
CompressionType compression_type = CompressionType::ZLIB;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return FixedLengthRecordDatasetParams(filenames,
5,
3,
2,
10,
compression_type,
kNodeName);
}
FixedLengthRecordDatasetParams FixedLengthRecordDatasetParams2() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<string> contents = {
absl::StrCat("HHHHH", "111", "222", "333", "FF"),
absl::StrCat("HHHHH", "aaa", "bbb", "FF")};
CompressionType compression_type = CompressionType::GZIP;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return FixedLengthRecordDatasetParams(filenames,
5,
3,
2,
10,
compression_type,
kNodeName);
}
FixedLengthRecordDatasetParams FixedLengthRecordDatasetParams3() {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
std::vector<string> contents = {
absl::StrCat("HHHHH", "111", "222", "333", "FF"),
absl::StrCat("HHHHH", "aaa", "bbb", "FF")};
CompressionType compression_type = CompressionType::UNCOMPRESSED;
if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
VLOG(WARNING) << "Failed to create the test files: "
<< absl::StrJoin(filenames, ", ");
}
return FixedLengthRecordDatasetParams(filenames,
5,
3,
2,
10,
compression_type,
kNodeName);
}
std::vector<GetNextTestCase<FixedLengthRecordDatasetParams>>
GetNextTestCases() {
return {
{FixedLengthRecordDatasetParams1(),
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams2(),
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams3(),
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})}};
}
ITERATOR_GET_NEXT_TEST_P(FixedLengthRecordDatasetOpTest,
FixedLengthRecordDatasetParams, GetNextTestCases())
TEST_F(FixedLengthRecordDatasetOpTest, DatasetNodeName) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FixedLengthRecordDatasetOpTest, DatasetTypeString) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = kOpVersion;
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FixedLengthRecordDatasetOp::kDatasetType, params)));
}
TEST_F(FixedLengthRecordDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING}));
}
TEST_F(FixedLengthRecordDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(FixedLengthRecordDatasetOpTest, Cardinality) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(FixedLengthRecordDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING}));
}
TEST_F(FixedLengthRecordDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(FixedLengthRecordDatasetOpTest, IteratorPrefix) {
auto dataset_params = FixedLengthRecordDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams iterator_prefix_params;
iterator_prefix_params.op_version = kOpVersion;
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FixedLengthRecordDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), iterator_prefix_params)));
}
std::vector<IteratorSaveAndRestoreTestCase<FixedLengthRecordDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{FixedLengthRecordDatasetParams1(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams2(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})},
{FixedLengthRecordDatasetParams3(),
{0, 2, 6},
CreateTensors<tstring>(TensorShape({}),
{{"111"}, {"222"}, {"333"}, {"aaa"}, {"bbb"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FixedLengthRecordDatasetOpTest,
FixedLengthRecordDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,551 | cpp | tensorflow/tensorflow | parallel_batch_dataset_op | tensorflow/core/kernels/data/parallel_batch_dataset_op.cc | tensorflow/core/kernels/data/parallel_batch_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_BATCH_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_BATCH_DATASET_OP_H_
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ParallelBatchDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "ParallelBatch";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kBatchSize = "batch_size";
static constexpr const char* const kNumParallelCalls = "num_parallel_calls";
static constexpr const char* const kDropRemainder = "drop_remainder";
static constexpr const char* const kParallelCopy = "parallel_copy";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kDeterministic = "deterministic";
explicit ParallelBatchDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
DeterminismPolicy deterministic_;
bool parallel_copy_ = false;
};
}
}
#endif
#include "tensorflow/core/kernels/data/parallel_batch_dataset_op.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelBatchDatasetOp::kDatasetType;
constexpr const char* const ParallelBatchDatasetOp::kInputDataset;
constexpr const char* const ParallelBatchDatasetOp::kBatchSize;
constexpr const char* const
ParallelBatchDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelBatchDatasetOp::kDropRemainder;
constexpr const char* const ParallelBatchDatasetOp::kParallelCopy;
constexpr const char* const ParallelBatchDatasetOp::kOutputTypes;
constexpr const char* const ParallelBatchDatasetOp::kOutputShapes;
constexpr const char* const ParallelBatchDatasetOp::kDeterministic;
namespace {
constexpr char kBatchResultsSize[] = "batch_results_size";
constexpr char kTFDataParallelBatch[] = "tf_data_parallel_batch";
constexpr char kBatchResults[] = "batch_results";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumElements[] = "num_elements";
constexpr char kCallFinished[] = "call_finished";
constexpr char kOutputAllocated[] = "output_allocated";
constexpr char kStatus[] = "status";
}
class ParallelBatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, int64_t num_parallel_calls,
bool drop_remainder, bool parallel_copy, const DatasetBase* input,
DeterminismPolicy deterministic)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
reserve_size_(drop_remainder ? batch_size
: std::min<int64_t>(batch_size, 1 << 16)),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
input_(input),
deterministic_(deterministic),
traceme_metadata_(
{{"autotune",
num_parallel_calls == model::kAutotune ? "true" : "false"},
{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
for (const auto& input_shape : input_shapes) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.emplace_back(
PartialTensorShape({batch_size_}).Concatenate(input_shape));
} else {
output_shapes_.emplace_back(
PartialTensorShape({-1}).Concatenate(input_shape));
}
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
Node* num_parallel_calls = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(num_parallel_calls_, &num_parallel_calls));
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue parallel_copy_attr;
b->BuildAttrValue(parallel_copy_, &parallel_copy_attr);
attrs.emplace_back(kParallelCopy, parallel_copy_attr);
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{input_graph_node, batch_size, num_parallel_calls, drop_remainder},
attrs, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()) {}
~Iterator() override {
CancelThreads(true);
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
if (dataset()->parallel_copy_) {
num_parallel_calls_->value = 1;
} else {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(std::move(params));
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx.checkpoint());
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
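// Waits for the next finished batch (or cancellation) and converts it into
// the caller's output tensors.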
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<BatchResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(&result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelBatchConsume",
{{"element_id", result->uid}});
});
mutex_lock l(result->mu);
auto cleanup =
gtl::MakeCleanup([result]() TF_EXCLUSIVE_LOCKS_REQUIRED(
&BatchResult::mu) { result->output.clear(); });
if (result->output_allocated) {
RecordBufferDequeue(ctx, result->output);
}
ctx->MergeCheckpoint(&result->checkpoint);
TF_RETURN_IF_ERROR(
ProcessBatch(dataset()->batch_size_, result->num_elements,
dataset()->drop_remainder_, result->status, ctx,
out_tensors, end_of_sequence, &result->output));
return absl::OkStatus();
}
protected:
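// Models the iterator as an async known-ratio node with a tunable
// "parallelism" parameter bounded by the runner thread pool size.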
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncKnownRatioNode(
std::move(args),
dataset()->batch_size_, 1.0,
{model::MakeParameter("parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
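// Drains in-flight calls, then checkpoints the input iterator and every
// buffered batch result; under symbolic checkpointing only an empty batch
// count is written.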
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (ctx->symbolic_checkpoint()) {
return writer->WriteScalar(prefix(), kBatchResultsSize, 0);
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
DCHECK_EQ(num_calls_, 0);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kBatchResultsSize,
batch_results_.size()));
for (size_t i = 0; i < batch_results_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteBatchResult(writer, i));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
DCHECK(!runner_thread_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t batch_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBatchResultsSize, &batch_results_size));
DCHECK(batch_results_.empty());
for (int i = 0; i < batch_results_size; ++i) {
TF_RETURN_IF_ERROR(ReadBatchResult(ctx, reader, i));
}
if (ctx->warm_start()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
auto result = dataset()->traceme_metadata_;
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
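// State of one in-flight or finished batch; fields are guarded by the
// per-result mutex except call_finished, which the iterator's mu_ guards.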
struct BatchResult {
explicit BatchResult(IteratorContext* ctx)
: end_of_input(false),
num_elements(0),
status(absl::OkStatus()),
call_finished(false),
output_allocated(false),
uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
mutex mu;
bool end_of_input TF_GUARDED_BY(mu);
int64_t num_elements TF_GUARDED_BY(mu);
std::vector<Tensor> output TF_GUARDED_BY(mu);
Status status TF_GUARDED_BY(mu);
bool call_finished TF_GUARDED_BY(&Iterator::mu_);
bool output_allocated TF_GUARDED_BY(mu);
const int64_t uid = -1;
MemoryCheckpoint checkpoint;
};
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<BatchResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->call_finished = true;
cond_var_->notify_all();
}
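// Reads up to batch_size_ elements from the input and schedules copying them
// into a single batch on the runner; completion is signalled via CallCompleted.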
void CallBatching(std::shared_ptr<IteratorContext> ctx,
const std::shared_ptr<BatchResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelBatchProduce",
{{"element_id", result->uid}});
});
if (!input_impl_) {
CallCompleted(ctx, result);
return;
}
std::vector<std::vector<Tensor>> batch_elements;
batch_elements.reserve(dataset()->reserve_size_);
bool end_of_input = false;
for (int i = 0; i < dataset()->batch_size_ && !end_of_input; ++i) {
std::vector<Tensor> batch_element_tuple;
Status status = input_impl_->GetNext(ctx.get(), &batch_element_tuple,
&end_of_input);
{
mutex_lock l(result->mu);
result->end_of_input = result->end_of_input || end_of_input;
result->status.Update(status);
result->checkpoint.Merge(ctx->checkpoint());
if (result->end_of_input || !result->status.ok()) break;
}
if (!end_of_input) {
batch_elements.emplace_back(std::move(batch_element_tuple));
mutex_lock l(result->mu);
result->num_elements++;
} else {
input_impl_.reset();
}
}
if (batch_elements.empty()) {
CallCompleted(ctx, result);
return;
}
auto copy_elements_fn = [this, ctx, result,
batch_elements =
std::move(batch_elements)]() mutable {
Status status;
{
mutex_lock l(result->mu);
status = CopyBatch(AnyContext(ctx.get()), std::move(batch_elements),
dataset()->parallel_copy_, &result->output);
result->status.Update(status);
if (result->status.ok()) {
result->output_allocated = true;
RecordBufferEnqueue(ctx.get(), result->output);
} else {
result->output.clear();
result->output_allocated = false;
}
}
CallCompleted(ctx, result);
return status;
};
(*ctx->runner())(std::move(copy_elements_fn));
}
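// Starts cancellation and, if requested, waits for outstanding calls to finish.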
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto new_ctx = std::make_shared<IteratorContext>(*ctx);
runner_thread_ =
ctx->StartThread(kTFDataParallelBatch,
std::bind(&Iterator::RunnerThread, this, new_ctx));
}
}
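// Producer loop: keeps creating BatchResults and issuing CallBatching() until
// cancelled, throttled by num_parallel_calls_ and the result buffer size.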
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
std::vector<std::shared_ptr<BatchResult>> new_calls;
RecordStart(ctx.get());
auto stop_cleanup =
gtl::MakeCleanup([this, &ctx]() { RecordStop(ctx.get()); });
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
batch_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
batch_results_.push_back(std::make_shared<BatchResult>(ctx.get()));
new_calls.emplace_back(batch_results_.back());
num_calls_++;
}
}
for (const auto& call : new_calls) {
CallBatching(ctx, call);
}
new_calls.clear();
}
}
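// Decides whether GetNextInternal must keep waiting; in non-deterministic mode
// a later finished batch may be handed out ahead of the front of the queue.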
bool ShouldWait(std::shared_ptr<BatchResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
if (!deterministic_) {
bool find_batch;
for (auto it = batch_results_.begin(); it != batch_results_.end();
++it) {
if (!(*it)->call_finished) continue;
find_batch = (it == batch_results_.begin());
if (!find_batch) {
tf_shared_lock l((*it)->mu);
find_batch = !(*it)->end_of_input;
}
if (find_batch) {
std::swap(*result, *it);
batch_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else if (!batch_results_.empty() &&
batch_results_.front()->call_finished) {
std::swap(*result, batch_results_.front());
batch_results_.pop_front();
cond_var_->notify_all();
return false;
}
return true;
}
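// Restores one checkpointed batch result: flags, element count, tensors, and
// the recorded status.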
Status ReadBatchResult(IteratorContext* ctx, IteratorStateReader* reader,
size_t index) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
batch_results_.push_back(std::make_shared<BatchResult>(ctx));
std::shared_ptr<BatchResult> result = batch_results_.back();
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
result->end_of_input = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
&result->num_elements));
result->call_finished = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kCallFinished));
result->output_allocated = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated));
TF_RETURN_IF_ERROR(ReadBatch(ctx, reader, dataset()->batch_size_,
prefix(), batch_prefix, &result->output));
TF_RETURN_IF_ERROR(ReadStatus(prefix(),
strings::StrCat(batch_prefix, "_", kStatus),
reader, &result->status));
if (result->output_allocated) {
RecordBufferEnqueue(ctx, result->output);
}
return absl::OkStatus();
}
Status WriteBatchResult(IteratorStateWriter* writer, size_t index)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
std::shared_ptr<BatchResult> result = batch_results_[index];
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
if (result->end_of_input) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput), ""));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
result->num_elements));
if (result->call_finished) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kCallFinished), ""));
}
if (result->output_allocated) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated),
""));
}
TF_RETURN_IF_ERROR(WriteBatch(dataset()->batch_size_,
result->num_elements, prefix(),
batch_prefix, writer, &result->output));
TF_RETURN_IF_ERROR(
WriteStatus(prefix(), strings::StrCat(batch_prefix, "_", kStatus),
result->status, writer));
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;
std::unique_ptr<CancellationManager> cancellation_manager_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<BatchResult>> batch_results_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
};
const int64_t batch_size_;
const int64_t reserve_size_;
const int64_t num_parallel_calls_;
const bool drop_remainder_;
const bool parallel_copy_;
const DatasetBase* const input_;
std::vector<PartialTensorShape> output_shapes_;
const DeterminismPolicy deterministic_;
const TraceMeMetadata traceme_metadata_;
};
ParallelBatchDatasetOp::ParallelBatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kDeterministic)) {
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(
ctx, DeterminismPolicy::FromString(deterministic, &deterministic_));
}
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, &parallel_copy_));
}
}
void ParallelBatchDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t batch_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
int64_t num_parallel_calls = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kNumParallelCalls,
&num_parallel_calls));
bool drop_remainder = false;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
*output = new Dataset(ctx, batch_size, num_parallel_calls, drop_remainder,
parallel_copy_, input, deterministic_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ParallelBatchDataset").Device(DEVICE_CPU),
ParallelBatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/parallel_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_batch_dataset";
constexpr int kOpVersion = 1;
class ParallelBatchDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelBatchDatasetParams(T input_dataset_params, int64_t batch_size,
int64_t num_parallel_calls, bool drop_remainder,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
const bool parallel_copy,
const std::string& deterministic, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
batch_size_(batch_size),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
deterministic_(deterministic) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
Tensor batch_size = CreateTensor<int64_t>(TensorShape({}), {batch_size_});
Tensor num_parallel_calls =
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_});
Tensor drop_remainder =
CreateTensor<bool>(TensorShape({}), {drop_remainder_});
return {batch_size, num_parallel_calls, drop_remainder};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {ParallelBatchDatasetOp::kInputDataset,
ParallelBatchDatasetOp::kBatchSize,
ParallelBatchDatasetOp::kNumParallelCalls,
ParallelBatchDatasetOp::kDropRemainder};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{"parallel_copy", parallel_copy_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"deterministic", deterministic_},
{"metadata", ""},
};
return absl::OkStatus();
};
string dataset_type() const override {
return ParallelBatchDatasetOp::kDatasetType;
}
private:
int64_t batch_size_;
int64_t num_parallel_calls_;
bool drop_remainder_;
bool parallel_copy_;
std::string deterministic_;
};
class ParallelBatchDatasetOpTest : public DatasetOpsTestBase {};
ParallelBatchDatasetParams ParallelBatchDatasetParams1() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
1,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams2() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
1,
true,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams3() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
3,
1,
false,
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams4() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
3,
1,
true,
{DT_INT64},
{PartialTensorShape({3})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams5() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
12,
1,
true,
{DT_INT64},
{PartialTensorShape({12})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams6() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
12,
1,
false,
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams7() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 0, 1),
4,
1,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams8() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
2,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams9() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
4,
false,
{DT_INT64},
{PartialTensorShape({4})},
false,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams ParallelBatchDatasetParams10() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 12, 1),
4,
1,
false,
{DT_INT64},
{PartialTensorShape({4})},
true,
DeterminismPolicy::kDeterministic,
kNodeName);
}
ParallelBatchDatasetParams InvalidBatchSizeParallelBatchDatasetParams() {
return ParallelBatchDatasetParams(
RangeDatasetParams(0, 10, 1),
-1,
1,
false,
{DT_INT64},
{PartialTensorShape({3})},
false,
DeterminismPolicy::kNondeterministic,
kNodeName);
}
std::vector<GetNextTestCase<ParallelBatchDatasetParams>> GetNextTestCases() {
return {
{ParallelBatchDatasetParams1(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams2(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{ParallelBatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({3}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{ParallelBatchDatasetParams5(),
{}},
{ParallelBatchDatasetParams6(),
CreateTensors<int64_t>(TensorShape({10}),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}})},
{ParallelBatchDatasetParams7(),
{}},
{ParallelBatchDatasetParams8(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams9(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams10(),
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelBatchDatasetOpTest, ParallelBatchDatasetParams,
GetNextTestCases())
TEST_F(ParallelBatchDatasetOpTest, DatasetNodeName) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(parallel_batch_dataset_params.node_name()));
}
TEST_F(ParallelBatchDatasetOpTest, DatasetTypeString) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
name_utils::OpNameParams params;
params.op_version = parallel_batch_dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelBatchDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelBatchDatasetOpTest, DatasetOutputDtypes) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<ParallelBatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ParallelBatchDatasetParams1(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams2(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams3(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams4(),
{PartialTensorShape({3})}},
{ParallelBatchDatasetParams5(),
{PartialTensorShape({12})}},
{ParallelBatchDatasetParams6(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams7(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams8(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams9(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams10(),
{PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ParallelBatchDatasetParams>>
CardinalityTestCases() {
return {{ParallelBatchDatasetParams1(),
3},
{ParallelBatchDatasetParams2(),
3},
{ParallelBatchDatasetParams3(),
4},
{ParallelBatchDatasetParams4(),
3},
{ParallelBatchDatasetParams5(),
0},
{ParallelBatchDatasetParams6(),
1},
{ParallelBatchDatasetParams7(),
0},
{ParallelBatchDatasetParams8(),
3},
{ParallelBatchDatasetParams9(),
3},
{ParallelBatchDatasetParams10(),
3}};
}
DATASET_CARDINALITY_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams, CardinalityTestCases())
TEST_F(ParallelBatchDatasetOpTest, IteratorOutputDtypes) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<ParallelBatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ParallelBatchDatasetParams1(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams2(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams3(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams4(),
{PartialTensorShape({3})}},
{ParallelBatchDatasetParams5(),
{PartialTensorShape({12})}},
{ParallelBatchDatasetParams6(),
{PartialTensorShape({-1})}},
{ParallelBatchDatasetParams7(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams8(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams9(),
{PartialTensorShape({4})}},
{ParallelBatchDatasetParams10(),
{PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ParallelBatchDatasetOpTest, IteratorOutputPrefix) {
auto parallel_batch_dataset_params = ParallelBatchDatasetParams1();
TF_ASSERT_OK(Initialize(parallel_batch_dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = parallel_batch_dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ParallelBatchDatasetOp::kDatasetType,
parallel_batch_dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelBatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{ParallelBatchDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams3(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{ParallelBatchDatasetParams4(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8})}},
{ParallelBatchDatasetParams5(),
{0, 1, 5},
{}},
{ParallelBatchDatasetParams6(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({10}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})}},
{ParallelBatchDatasetParams7(),
{0, 1, 5},
{}},
{ParallelBatchDatasetParams8(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams9(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{ParallelBatchDatasetParams10(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({4}),
{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelBatchDatasetOpTest,
ParallelBatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelBatchDatasetOpTest, InvalidParallelBatchSize) {
auto parallel_batch_dataset_params =
InvalidBatchSizeParallelBatchDatasetParams();
EXPECT_EQ(Initialize(parallel_batch_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} |
1,552 | cpp | tensorflow/tensorflow | get_options_op | tensorflow/core/kernels/data/get_options_op.cc | tensorflow/core/kernels/data/get_options_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_GET_OPTIONS_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_GET_OPTIONS_OP_H_
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
namespace data {
class GetOptionsOp : public OpKernel {
public:
explicit GetOptionsOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) final;
string TraceString(const OpKernelContext& ctx, bool verbose) const override;
};
}
}
#endif
#include "tensorflow/core/kernels/data/get_options_op.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
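// Writes the input dataset's Options proto, serialized, into a scalar string
// output tensor.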
void GetOptionsOp::Compute(OpKernelContext* ctx) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
if (ctx->status().ok()) {
Tensor* string_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<tstring>()() = input->options().SerializeAsString();
}
}
string GetOptionsOp::TraceString(const OpKernelContext& ctx,
bool verbose) const {
return tsl::profiler::TraceMeOp(name_view(), type_string_view());
}
namespace {
REGISTER_KERNEL_BUILDER(Name("GetOptions").Device(DEVICE_CPU).Priority(2),
GetOptionsOp);
REGISTER_KERNEL_BUILDER(Name("GetOptions")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("serialized_options")
.Priority(1),
GetOptionsOp);
}
}
} | #include "tensorflow/core/kernels/data/get_options_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptions[] = R"proto(
deterministic: true
slack: true
optimization_options { apply_default_optimizations: true autotune: true }
distribute_options {}
)proto";
class GetOptionsParams : public DatasetParams {
public:
template <typename T>
GetOptionsParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(OptionsDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
return absl::OkStatus();
}
string dataset_type() const override { return "GetOptions"; }
string op_name() const override { return dataset_type(); }
private:
string serialized_options_;
};
class GetOptionsOpTest : public DatasetOpsTestBase {};
OptionsDatasetParams OptionsDatasetParams0() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
GetOptionsParams GetOptionsParams0() {
return GetOptionsParams(OptionsDatasetParams0(),
{DT_INT64},
{PartialTensorShape({})},
"get_options_0");
}
TEST_F(GetOptionsOpTest, Compute) {
auto test_case_params = GetOptionsParams0();
TF_ASSERT_OK(InitializeRuntime(test_case_params));
std::vector<Tensor> output;
TF_ASSERT_OK(RunDatasetOp(test_case_params, &output));
EXPECT_EQ(1, output.size());
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
Tensor expected_tensor =
CreateTensor<tstring>(TensorShape({}), {options.SerializeAsString()});
Tensor result_tensor = output[0];
string serialized_options = result_tensor.scalar<tstring>()();
Options result_options;
result_options.ParseFromString(serialized_options);
TF_EXPECT_OK(ExpectEqual(expected_tensor, result_tensor));
}
}
}
} |
1,553 | cpp | tensorflow/tensorflow | rewrite_dataset_op | tensorflow/core/kernels/data/rewrite_dataset_op.cc | tensorflow/core/kernels/data/rewrite_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_REWRITE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_REWRITE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class RewriteDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Rewrite";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kRewriteName = "rewrite_name";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit RewriteDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
};
}
}
#endif
#include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <map>
#include <string>
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const RewriteDatasetOp::kDatasetType;
constexpr const char* const RewriteDatasetOp::kInputDataset;
constexpr const char* const RewriteDatasetOp::kRewriteName;
constexpr const char* const RewriteDatasetOp::kOutputTypes;
constexpr const char* const RewriteDatasetOp::kOutputShapes;
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
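// Runs the named Grappler rewrite over the input dataset's graph (a single
// meta-optimizer pass that fails on optimizer errors) and emits the result.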
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
tstring rewrite_name;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kRewriteName, &rewrite_name));
auto config_factory = [rewrite_name]() {
RewriterConfig rewriter_config;
rewriter_config.add_optimizers(std::string(rewrite_name));
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.set_fail_on_optimizer_errors(true);
return rewriter_config;
};
core::RefCountPtr<DatasetBase> rewritten;
OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten));
*output = rewritten.release();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#else
namespace tensorflow {
namespace data {
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
input->Ref();
*output = input;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#endif | #include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "rewrite_dataset";
constexpr char kReplicateOnSplit[] = "replicate_on_split";
class RewriteDatasetParams : public DatasetParams {
public:
template <typename T>
RewriteDatasetParams(T input_dataset_params, string rewrite_name,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
rewrite_name_(rewrite_name) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<tstring>(TensorShape({}), {rewrite_name_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {RewriteDatasetOp::kInputDataset,
RewriteDatasetOp::kRewriteName};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
return absl::OkStatus();
}
string dataset_type() const override {
return RewriteDatasetOp::kDatasetType;
}
private:
string rewrite_name_;
};
class RewriteDatasetOpTest : public DatasetOpsTestBase {};
TEST_F(RewriteDatasetOpTest, ReplicateOnSplit) {
auto range_dataset_params = RangeDatasetParams(0, 5, 1);
auto rewrite_dataset_params =
RewriteDatasetParams(std::move(range_dataset_params),
kReplicateOnSplit,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
std::vector<Tensor> expected_outputs =
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}});
TF_ASSERT_OK(Initialize(rewrite_dataset_params));
TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true));
}
}
}
} |
1,554 | cpp | tensorflow/tensorflow | window_dataset_op | tensorflow/core/kernels/data/window_dataset_op.cc | tensorflow/core/kernels/data/window_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_WINDOW_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_WINDOW_DATASET_OP_H_
#include <vector>
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
namespace data {
class WindowDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Window";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kSize = "size";
static constexpr const char* const kShift = "shift";
static constexpr const char* const kStride = "stride";
static constexpr const char* const kDropRemainder = "drop_remainder";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit WindowDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/window_dataset_op.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/kernels/data/window_dataset.h"
#include "tensorflow/core/platform/stringprintf.h"
namespace tensorflow {
namespace data {
constexpr const char* const WindowDatasetOp::kDatasetType;
constexpr const char* const WindowDatasetOp::kInputDataset;
constexpr const char* const WindowDatasetOp::kSize;
constexpr const char* const WindowDatasetOp::kShift;
constexpr const char* const WindowDatasetOp::kStride;
constexpr const char* const WindowDatasetOp::kDropRemainder;
constexpr const char* const WindowDatasetOp::kOutputTypes;
constexpr const char* const WindowDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kBufferSize[] = "buffer_size";
constexpr char kBuffer[] = "buffer";
constexpr char kSizeSuffix[] = ".size";
constexpr char kCodeSuffix[] = ".code";
constexpr char kErrorMessage[] = ".error_message";
class WindowDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t window_size,
int64_t window_shift, int64_t window_stride, bool drop_remainder)
: DatasetBase(DatasetContext(ctx)),
input_(input),
window_size_(window_size),
window_shift_(window_shift),
window_stride_(window_stride),
drop_remainder_(drop_remainder),
output_dtypes_(input_->output_dtypes().size(), {DT_VARIANT}),
output_shapes_(input_->output_shapes().size(), TensorShape({})),
traceme_metadata_(
{{"window_size",
strings::Printf("%lld", static_cast<long long>(window_size))},
{"window_shift",
strings::Printf("%lld", static_cast<long long>(window_shift))},
{"window_stride", strings::Printf("%lld", static_cast<long long>(
window_stride))}}) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return output_dtypes_;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(window_size_, window_shift_, window_stride_,
drop_remainder_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
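// With drop_remainder_, only starting positions that can fill a whole strided
// window count; otherwise every window_shift_-spaced start yields a window.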
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
int64_t cardinality = 0;
if (drop_remainder_) {
int64_t rest_elements = n - ((window_size_ - 1) * window_stride_ + 1);
cardinality = rest_elements < 0 ? 0 : rest_elements / window_shift_ + 1;
} else {
cardinality = n / window_shift_ + (n % window_shift_ == 0 ? 0 : 1);
}
return cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* window_size_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_size_, &window_size_node));
Node* window_shift_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_shift_, &window_shift_node));
Node* window_stride_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_stride_, &window_stride_node));
Node* drop_remainder_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{input_graph_node, window_size_node, window_shift_node,
window_stride_node, drop_remainder_node},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
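// Buffers (window_size - 1) * window_stride + 1 input elements, emits each
// tuple component as a nested window dataset variant, then advances by
// window_shift elements.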
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const int64_t window_size = dataset()->window_size_;
const int64_t window_shift = dataset()->window_shift_;
const int64_t window_stride = dataset()->window_stride_;
std::vector<std::vector<Tensor>> window_elements;
Status status = absl::OkStatus();
{
const size_t target_size = TargetBufferSize(window_size, window_stride);
mutex_lock l(mu_);
if (!input_impl_ &&
(buffer_.empty() ||
(dataset()->drop_remainder_ && buffer_.size() < target_size))) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (input_impl_) {
*end_of_sequence = false;
for (size_t i = buffer_.size(); i < target_size && !*end_of_sequence;
++i) {
std::vector<Tensor> element;
Status status =
input_impl_->GetNext(ctx, &element, end_of_sequence);
if (!*end_of_sequence) {
RecordBufferEnqueue(ctx, element);
buffer_.emplace_back(std::move(element), status);
} else {
input_impl_.reset();
}
}
}
if (buffer_.empty() ||
(dataset()->drop_remainder_ && buffer_.size() < target_size)) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
int num_elements = 1 + (buffer_.size() - 1) / window_stride;
window_elements.reserve(num_elements);
for (size_t i = 0; i < num_elements; ++i) {
status.Update(buffer_[window_stride * i].status);
if (!status.ok()) {
break;
}
window_elements.emplace_back(buffer_[window_stride * i].result);
}
int buffer_size = buffer_.size();
if (window_shift >= buffer_size) {
for (size_t i = buffer_size; input_impl_ && i < window_shift; ++i) {
bool end_of_input;
std::vector<Tensor> element;
input_impl_->GetNext(ctx, &element, &end_of_input).IgnoreError();
if (end_of_input) {
input_impl_.reset();
}
}
for (size_t i = 0; i < buffer_.size(); ++i) {
RecordBufferDequeue(ctx, buffer_.at(i).result);
}
buffer_.clear();
} else {
for (size_t i = 0; i < window_shift; ++i) {
RecordBufferDequeue(ctx, buffer_.at(i).result);
}
buffer_.erase(buffer_.begin(), buffer_.begin() + window_shift);
}
}
if (!status.ok()) {
return status;
}
const size_t num_tuple_components = window_elements[0].size();
const int64_t num_window_elements = window_elements.size();
*end_of_sequence = false;
for (size_t idx = 0; idx < num_tuple_components; ++idx) {
DatasetBase* window_dataset;
std::vector<std::vector<Tensor>> window_component_elements;
window_component_elements.reserve(num_window_elements);
for (size_t i = 0; i < num_window_elements; ++i) {
std::vector<Tensor> component_element;
component_element.push_back(std::move(window_elements[i][idx]));
window_component_elements.push_back(component_element);
}
DataTypeVector output_types({dataset()->input_->output_dtypes()[idx]});
std::vector<PartialTensorShape> output_shapes(
{dataset()->input_->output_shapes()[idx]});
TF_RETURN_IF_ERROR(NewWindow(window_component_elements, output_types,
output_shapes, &window_dataset));
out_tensors->emplace_back(DT_VARIANT, TensorShape({}));
TF_RETURN_IF_ERROR(
StoreDatasetInVariantTensor(window_dataset, &out_tensors->back()));
}
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
dataset()->window_shift_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
if (!input_impl_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputImplEmpty, ""));
} else {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBufferSize, buffer_.size()));
for (int64_t i = 0; i < buffer_.size(); i++) {
TF_RETURN_IF_ERROR(WriteStatusLocked(writer, i, buffer_[i].status));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kBuffer, "[", i, "]", kSizeSuffix),
buffer_[i].result.size()));
for (int64_t j = 0; j < buffer_[i].result.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kBuffer, "[", i, "][", j, "]"),
buffer_[i].result[j]));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (!reader->Contains(prefix(), kInputImplEmpty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t buffer_size = 0;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBufferSize, &buffer_size));
buffer_.resize(buffer_size);
for (int64_t i = 0; i < buffer_size; i++) {
int64_t vector_size;
TF_RETURN_IF_ERROR(ReadStatusLocked(reader, i, &buffer_[i].status));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kBuffer, "[", i, "]", kSizeSuffix),
&vector_size));
buffer_[i].result.resize(vector_size);
for (int64_t j = 0; j < vector_size; j++) {
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), prefix(),
strings::StrCat(kBuffer, "[", i, "][", j, "]"),
&buffer_[i].result[j]));
}
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
struct InvocationResult {
InvocationResult() = default;
InvocationResult(std::vector<Tensor>&& result, const Status& status)
: result(result), status(status) {}
std::vector<Tensor> result;
Status status;
};
Status WriteStatusLocked(IteratorStateWriter* writer, size_t index,
const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), CodeKey(index), static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), ErrorMessageKey(index),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader, size_t index,
Status* status) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), CodeKey(index), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), ErrorMessageKey(index),
&error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
string CodeKey(size_t index) {
return strings::StrCat(kBuffer, "[", index, "]", kCodeSuffix);
}
string ErrorMessageKey(size_t index) {
return strings::StrCat(kBuffer, "[", index, "]", kErrorMessage);
}
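// Number of buffered input elements needed to emit one full window.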
size_t TargetBufferSize(int64_t window_size, int64_t window_stride) {
return (window_size - 1) * window_stride + 1;
}
mutex mu_;
std::deque<InvocationResult> buffer_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
const int64_t window_size_;
const int64_t window_shift_;
const int64_t window_stride_;
const bool drop_remainder_;
const DataTypeVector output_dtypes_;
const std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
};
WindowDatasetOp::WindowDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void WindowDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t window_size = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSize, &window_size));
OP_REQUIRES(
ctx, window_size > 0,
errors::InvalidArgument("Window size must be greater than zero."));
int64_t window_shift = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kShift, &window_shift));
OP_REQUIRES(
ctx, window_shift > 0,
errors::InvalidArgument("Window shift must be greater than zero."));
int64_t window_stride = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kStride, &window_stride));
OP_REQUIRES(
ctx, window_stride > 0,
errors::InvalidArgument("Window stride must be greater than zero."));
bool drop_remainder;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
*output = new Dataset(ctx, input, window_size, window_shift, window_stride,
drop_remainder);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("WindowDataset").Device(DEVICE_CPU),
WindowDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/window_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "window_dataset";
class WindowDatasetParams : public DatasetParams {
public:
template <typename T>
WindowDatasetParams(T input_dataset_params, int64_t size, int64_t shift,
int64_t stride, bool drop_remainder,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
size_(size),
shift_(shift),
stride_(stride),
drop_remainder_(drop_remainder) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {size_}),
CreateTensor<int64_t>(TensorShape({}), {shift_}),
CreateTensor<int64_t>(TensorShape({}), {stride_}),
CreateTensor<bool>(TensorShape({}), {drop_remainder_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(WindowDatasetOp::kInputDataset);
input_names->emplace_back(WindowDatasetOp::kSize);
input_names->emplace_back(WindowDatasetOp::kShift);
input_names->emplace_back(WindowDatasetOp::kStride);
input_names->emplace_back(WindowDatasetOp::kDropRemainder);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return WindowDatasetOp::kDatasetType; }
private:
int64_t size_;
int64_t shift_;
int64_t stride_;
bool drop_remainder_;
};
class WindowDatasetOpTest : public DatasetOpsTestBase {};
WindowDatasetParams WindowDatasetParams1() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
1,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams2() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams3() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
8,
3,
1,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams4() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
8,
3,
1,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams5() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
8,
1,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams6() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
8,
1,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams7() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
8,
false,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams8() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
8,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams9() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
4,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParams10() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
5,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParamsWithInvalidWindowSize() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
0,
2,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParamswithInvalidWindowShift() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
0,
2,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
WindowDatasetParams WindowDatasetParamsWithInvalidWindowStride() {
return WindowDatasetParams(RangeDatasetParams(0, 7, 1),
2,
2,
0,
true,
{DT_VARIANT},
{PartialTensorShape({})},
kNodeName);
}
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<GetNextTestCase<WindowDatasetParams>> GetNextTestCases() {
return {{WindowDatasetParams1(),
{CreateTensors<int64_t>(TensorShape{}, {{0}, {1}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {3}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}}),
CreateTensors<int64_t>(TensorShape{}, {{6}})}},
{WindowDatasetParams2(),
{CreateTensors<int64_t>(TensorShape{}, {{0}, {2}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {4}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {6}})}},
{WindowDatasetParams3(),
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams4(),
{}},
{WindowDatasetParams5(),
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams6(),
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams7(),
{CreateTensors<int64_t>(TensorShape({}), {{0}}),
CreateTensors<int64_t>(TensorShape({}), {{2}}),
CreateTensors<int64_t>(TensorShape({}), {{4}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams8(),
{}},
{WindowDatasetParams9(),
{CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}})}},
{WindowDatasetParams10(),
{}}};
}
class ParameterizedGetNextTest : public WindowDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<WindowDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
auto expected_outputs_it = test_case.expected_outputs.begin();
while (!end_of_sequence) {
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& window_dataset_tensor : out_tensors) {
DatasetBase* window_dataset;
TF_ASSERT_OK(GetDatasetFromVariantTensor(window_dataset_tensor,
&window_dataset));
std::unique_ptr<IteratorBase> window_dataset_iterator;
TF_ASSERT_OK(window_dataset->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(),
&window_dataset_iterator));
bool end_of_window_dataset = false;
std::vector<Tensor> window_elements;
while (!end_of_window_dataset) {
std::vector<Tensor> next_element;
TF_EXPECT_OK(window_dataset_iterator->GetNext(
iterator_ctx_.get(), &next_element, &end_of_window_dataset));
window_elements.insert(window_elements.end(), next_element.begin(),
next_element.end());
}
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(window_elements, *expected_outputs_it, false));
expected_outputs_it++;
}
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
INSTANTIATE_TEST_CASE_P(WindowDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(WindowDatasetOpTest, DatasetTypeString) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(WindowDatasetOp::kDatasetType)));
}
TEST_F(WindowDatasetOpTest, DatasetNodeName) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(WindowDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(WindowDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<WindowDatasetParams>>
DatasetCardinalityTestCases() {
return {{WindowDatasetParams1(),
4},
{WindowDatasetParams2(),
3},
{WindowDatasetParams3(),
3},
{WindowDatasetParams4(),
0},
{WindowDatasetParams5(),
1},
{WindowDatasetParams6(),
1},
{WindowDatasetParams7(),
4},
{WindowDatasetParams8(),
0},
{WindowDatasetParams9(),
1},
{WindowDatasetParams10(),
0}};
}
DATASET_CARDINALITY_TEST_P(WindowDatasetOpTest, WindowDatasetParams,
DatasetCardinalityTestCases())
TEST_F(WindowDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(WindowDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(WindowDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = WindowDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
WindowDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<WindowDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{WindowDatasetParams1(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape{}, {{0}, {1}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {3}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {5}}),
CreateTensors<int64_t>(TensorShape{}, {{6}})}},
{WindowDatasetParams2(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape{}, {{0}, {2}}),
CreateTensors<int64_t>(TensorShape{}, {{2}, {4}}),
CreateTensors<int64_t>(TensorShape{}, {{4}, {6}})}},
{WindowDatasetParams3(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{3}, {4}, {5}, {6}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams4(),
{0, 1, 9},
{}},
{WindowDatasetParams5(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams6(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}})}},
{WindowDatasetParams7(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}}),
CreateTensors<int64_t>(TensorShape({}), {{2}}),
CreateTensors<int64_t>(TensorShape({}), {{4}}),
CreateTensors<int64_t>(TensorShape({}), {{6}})}},
{WindowDatasetParams8(),
{0, 1, 9},
{}},
{WindowDatasetParams9(),
{0, 1, 9},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}})}},
{WindowDatasetParams10(),
{0, 1, 9},
{}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public WindowDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<WindowDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
std::unique_ptr<SerializationContext> window_serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&window_serialization_ctx));
bool end_of_sequence = false;
auto expected_outputs_it = test_case.expected_outputs.begin();
int cur_iteration = 0;
for (int breakpoint : test_case.breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
while (!end_of_sequence) {
std::vector<Tensor> out_tensors;
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& window_dataset_tensor : out_tensors) {
DatasetBase* window_dataset;
TF_ASSERT_OK(GetDatasetFromVariantTensor(window_dataset_tensor,
&window_dataset));
std::unique_ptr<IteratorBase> window_dataset_iterator;
TF_ASSERT_OK(window_dataset->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(),
&window_dataset_iterator));
bool end_of_window_dataset = false;
std::vector<Tensor> window_elements;
VariantTensorDataWriter window_dataset_writer;
std::vector<const VariantTensorData*> window_dataset_data;
TF_EXPECT_OK(window_dataset_iterator->Save(
window_serialization_ctx.get(), &window_dataset_writer));
window_dataset_writer.GetData(&window_dataset_data);
VariantTensorDataReader window_reader(window_dataset_data);
while (!end_of_window_dataset) {
std::vector<Tensor> next_element;
TF_EXPECT_OK(window_dataset_iterator->GetNext(
iterator_ctx_.get(), &next_element, &end_of_window_dataset));
}
TF_EXPECT_OK(
RestoreIterator(iterator_ctx_.get(), &window_reader,
test_case.dataset_params.iterator_prefix(),
*window_dataset, &window_dataset_iterator));
end_of_window_dataset = false;
while (!end_of_window_dataset) {
std::vector<Tensor> next_element;
TF_EXPECT_OK(window_dataset_iterator->GetNext(
iterator_ctx_.get(), &next_element, &end_of_window_dataset));
window_elements.insert(window_elements.end(),
next_element.begin(), next_element.end());
}
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(
ExpectEqual(window_elements, *expected_outputs_it, false));
expected_outputs_it++;
}
}
}
cur_iteration++;
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
INSTANTIATE_TEST_CASE_P(WindowDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
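// Each of the invalid parameter sets (bad window size, shift, or stride) must
// cause dataset initialization to fail with InvalidArgument.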
TEST_F(WindowDatasetOpTest, InvalidArguments) {
std::vector<WindowDatasetParams> dataset_params_vec(
{WindowDatasetParamsWithInvalidWindowSize(),
WindowDatasetParamswithInvalidWindowShift(),
WindowDatasetParamsWithInvalidWindowStride()});
for (const auto& dataset_params : dataset_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,555 | cpp | tensorflow/tensorflow | padded_batch_dataset_op | tensorflow/core/kernels/data/padded_batch_dataset_op.cc | tensorflow/core/kernels/data/padded_batch_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_PADDED_BATCH_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PADDED_BATCH_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class PaddedBatchDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "PaddedBatch";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kBatchSize = "batch_size";
static constexpr const char* const kPaddedShapes = "padded_shapes";
static constexpr const char* const kPaddingValues = "padding_values";
static constexpr const char* const kDropRemainder = "drop_remainder";
static constexpr const char* const kParallelCopy = "parallel_copy";
static constexpr const char* const kToutputTypes = "Toutput_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kNumPaddedShapes = "N";
explicit PaddedBatchDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
const int op_version_;
bool parallel_copy_ = false;
};
}
}
#endif
#include "tensorflow/core/kernels/data/padded_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const PaddedBatchDatasetOp::kDatasetType;
constexpr const char* const PaddedBatchDatasetOp::kInputDataset;
constexpr const char* const PaddedBatchDatasetOp::kBatchSize;
constexpr const char* const PaddedBatchDatasetOp::kPaddedShapes;
constexpr const char* const PaddedBatchDatasetOp::kPaddingValues;
constexpr const char* const PaddedBatchDatasetOp::kDropRemainder;
constexpr const char* const PaddedBatchDatasetOp::kParallelCopy;
constexpr const char* const PaddedBatchDatasetOp::kToutputTypes;
constexpr const char* const PaddedBatchDatasetOp::kOutputShapes;
constexpr const char* const PaddedBatchDatasetOp::kNumPaddedShapes;
constexpr char kExhausted[] = "exhausted";
class PaddedBatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, bool drop_remainder,
bool parallel_copy, std::vector<PartialTensorShape> padded_shapes,
std::vector<Tensor> padding_values, const DatasetBase* input,
int op_version)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
padded_shapes_(std::move(padded_shapes)),
padding_values_(std::move(padding_values)),
input_(input),
op_version_(op_version),
traceme_metadata_(
{{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
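    // The leading (batch) dimension is statically known only when every batch
    // is guaranteed to be full (remainder dropped or infinite input);
    // otherwise it is left as -1.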
for (size_t i = 0; i < input_shapes.size(); ++i) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.push_back(
PartialTensorShape({batch_size_}).Concatenate(padded_shapes_[i]));
} else {
output_shapes_.push_back(
PartialTensorShape({-1}).Concatenate(padded_shapes_[i]));
}
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
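  // Cardinality is ceil(n / batch_size_), or floor(n / batch_size_) when
  // drop_remainder_ is set; infinite and unknown input cardinalities pass
  // through unchanged.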
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
std::vector<Node*> padded_shapes;
padded_shapes.reserve(padded_shapes_.size());
for (int i = 0; i < padded_shapes_.size(); i++) {
Node* node;
Tensor t(DT_INT64, TensorShape({padded_shapes_[i].dims()}));
for (int j = 0; j < padded_shapes_[i].dims(); j++) {
t.vec<int64_t>()(j) = padded_shapes_[i].dim_size(j);
}
TF_RETURN_IF_ERROR(b->AddTensor(t, &node));
padded_shapes.emplace_back(node);
}
std::vector<Node*> padding_values;
padding_values.reserve(padding_values_.size());
for (const Tensor& t : padding_values_) {
Node* node;
TF_RETURN_IF_ERROR(b->AddTensor(t, &node));
padding_values.emplace_back(node);
}
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
AttrValue parallel_copy;
b->BuildAttrValue(parallel_copy_, ¶llel_copy);
AttrValue output_types;
b->BuildAttrValue(output_dtypes(), &output_types);
AttrValue N;
b->BuildAttrValue<int64_t>(padded_shapes_.size(), &N);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_graph_node}, {1, batch_size}, {4, drop_remainder}},
{{2, padded_shapes}, {3, padding_values}},
{{kParallelCopy, parallel_copy},
{kToutputTypes, output_types},
{kNumPaddedShapes, N}},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
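      // Gather up to batch_size_ elements from the input, then pad and copy
      // them into the output batch; a partial final batch is emitted unless
      // drop_remainder_ is set.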
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
} else {
*end_of_sequence = false;
batch_elements.reserve(dataset()->batch_size_);
for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence;
++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
end_of_sequence));
if (!*end_of_sequence) {
batch_elements.push_back(std::move(batch_element_tuple));
}
}
if (*end_of_sequence) {
input_impl_.reset();
}
}
}
if (batch_elements.empty()) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
if (dataset()->drop_remainder_ &&
batch_elements.size() < dataset()->batch_size_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CopyBatch(ctx, batch_elements, out_tensors));
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), dataset()->batch_size_);
}
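    // Checkpointing records whether the input has been exhausted plus the
    // input iterator's state; RestoreInternal recreates the input iterator
    // only if it was not yet exhausted.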
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kExhausted, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_exhausted;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kExhausted, &input_exhausted));
if (static_cast<bool>(input_exhausted)) {
input_impl_.reset();
} else {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
Status CopyBatch(IteratorContext* ctx,
const std::vector<std::vector<Tensor>>& batch_elements,
std::vector<Tensor>* out_tensors) {
const size_t num_tuple_components = batch_elements[0].size();
const int64_t num_batch_elements = batch_elements.size();
for (size_t component_index = 0; component_index < num_tuple_components;
++component_index) {
TensorShape batch_component_shape({num_batch_elements});
const PartialTensorShape& padded_shape =
dataset()->padded_shapes_[component_index];
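        // Seed the batch component shape with the statically known padded
        // dims; dims given as -1 start at 0 and are grown below to the
        // largest size found across the batch elements.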
for (int dim = 0; dim < padded_shape.dims(); ++dim) {
if (padded_shape.dim_size(dim) == -1) {
TF_RETURN_IF_ERROR(batch_component_shape.AddDimWithStatus(0));
} else {
TF_RETURN_IF_ERROR(batch_component_shape.AddDimWithStatus(
padded_shape.dim_size(dim)));
}
}
for (int64_t i = 0; i < num_batch_elements; ++i) {
const TensorShape& element_shape =
batch_elements[i][component_index].shape();
if (element_shape.dims() != padded_shape.dims()) {
return errors::InvalidArgument(
"All elements in a batch must have the same rank as the "
"padded shape for component",
component_index, ": expected rank ", padded_shape.dims(),
" but got element with rank ", element_shape.dims());
}
for (int dim = 0; dim < padded_shape.dims(); ++dim) {
if (padded_shape.dim_size(dim) == -1) {
if (batch_elements[i][component_index].shape().dim_size(dim) >
batch_component_shape.dim_size(dim + 1)) {
batch_component_shape.set_dim(
dim + 1,
batch_elements[i][component_index].shape().dim_size(dim));
}
} else {
if (batch_elements[i][component_index].shape().dim_size(dim) >
batch_component_shape.dim_size(dim + 1)) {
return errors::DataLoss(
"Attempted to pad to a smaller size than the input "
"element.");
}
}
}
}
out_tensors->emplace_back(ctx->allocator({}),
output_dtypes()[component_index],
batch_component_shape);
Tensor& batch_component = out_tensors->back();
TF_RETURN_IF_ERROR(batch_util::SetElementZero(
&batch_component, dataset()->padding_values_[component_index]));
TensorShape component_shape({});
for (int i = 1; i < batch_component_shape.dims(); ++i) {
TF_RETURN_IF_ERROR(component_shape.AddDimWithStatus(
batch_component_shape.dim_size(i)));
}
auto copy_element_fn = [component_index, &batch_elements,
&batch_component, &component_shape](int index) {
if (batch_elements[index][component_index].shape() ==
component_shape) {
TF_RETURN_IF_ERROR(batch_util::CopyElementToSlice(
batch_elements[index][component_index], &batch_component,
index));
} else {
TF_RETURN_IF_ERROR(batch_util::CopyElementToLargerSlice(
batch_elements[index][component_index], &batch_component,
index));
}
return absl::OkStatus();
};
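        // Use the threadpool only when parallel_copy is requested and each
        // element averages at least 32 KiB (1 << 15 bytes); smaller elements
        // are copied sequentially.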
if (dataset()->parallel_copy_ && (batch_component.AllocatedBytes() /
num_batch_elements) >= (1 << 15)) {
BlockingCounter counter(num_batch_elements);
Status status;
mutex status_mu;
const auto num_threads = ctx->runner_threadpool_size();
const auto slice_size = num_batch_elements / num_threads;
int64_t offset = 0;
for (size_t i = 0; i < num_threads; ++i) {
int64_t length = slice_size;
if (i < num_batch_elements % num_threads) ++length;
(*ctx->runner())([offset, length, &status, &status_mu, &counter,
©_element_fn]() {
for (size_t j = offset; j < offset + length; ++j) {
{
Status s = copy_element_fn(j);
mutex_lock l(status_mu);
status.Update(s);
}
counter.DecrementCount();
}
});
offset += length;
}
counter.Wait();
TF_RETURN_IF_ERROR(status);
} else {
for (size_t i = 0; i < num_batch_elements; ++i) {
TF_RETURN_IF_ERROR(copy_element_fn(i));
}
}
}
return absl::OkStatus();
}
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t batch_size_;
const bool drop_remainder_;
const bool parallel_copy_;
const std::vector<PartialTensorShape> padded_shapes_;
const std::vector<Tensor> padding_values_;
const DatasetBase* const input_;
const int op_version_;
std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
};
PaddedBatchDatasetOp::PaddedBatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->def().op() == "PaddedBatchDataset" ? 1 : 2) {
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, ¶llel_copy_));
}
}
void PaddedBatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t batch_size;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
bool drop_remainder = false;
if (op_version_ > 1) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
}
OpInputList padded_shape_tensors;
OP_REQUIRES_OK(ctx, ctx->input_list(kPaddedShapes, &padded_shape_tensors));
std::vector<PartialTensorShape> padded_shapes;
padded_shapes.reserve(padded_shape_tensors.size());
OP_REQUIRES(ctx, padded_shape_tensors.size() == input->output_shapes().size(),
errors::InvalidArgument("Number of padded shapes (",
padded_shape_tensors.size(),
") must match the number of components "
"in the input dataset's elements (",
input->output_shapes().size(), ")"));
for (const Tensor& padded_shape_t : padded_shape_tensors) {
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(padded_shape_t.shape()),
errors::InvalidArgument("All padded shapes must be vectors"));
PartialTensorShape padded_shape;
OP_REQUIRES_OK(ctx, PartialTensorShape::MakePartialShape(
padded_shape_t.vec<int64_t>().data(),
padded_shape_t.NumElements(), &padded_shape));
padded_shapes.push_back(std::move(padded_shape));
}
OpInputList padding_values_list;
OP_REQUIRES_OK(ctx, ctx->input_list(kPaddingValues, &padding_values_list));
std::vector<Tensor> padding_values;
OP_REQUIRES(ctx, padding_values_list.size() == input->output_shapes().size(),
errors::InvalidArgument(
"Number of padding values (", padding_values_list.size(),
") must match the number of components in the input "
"dataset's elements (",
input->output_shapes().size(), ")"));
for (int i = 0; i < padding_values_list.size(); ++i) {
const Tensor& padding_value_t = padding_values_list[i];
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(padding_value_t.shape()),
errors::InvalidArgument("All padding values must be scalars"));
OP_REQUIRES(ctx, padding_value_t.dtype() == input->output_dtypes()[i],
errors::InvalidArgument(
"Mismatched type between padding value ", i,
" and input dataset's component ", i, ": ",
DataTypeString(padding_value_t.dtype()), " vs. ",
DataTypeString(input->output_dtypes()[i])));
padding_values.push_back(tensor::DeepCopy(padding_value_t));
}
*output = new Dataset(ctx, batch_size, drop_remainder, parallel_copy_,
std::move(padded_shapes), std::move(padding_values),
input, op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("PaddedBatchDataset").Device(DEVICE_CPU),
PaddedBatchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("PaddedBatchDatasetV2").Device(DEVICE_CPU),
PaddedBatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/padded_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "padded_batch_dataset";
constexpr int kOpVersion = 2;
class PaddedBatchDatasetOpTest : public DatasetOpsTestBase {};
class PaddedBatchDatasetParams : public DatasetParams {
public:
template <typename T>
PaddedBatchDatasetParams(T input_dataset_params, int64_t batch_size,
std::vector<Tensor> padded_shapes,
std::vector<Tensor> padded_values,
bool drop_remainder, bool parallel_copy,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_padded_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
batch_size_(batch_size),
padded_shapes_(std::move(padded_shapes)),
padded_values_(std::move(padded_values)),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
num_padded_shapes_(num_padded_shapes) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {batch_size_}));
for (auto& padded_shape : padded_shapes_) {
input_tensors.emplace_back(padded_shape);
}
for (auto& padded_value : padded_values_) {
input_tensors.emplace_back(padded_value);
}
input_tensors.emplace_back(
CreateTensor<bool>(TensorShape({}), {drop_remainder_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {PaddedBatchDatasetOp::kInputDataset,
PaddedBatchDatasetOp::kBatchSize};
for (int i = 0; i < num_padded_shapes_; ++i) {
input_names->emplace_back(
strings::StrCat(PaddedBatchDatasetOp::kPaddedShapes, "_", i));
}
for (int j = 0; j < padded_values_.size(); ++j) {
input_names->emplace_back(
strings::StrCat(PaddedBatchDatasetOp::kPaddingValues, "_", j));
}
input_names->push_back(PaddedBatchDatasetOp::kDropRemainder);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"parallel_copy", parallel_copy_},
{"Toutput_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"N", num_padded_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return PaddedBatchDatasetOp::kDatasetType;
}
private:
int64_t batch_size_;
std::vector<Tensor> padded_shapes_;
std::vector<Tensor> padded_values_;
bool drop_remainder_;
bool parallel_copy_;
int num_padded_shapes_;
};
PaddedBatchDatasetParams PaddedBatchDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(
TensorShape{7, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})},
"tensor_slice");
  return PaddedBatchDatasetParams(
      /*input_dataset_params=*/tensor_slice_dataset_params,
      /*batch_size=*/2,
      /*padded_shapes=*/{CreateTensor<int64_t>(TensorShape{1}, {3})},
      /*padded_values=*/{CreateTensor<int64_t>(TensorShape{}, {1})},
      /*drop_remainder=*/true,
      /*parallel_copy=*/true,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({2, 3})},
      /*num_padded_shapes=*/1,
      /*node_name=*/kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams2() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
true,
true,
{DT_INT64},
{PartialTensorShape({2, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams3() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({2, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams4() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 1}, {{6, 7, 8}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, 3})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams5() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {-1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
false,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams6() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({-1})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {-1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParams7() {
return PaddedBatchDatasetParams(
RangeDatasetParams(0, 0, 1),
2,
{CreateTensor<int64_t>(TensorShape{1}, {-1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParamsWithShortPaddingShape() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {1})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidPaddingShape() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{2}, {1, 2})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidBatchSize() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
-1,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingShapesSize() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3}),
CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
2,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingValuesSize() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{}, {1}),
CreateTensor<int64_t>(TensorShape{}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
2,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingValuesDType() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<tstring>(TensorShape{}, {"a"})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
PaddedBatchDatasetParams
PaddedBatchDatasetParamsWithInvalidPaddingValuesShape() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{0, 1, 2, 3, 4, 5}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{3, 2},
{{6, 7, 8, 9, 10, 11}}),
"tensor_slice_1");
auto concatenate_dataset_params =
ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
"concatenate");
return PaddedBatchDatasetParams(
concatenate_dataset_params,
2,
{CreateTensor<int64_t>(TensorShape{1}, {3})},
{CreateTensor<int64_t>(TensorShape{1}, {1})},
false,
true,
{DT_INT64},
{PartialTensorShape({-1, -1})},
1,
kNodeName);
}
std::vector<GetNextTestCase<PaddedBatchDatasetParams>> GetNextTestCases() {
return {{PaddedBatchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 7, 1}, {8, 9, 1, 10, 11, 1}})},
{PaddedBatchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape{2, 3}, {0, 1, 1, 2, 3, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {4, 5, 1, 6, 1, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {7, 1, 1, 8, 1, 1}),
CreateTensor<int64_t>(TensorShape{1, 3}, {9, 1, 1})}},
{PaddedBatchDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams5(),
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(TensorShape{1, 1}, {9})}},
{PaddedBatchDatasetParams6(),
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(TensorShape{1, 1}, {9})}},
{PaddedBatchDatasetParams7(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
GetNextTestCases())
TEST_F(PaddedBatchDatasetOpTest, DatasetNodeName) {
auto dataset_params = PaddedBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(PaddedBatchDatasetOpTest, DatasetTypeString) {
auto dataset_params = PaddedBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(PaddedBatchDatasetOp::kDatasetType, params)));
}
std::vector<DatasetOutputDtypesTestCase<PaddedBatchDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{PaddedBatchDatasetParams1(),
{DT_INT64}},
{PaddedBatchDatasetParams2(),
{DT_INT64}},
{PaddedBatchDatasetParams3(),
{DT_INT64}},
{PaddedBatchDatasetParams4(),
{DT_INT64}},
{PaddedBatchDatasetParams5(),
{DT_INT64}},
{PaddedBatchDatasetParams6(),
{DT_INT64}},
{PaddedBatchDatasetParams7(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<PaddedBatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{PaddedBatchDatasetParams1(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams2(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams3(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams4(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams5(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams6(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams7(),
{PartialTensorShape({-1, -1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<PaddedBatchDatasetParams>>
CardinalityTestCases() {
  return {{PaddedBatchDatasetParams1(), /*expected_cardinality=*/3},
          {PaddedBatchDatasetParams2(), /*expected_cardinality=*/3},
          {PaddedBatchDatasetParams3(), /*expected_cardinality=*/4},
          {PaddedBatchDatasetParams4(), /*expected_cardinality=*/3},
          {PaddedBatchDatasetParams5(), /*expected_cardinality=*/4},
          {PaddedBatchDatasetParams6(), /*expected_cardinality=*/4},
          {PaddedBatchDatasetParams7(), /*expected_cardinality=*/0}};
}
DATASET_CARDINALITY_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<PaddedBatchDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{PaddedBatchDatasetParams1(),
{DT_INT64}},
{PaddedBatchDatasetParams2(),
{DT_INT64}},
{PaddedBatchDatasetParams3(),
{DT_INT64}},
{PaddedBatchDatasetParams4(),
{DT_INT64}},
{PaddedBatchDatasetParams5(),
{DT_INT64}},
{PaddedBatchDatasetParams6(),
{DT_INT64}},
{PaddedBatchDatasetParams7(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(PaddedBatchDatasetOpTest,
PaddedBatchDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<PaddedBatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{PaddedBatchDatasetParams1(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams2(),
{PartialTensorShape({2, 3})}},
{PaddedBatchDatasetParams3(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams4(),
{PartialTensorShape({-1, 3})}},
{PaddedBatchDatasetParams5(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams6(),
{PartialTensorShape({-1, -1})}},
{PaddedBatchDatasetParams7(),
{PartialTensorShape({-1, -1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(PaddedBatchDatasetOpTest,
PaddedBatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(PaddedBatchDatasetOpTest, IteratorPrefix) {
auto dataset_params = PaddedBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(PaddedBatchDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<PaddedBatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PaddedBatchDatasetParams1(),
{0, 2, 5},
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 7, 1}, {8, 9, 1, 10, 11, 1}})},
{PaddedBatchDatasetParams2(),
{0, 2, 5},
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams3(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{2, 3}, {0, 1, 1, 2, 3, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {4, 5, 1, 6, 1, 1}),
CreateTensor<int64_t>(TensorShape{2, 3}, {7, 1, 1, 8, 1, 1}),
CreateTensor<int64_t>(TensorShape{1, 3}, {9, 1, 1})}},
{PaddedBatchDatasetParams4(),
{0, 2, 5},
CreateTensors<int64_t>(
TensorShape{2, 3},
{{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})},
{PaddedBatchDatasetParams5(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}),
CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}),
CreateTensor<int64_t>(T |
1,556 | cpp | tensorflow/tensorflow | filter_dataset_op | tensorflow/core/kernels/data/filter_dataset_op.cc | tensorflow/core/kernels/data/filter_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_FILTER_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_FILTER_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class FilterDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Filter";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kPredicate = "predicate";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit FilterDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
};
}
}
#endif
#include "tensorflow/core/kernels/data/filter_dataset_op.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const FilterDatasetOp::kDatasetType;
constexpr const char* const FilterDatasetOp::kInputDataset;
constexpr const char* const FilterDatasetOp::kOtherArguments;
constexpr const char* const FilterDatasetOp::kPredicate;
constexpr const char* const FilterDatasetOp::kTarguments;
constexpr const char* const FilterDatasetOp::kOutputTypes;
constexpr const char* const FilterDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kFilteredElements[] = "filtered_elements";
constexpr char kDroppedElements[] = "dropped_elements";
class FilterDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_graph_node}}, {{1, other_arguments}},
{{kPredicate, f}, {kTarguments, other_arguments_types_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
filtered_elements_(0),
dropped_elements_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
auto stats_aggregator = ctx->stats_aggregator();
bool matched;
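      // Keep pulling elements from the input until one satisfies the
      // predicate or the input is exhausted; rejected elements are dropped
      // (and counted when a stats aggregator is attached).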
do {
{
tf_shared_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
}
if (*end_of_sequence) {
mutex_lock l(mu_);
input_impl_.reset();
return absl::OkStatus();
}
std::vector<Tensor> result;
auto status = instantiated_captured_func_->RunWithBorrowedArgs(
ctx, *out_tensors, &result, model_node());
if (!status.ok()) {
return AddErrorContext(status);
}
if (result.size() != 1 || result[0].dtype() != DT_BOOL ||
result[0].NumElements() != 1) {
out_tensors->clear();
return errors::InvalidArgument(
"Filter predicate `f` must return a scalar bool.");
}
matched = result[0].scalar<bool>()();
if (!matched) {
out_tensors->clear();
{
mutex_lock l(mu_);
dropped_elements_++;
}
if (stats_aggregator) {
mutex_lock l(mu_);
stats_aggregator->AddScalar(
stats_utils::DroppedElementsScalarName(dataset()->node_name()),
static_cast<float>(dropped_elements_), num_elements());
stats_aggregator->IncrementCounter(dataset()->node_name(),
stats_utils::kDroppedElements,
static_cast<float>(1));
}
}
} while (!matched);
{
mutex_lock l(mu_);
filtered_elements_++;
}
if (stats_aggregator) {
mutex_lock l(mu_);
stats_aggregator->AddScalar(
stats_utils::FilterdElementsScalarName(dataset()->node_name()),
static_cast<float>(filtered_elements_), num_elements());
stats_aggregator->IncrementCounter(dataset()->node_name(),
stats_utils::kFilteredElements,
static_cast<float>(1));
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeUnknownRatioNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kFilteredElements, filtered_elements_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kDroppedElements, dropped_elements_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(input_empty)) {
input_impl_.reset();
} else {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
}
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kFilteredElements, &filtered_elements_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kDroppedElements, &dropped_elements_));
return absl::OkStatus();
}
data::TraceMeMetadata GetTraceMeMetadata() const override {
tf_shared_lock l(mu_);
data::TraceMeMetadata result;
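      // "passed" reports elements that satisfied the predicate
      // (filtered_elements_); "filtered" reports the ones that were dropped.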
result.push_back(std::make_pair(
"passed",
strings::Printf("%lld", static_cast<long long>(filtered_elements_))));
result.push_back(std::make_pair(
"filtered",
strings::Printf("%lld", static_cast<long long>(dropped_elements_))));
return result;
}
private:
mutable mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t filtered_elements_ TF_GUARDED_BY(mu_);
int64_t dropped_elements_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
};
FilterDatasetOp::FilterDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kPredicate, {},
&func_metadata_));
OP_REQUIRES(ctx, func_metadata_->short_circuit_info().indices.size() <= 1,
errors::InvalidArgument(
"predicate function has more than one return value."));
}
void FilterDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FilterDataset").Device(DEVICE_CPU),
FilterDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FilterDataset");
}
}
} | #include "tensorflow/core/kernels/data/filter_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "filter_dataset";
class FilterDatasetParams : public DatasetParams {
public:
template <typename T>
FilterDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper pred_func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
pred_func_(std::move(pred_func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return other_arguments_;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size());
input_names->emplace_back(FilterDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(FilterDatasetOp::kOtherArguments, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"predicate", pred_func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override { return FilterDatasetOp::kDatasetType; }
private:
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper pred_func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class FilterDatasetOpTest : public DatasetOpsTestBase {};
FilterDatasetParams FilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
  return FilterDatasetParams(
      /*input_dataset_params=*/std::move(tensor_slice_dataset_params),
      /*other_arguments=*/{},
      /*pred_func=*/FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      /*func_lib=*/{test::function::IsZero()},
      /*type_arguments=*/{},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
FilterDatasetParams FilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("GetUnique",
{{"T", DT_INT64}, {"out_idx", DT_INT32}}),
{test::function::Unique()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("NonZero", {{"T", DT_INT64}}),
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<FilterDatasetParams>> GetNextTestCases() {
return {{FilterDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{FilterDatasetParams2(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
GetNextTestCases())
TEST_F(FilterDatasetOpTest, DatasetNodeName) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FilterDatasetOpTest, DatasetTypeString) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FilterDatasetOp::kDatasetType)));
}
TEST_F(FilterDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<FilterDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FilterDatasetParams1(),
{PartialTensorShape({1})}},
{FilterDatasetParams2(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<FilterDatasetParams>> CardinalityTestCases() {
return {{FilterDatasetParams1(),
kUnknownCardinality},
{FilterDatasetParams2(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
CardinalityTestCases())
TEST_F(FilterDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<FilterDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FilterDatasetParams1(),
{PartialTensorShape({1})}},
{FilterDatasetParams2(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(FilterDatasetOpTest, IteratorPrefix) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FilterDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<FilterDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FilterDatasetParams1(),
{0, 2, 6},
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{FilterDatasetParams2(),
{0, 2, 6},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
IteratorSaveAndRestoreTestCases())
class ParameterizedInvalidPredicateFuncTest
: public FilterDatasetOpTest,
public ::testing::WithParamInterface<FilterDatasetParams> {};
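// Every parameterized case supplies a predicate that does not return a single
// scalar bool, so GetNext must fail with InvalidArgument and yield no output
// tensors.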
TEST_P(ParameterizedInvalidPredicateFuncTest, InvalidPredicateFunc) {
auto dataset_params = GetParam();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_SUITE_P(
FilterDatasetOpTest, ParameterizedInvalidPredicateFuncTest,
::testing::ValuesIn({InvalidPredFuncFilterDatasetParams1(),
InvalidPredFuncFilterDatasetParams2(),
InvalidPredFuncFilterDatasetParams3()}));
}
}
} |
1,557 | cpp | tensorflow/tensorflow | zip_dataset_op | tensorflow/core/kernels/data/zip_dataset_op.cc | tensorflow/core/kernels/data/zip_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_ZIP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_ZIP_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ZipDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Zip";
static constexpr const char* const kInputDatasets = "input_datasets";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kNumInputDatasets = "N";
explicit ZipDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/zip_dataset_op.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ZipDatasetOp::kDatasetType;
constexpr const char* const ZipDatasetOp::kInputDatasets;
constexpr const char* const ZipDatasetOp::kOutputTypes;
constexpr const char* const ZipDatasetOp::kOutputShapes;
constexpr const char* const ZipDatasetOp::kNumInputDatasets;
constexpr char kInputImplsEmpty[] = "input_impls_empty";
class ZipDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx,
const std::vector<DatasetBase*>& inputs)
: DatasetBase(DatasetContext(ctx)), inputs_(inputs) {
for (const auto& input : inputs_) {
input->Ref();
for (DataType dt : input->output_dtypes()) {
output_dtypes_.push_back(dt);
}
output_shapes_.insert(output_shapes_.end(),
input->output_shapes().begin(),
input->output_shapes().end());
if (input != nullptr && random_indexing_compatible_.ok() &&
!input->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input->RandomIndexingCompatible();
}
}
}
~Dataset() override {
for (const auto& input : inputs_) {
input->Unref();
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return output_dtypes_;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
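  // The zipped dataset's cardinality is the minimum of its inputs':
  // unknown if any input is unknown, and infinite only when every
  // input is infinite.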
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t result = kInfiniteCardinality;
for (const auto& input : inputs_) {
int64_t n = input->Cardinality(options);
if (n == kUnknownCardinality) {
return kUnknownCardinality;
}
if (n != kInfiniteCardinality &&
(result == kInfiniteCardinality || n < result)) {
result = n;
}
}
return result;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
for (const auto& input : inputs_) {
inputs->push_back(input);
}
return absl::OkStatus();
}
Status CheckExternalState() const override {
for (const auto& input : inputs_) {
TF_RETURN_IF_ERROR(input->CheckExternalState());
}
return absl::OkStatus();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->reserve(output_dtypes().size());
for (int i = 0; i < inputs_.size(); ++i) {
std::vector<Tensor> input_tensors;
TF_RETURN_IF_ERROR(inputs_[i]->Get(ctx, index, &input_tensors));
out_tensors->insert(out_tensors->end(), input_tensors.begin(),
input_tensors.end());
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> input_graph_nodes;
input_graph_nodes.reserve(inputs_.size());
for (const auto& input : inputs_) {
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input, &input_node));
input_graph_nodes.emplace_back(input_node);
}
TF_RETURN_IF_ERROR(b->AddDataset(
this, {}, {std::make_pair(0, input_graph_nodes)}, {}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
input_impls_.resize(dataset()->inputs_.size());
for (size_t i = 0; i < input_impls_.size(); ++i) {
TF_RETURN_IF_ERROR(dataset()->inputs_[i]->MakeIterator(
&input_contexts_[i], this, strings::StrCat(prefix(), "[", i, "]"),
&input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
}
return absl::OkStatus();
}
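    // Advances each input iterator once per call and concatenates the
    // resulting tensors into a single zipped element; the sequence ends as
    // soon as any component input is exhausted.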
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (input_impls_.empty()) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->reserve(dataset()->output_dtypes().size());
Status status = absl::OkStatus();
*end_of_sequence = false;
if (TF_PREDICT_FALSE(ctx->index_mapper() && !input_contexts_.empty() &&
input_contexts_.back().index_mapper() == nullptr)) {
for (IteratorContext& input_context : input_contexts_) {
input_context.SetIndexMapper(ctx->index_mapper());
}
}
for (int i = 0; i < input_impls_.size(); ++i) {
const auto& input_impl = input_impls_[i];
std::vector<Tensor> input_tensors;
bool component_end_of_sequence = false;
status.Update(input_impl->GetNext(&input_contexts_[i], &input_tensors,
&component_end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
*end_of_sequence |= component_end_of_sequence;
if (!status.ok()) {
continue;
}
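        // Once one component hits end of sequence, still advance the
        // remaining inputs once so their checkpoints get merged before the
        // iterator is torn down.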
if (*end_of_sequence) {
for (int j = i + 1; j < input_impls_.size(); ++j) {
Status s =
input_impls_[j]->GetNext(&input_contexts_[j], &input_tensors,
&component_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[j].checkpoint());
}
break;
}
out_tensors->insert(out_tensors->end(), input_tensors.begin(),
input_tensors.end());
}
if (*end_of_sequence || !status.ok()) {
out_tensors->clear();
}
if (*end_of_sequence) {
input_impls_.clear();
}
return status;
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args), /*ratio=*/1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputImplsEmpty,
static_cast<int64_t>(input_impls_.empty())));
for (auto& input_impl : input_impls_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (ctx->restored_element_count()) {
if (input_impls_.size() != dataset()->inputs_.size()) {
return absl::FailedPreconditionError(
"`Initialize` should be called before restoring from the "
"checkpoint.");
}
if (ctx->index_mapper() == nullptr) {
return absl::FailedPreconditionError(
"ctx->index_mapper() should be provided along with "
"ctx->restored_element_count() when restoring.");
}
for (const auto& input_impl : input_impls_) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl));
}
return absl::OkStatus();
}
int64_t inputs_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplsEmpty, &inputs_empty));
if (static_cast<bool>(inputs_empty)) {
input_impls_.clear();
} else {
DCHECK_EQ(input_impls_.size(), dataset()->inputs_.size());
for (auto& input_impl : input_impls_)
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl));
}
return absl::OkStatus();
}
private:
mutex mu_;
std::vector<std::unique_ptr<IteratorBase>> input_impls_ TF_GUARDED_BY(mu_);
std::vector<IteratorContext> input_contexts_ TF_GUARDED_BY(mu_);
};
const std::vector<DatasetBase*> inputs_;
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
};
ZipDatasetOp::ZipDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {}
void ZipDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
std::vector<DatasetBase*> inputs;
for (size_t i = 0; i < ctx->num_inputs(); ++i) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
inputs.push_back(input);
}
*output = new Dataset(ctx, inputs);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ZipDataset").Device(DEVICE_CPU), ZipDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/zip_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "zip_dataset";
class ZipDatasetParams : public DatasetParams {
public:
template <typename T>
ZipDatasetParams(std::vector<T> input_dataset_params,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_input_datasets, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_input_datasets_(num_input_datasets) {
for (auto& params : input_dataset_params) {
input_dataset_params_.push_back(std::make_unique<T>(params));
}
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params[0].dataset_type(),
input_dataset_params[0].iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
for (int i = 0; i < num_input_datasets_; ++i) {
input_names->emplace_back(
absl::StrCat(ZipDatasetOp::kDatasetType, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("N", num_input_datasets_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return ZipDatasetOp::kDatasetType; }
private:
int32 num_input_datasets_;
};
class ZipDatasetOpTest : public DatasetOpsTestBase {};
ZipDatasetParams ZipDatasetParams1() {
return ZipDatasetParams(
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
      /*output_dtypes=*/{DT_INT64, DT_INT64},
      /*output_shapes=*/{PartialTensorShape({}), PartialTensorShape({})},
      /*num_input_datasets=*/2,
      /*node_name=*/kNodeName);
}
ZipDatasetParams ZipDatasetParams2() {
return ZipDatasetParams(
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 15, 1)},
      /*output_dtypes=*/{DT_INT64, DT_INT64},
      /*output_shapes=*/{PartialTensorShape({}), PartialTensorShape({})},
      /*num_input_datasets=*/2,
      /*node_name=*/kNodeName);
}
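// Both parameter sets zip range(0, 3) with a second range starting at 10, so
// the zipped dataset stops after the shorter input's three elements: the test
// cases below expect the component scalars 0,10, 1,11, 2,12 and a cardinality
// of 3.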
std::vector<GetNextTestCase<ZipDatasetParams>> GetNextTestCases() {
return {{ZipDatasetParams1(),
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})},
{ZipDatasetParams2(),
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})}};
}
ITERATOR_GET_NEXT_TEST_P(ZipDatasetOpTest, ZipDatasetParams, GetNextTestCases())
TEST_F(ZipDatasetOpTest, DatasetNodeName) {
auto dataset_params = ZipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ZipDatasetOpTest, DatasetTypeString) {
auto dataset_params = ZipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ZipDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ZipDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{ZipDatasetParams1(),
{DT_INT64, DT_INT64}},
{ZipDatasetParams2(),
{DT_INT64, DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<ZipDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ZipDatasetParams1(),
{PartialTensorShape({}),
PartialTensorShape({})}},
{ZipDatasetParams2(),
{PartialTensorShape({}),
PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ZipDatasetParams>> CardinalityTestCases() {
return {{ZipDatasetParams1(),
3},
{ZipDatasetParams2(),
3}};
}
DATASET_CARDINALITY_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ZipDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{ZipDatasetParams1(),
{DT_INT64, DT_INT64}},
{ZipDatasetParams2(),
{DT_INT64, DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<ZipDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ZipDatasetParams1(),
{PartialTensorShape({}),
PartialTensorShape({})}},
{ZipDatasetParams2(),
{PartialTensorShape({}),
PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ZipDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = ZipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ZipDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ZipDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ZipDatasetParams1(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})},
{ZipDatasetParams2(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,558 | cpp | tensorflow/tensorflow | batch_dataset_op | tensorflow/core/kernels/data/batch_dataset_op.cc | tensorflow/core/kernels/data/batch_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_BATCH_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_BATCH_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class BatchDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Batch";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kBatchSize = "batch_size";
static constexpr const char* const kDropRemainder = "drop_remainder";
static constexpr const char* const kParallelCopy = "parallel_copy";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit BatchDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
const int op_version_;
bool parallel_copy_ = false;
};
}
}
#endif
#include "tensorflow/core/kernels/data/batch_dataset_op.h"
#include <algorithm>
#include <cstdlib>
#include <functional>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
constexpr const char* const BatchDatasetOp::kDatasetType;
constexpr const char* const BatchDatasetOp::kInputDataset;
constexpr const char* const BatchDatasetOp::kBatchSize;
constexpr const char* const BatchDatasetOp::kDropRemainder;
constexpr const char* const BatchDatasetOp::kParallelCopy;
constexpr const char* const BatchDatasetOp::kOutputTypes;
constexpr const char* const BatchDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kBatchDataset[] = "BatchDataset";
class BatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t batch_size, bool drop_remainder,
bool parallel_copy, const DatasetBase* input, int op_version)
: DatasetBase(DatasetContext(ctx)),
batch_size_(batch_size),
reserve_size_(drop_remainder ? batch_size
: std::min<int64_t>(batch_size, 1 << 16)),
drop_remainder_(drop_remainder),
parallel_copy_(parallel_copy),
input_(input),
op_version_(op_version),
traceme_metadata_(
{{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"},
{"parallel_copy", parallel_copy ? "true" : "false"}}) {
input_->Ref();
const auto& input_shapes = input_->output_shapes();
output_shapes_.reserve(input_shapes.size());
for (const auto& input_shape : input_shapes) {
if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) {
output_shapes_.emplace_back(
PartialTensorShape({batch_size_}).Concatenate(input_shape));
} else {
output_shapes_.emplace_back(
PartialTensorShape({-1}).Concatenate(input_shape));
}
}
random_indexing_compatible_ = absl::OkStatus();
if (!drop_remainder_) {
random_indexing_compatible_ = absl::FailedPreconditionError(absl::StrCat(
type_string(),
" does not support global shuffling with `drop_remainder=False`."));
} else if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(batch_size_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
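  // A full batch counts once; a trailing partial batch counts only when
  // drop_remainder is false. For example, 10 input elements batched by 3
  // yield 4 batches, or 3 with drop_remainder set.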
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
const int64 cardinality = Cardinality();
if (index < 0 || index >= cardinality) {
return errors::OutOfRange("Index out of range [0, ", cardinality,
"):", index);
}
int batch_start_index = batch_size_ * index;
std::vector<std::vector<Tensor>> batch_elements;
int input_cardinality = input_->Cardinality();
for (int i = batch_start_index;
i < batch_start_index + batch_size_ && i < input_cardinality; ++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_->Get(ctx, i, &batch_element_tuple));
batch_elements.emplace_back(std::move(batch_element_tuple));
}
TF_RETURN_IF_ERROR(CopyBatch(AnyContext(ctx), std::move(batch_elements),
parallel_copy_, out_tensors));
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size));
Node* drop_remainder = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder));
AttrValue parallel_copy;
b->BuildAttrValue(parallel_copy_, ¶llel_copy);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, batch_size, drop_remainder},
{{kParallelCopy, parallel_copy}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
tsl::mutex_lock l(mu_);
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
batch_elements.reserve(dataset()->reserve_size_);
*end_of_sequence = false;
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence; ++i) {
std::vector<Tensor> batch_element_tuple;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
&batch_element_tuple,
end_of_sequence));
if (!*end_of_sequence) {
batch_elements.emplace_back(std::move(batch_element_tuple));
} else {
input_impl_.reset();
}
}
ctx_with_index_mapper.MergeCheckpoint();
}
if (batch_elements.empty()) {
DCHECK(*end_of_sequence);
return absl::OkStatus();
}
if (dataset()->drop_remainder_ &&
batch_elements.size() < dataset()->batch_size_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CopyBatch(AnyContext(ctx), std::move(batch_elements),
dataset()->parallel_copy_, out_tensors));
*end_of_sequence = false;
return absl::OkStatus();
}
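    // Maps a globally shuffled element position back to an input position:
    // the batch index goes through the parent mapper while the offset inside
    // the batch is preserved. E.g. with batch_size = 4, position 9 is
    // (batch 2, offset 1); if the parent mapper sends batch 2 to batch 5, the
    // input element read is 5 * 4 + 1 = 21.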
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t batch_size = dataset()->batch_size_;
return [parent_index_mapper,
batch_size](size_t element_position) -> absl::StatusOr<size_t> {
size_t batch_element_position = element_position / batch_size;
size_t input_element_offset = element_position % batch_size;
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(batch_element_position));
return shuffled_element_position * batch_size + input_element_offset;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), dataset()->batch_size_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (ctx->restored_element_count().has_value()) {
IteratorContext::Params params(ctx);
params.restored_element_count =
*ctx->restored_element_count() * dataset()->batch_size_;
IteratorContext ctx_copy(params);
return RestoreInput(&ctx_copy, reader, input_impl_);
}
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t batch_size_;
const int64_t reserve_size_;
const bool drop_remainder_;
const bool parallel_copy_;
const DatasetBase* const input_;
const int op_version_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_;
const TraceMeMetadata traceme_metadata_;
};
BatchDatasetOp::BatchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->def().op() == kBatchDataset ? 1 : 2) {
if (ctx->HasAttr(kParallelCopy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, ¶llel_copy_));
}
}
void BatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t batch_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size));
OP_REQUIRES(ctx, batch_size > 0,
errors::InvalidArgument("Batch size must be greater than zero."));
bool drop_remainder = false;
if (op_version_ > 1) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder));
}
*output = new Dataset(ctx, batch_size, drop_remainder, parallel_copy_, input,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("BatchDataset").Device(DEVICE_CPU),
BatchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("BatchDatasetV2").Device(DEVICE_CPU),
BatchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/batch_dataset_op.h"
#include <string>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "batch_dataset";
class BatchDatasetOpTest : public DatasetOpsTestBase {};
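// The positional arguments of the BatchDatasetParams helpers below appear to
// be: input dataset params, batch_size, drop_remainder, parallel_copy,
// output_dtypes, output_shapes, node_name (consistent with the cardinality
// and shape expectations further down).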
BatchDatasetParams BatchDatasetParams1() {
return BatchDatasetParams(RangeDatasetParams(0, 12, 1),
4,
false,
true,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams2() {
return BatchDatasetParams(RangeDatasetParams(0, 12, 1),
4,
true,
false,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams3() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
3,
false,
false,
{DT_INT64},
{PartialTensorShape({-1})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams4() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
3,
true,
true,
{DT_INT64},
{PartialTensorShape({3})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams5() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
12,
true,
true,
{DT_INT64},
{PartialTensorShape({12})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams6() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
12,
false,
true,
{DT_INT64},
{PartialTensorShape({-1})},
kNodeName);
}
BatchDatasetParams BatchDatasetParams7() {
return BatchDatasetParams(RangeDatasetParams(0, 0, 1),
4,
false,
false,
{DT_INT64},
{PartialTensorShape({4})},
kNodeName);
}
BatchDatasetParams InvalidBatchSizeBatchDatasetParams() {
return BatchDatasetParams(RangeDatasetParams(0, 10, 1),
-1,
false,
false,
{DT_INT64},
{PartialTensorShape({3})},
kNodeName);
}
std::vector<GetNextTestCase<BatchDatasetParams>> GetNextTestCases() {
return {{BatchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{BatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({3}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{BatchDatasetParams5(),
{}},
{BatchDatasetParams6(),
CreateTensors<int64_t>(TensorShape({10}),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}})},
{BatchDatasetParams7(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
GetNextTestCases())
TEST_F(BatchDatasetOpTest, DatasetNodeName) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(batch_dataset_params.node_name()));
}
TEST_F(BatchDatasetOpTest, DatasetTypeString) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
name_utils::OpNameParams params;
params.op_version = batch_dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(BatchDatasetOp::kDatasetType, params)));
}
TEST_F(BatchDatasetOpTest, DatasetOutputDtypes) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<BatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{BatchDatasetParams1(),
{PartialTensorShape({4})}},
{BatchDatasetParams2(),
{PartialTensorShape({4})}},
{BatchDatasetParams3(),
{PartialTensorShape({-1})}},
{BatchDatasetParams4(),
{PartialTensorShape({3})}},
{BatchDatasetParams5(),
{PartialTensorShape({12})}},
{BatchDatasetParams6(),
{PartialTensorShape({-1})}},
{BatchDatasetParams7(),
{PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<BatchDatasetParams>> CardinalityTestCases() {
return {
{BatchDatasetParams1(), 3},
{BatchDatasetParams2(), 3},
{BatchDatasetParams3(), 4},
{BatchDatasetParams4(), 3},
{BatchDatasetParams5(), 0},
{BatchDatasetParams6(), 1},
{BatchDatasetParams7(), 0}};
}
DATASET_CARDINALITY_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
CardinalityTestCases())
TEST_F(BatchDatasetOpTest, IteratorOutputDtypes) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<BatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{BatchDatasetParams1(),
{PartialTensorShape({4})}},
{BatchDatasetParams2(),
{PartialTensorShape({4})}},
{BatchDatasetParams3(),
{PartialTensorShape({-1})}},
{BatchDatasetParams4(),
{PartialTensorShape({3})}},
{BatchDatasetParams5(),
{PartialTensorShape({12})}},
{BatchDatasetParams6(),
{PartialTensorShape({-1})}},
{BatchDatasetParams7(),
{PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(BatchDatasetOpTest, IteratorOutputPrefix) {
auto batch_dataset_params = BatchDatasetParams1();
TF_ASSERT_OK(Initialize(batch_dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = batch_dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
BatchDatasetOp::kDatasetType, batch_dataset_params.iterator_prefix(),
params)));
}
std::vector<IteratorSaveAndRestoreTestCase<BatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{BatchDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(
TensorShape({4}), {{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}})},
{BatchDatasetParams3(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8}),
CreateTensor<int64_t>(TensorShape({1}), {9})}},
{BatchDatasetParams4(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({3}), {0, 1, 2}),
CreateTensor<int64_t>(TensorShape({3}), {3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3}), {6, 7, 8})}},
{BatchDatasetParams5(),
{0, 1, 5},
{}},
{BatchDatasetParams6(),
{0, 1, 5},
{CreateTensor<int64_t>(TensorShape({10}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})}},
{BatchDatasetParams7(),
{0, 1, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(BatchDatasetOpTest, BatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(BatchDatasetOpTest, InvalidBatchSize) {
auto batch_dataset_params = InvalidBatchSizeBatchDatasetParams();
EXPECT_EQ(Initialize(batch_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
REGISTER_OP("BatchDatasetOpTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"));
static void add_identity_nodes(Node* node, Graph& graph,
std::vector<Node*>& identity_nodes) {
for (int i = 0; i < node->num_outputs(); i++) {
Node* new_node;
std::string name = absl::StrCat("Identity", i);
TF_EXPECT_OK(NodeBuilder(name, "Identity")
.Attr("T", node->output_type(i))
.Input(node, i)
.Finalize(&graph, &new_node));
identity_nodes.push_back(new_node);
}
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST(BatchDatsetOpTest, TypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* batch_size;
Node* drop_remainder;
Node* batch_dataset_v2;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(NodeBuilder("batch_size", "BatchDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &batch_size));
TF_EXPECT_OK(NodeBuilder("drop_remainder", "BatchDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_BOOL)
.Finalize(&graph, &drop_remainder));
TF_EXPECT_OK(NodeBuilder("BatchDatasetV2", "BatchDatasetV2")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(batch_size)
.Input(drop_remainder)
.Finalize(&graph, &batch_dataset_v2));
std::vector<Node*> identity_nodes;
add_identity_nodes(batch_dataset_v2, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
}
}
} |
1,559 | cpp | tensorflow/tensorflow | map_dataset_op | tensorflow/core/kernels/data/map_dataset_op.cc | tensorflow/core/kernels/data/map_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_MAP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_MAP_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class MapDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Map";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kUseInterOpParallelism =
"use_inter_op_parallelism";
static constexpr const char* const kPreserveCardinality =
"preserve_cardinality";
static constexpr const char* const kForceSynchronous = "force_synchronous";
explicit MapDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
bool preserve_cardinality_;
bool force_synchronous_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/map_dataset_op.h"
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
namespace data {
constexpr const char* const MapDatasetOp::kDatasetType;
constexpr const char* const MapDatasetOp::kInputDataset;
constexpr const char* const MapDatasetOp::kOtherArguments;
constexpr const char* const MapDatasetOp::kFunc;
constexpr const char* const MapDatasetOp::kTarguments;
constexpr const char* const MapDatasetOp::kOutputTypes;
constexpr const char* const MapDatasetOp::kOutputShapes;
constexpr const char* const MapDatasetOp::kUseInterOpParallelism;
constexpr const char* const MapDatasetOp::kPreserveCardinality;
constexpr const char* const MapDatasetOp::kForceSynchronous;
class MapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
bool preserve_cardinality, bool force_synchronous)
: DatasetBase(DatasetContext(ctx)),
input_(input),
preserve_cardinality_(preserve_cardinality),
force_synchronous_(force_synchronous),
captured_func_(std::move(captured_func)),
output_types_(output_types),
output_shapes_(output_shapes) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
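  // When preserve_cardinality is set, the input's cardinality passes through
  // unchanged; otherwise it is reported as unknown, since an OutOfRange error
  // from the mapped function may end the sequence early (see GetNextInternal).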
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (preserve_cardinality_) {
return input_->Cardinality(options);
} else {
return kUnknownCardinality;
}
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
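  // Random-access lookup: fetches element `index` from the input dataset and
  // runs the captured function on it, instantiating the function on first use.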
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args));
if (!instantiated_captured_func_) {
TF_RETURN_IF_ERROR(
captured_func_->Instantiate(InstantiateCapturedFunctionParams(ctx),
&instantiated_captured_func_));
}
return instantiated_captured_func_->RunInstantiated(args, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f_attr;
b->BuildAttrValue(captured_func_->func(), &f_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
AttrValue use_inter_op_parallelism_attr;
b->BuildAttrValue(captured_func_->use_inter_op_parallelism(),
&use_inter_op_parallelism_attr);
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
AttrValue force_synchronous_attr;
b->BuildAttrValue(force_synchronous_, &force_synchronous_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {std::make_pair(0, input_graph_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f_attr),
std::make_pair(kTarguments, other_arguments_types_attr),
std::make_pair(kUseInterOpParallelism, use_inter_op_parallelism_attr),
std::make_pair(kPreserveCardinality, preserve_cardinality_attr),
std::make_pair(kForceSynchronous, force_synchronous_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &args, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
Status s = instantiated_captured_func_->Run(ctx, std::move(args),
out_tensors, model_node());
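      // An OutOfRange status raised by the user-defined function is either
      // surfaced as InvalidArgument (when cardinality must be preserved) or
      // treated as the end of the sequence.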
if (errors::IsOutOfRange(s)) {
if (dataset()->preserve_cardinality_) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ", s.message());
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
if (!s.ok()) {
return AddErrorContext(s);
}
return s;
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args), 1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
return absl::OkStatus();
}
private:
std::unique_ptr<IteratorBase> input_impl_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
};
const DatasetBase* const input_;
const bool preserve_cardinality_;
const bool force_synchronous_;
const std::unique_ptr<CapturedFunction> captured_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
mutable std::unique_ptr<InstantiatedCapturedFunction>
instantiated_captured_func_;
absl::Status random_indexing_compatible_;
};
MapDatasetOp::MapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseInterOpParallelism,
¶ms.use_inter_op_parallelism));
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, kFunc, params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kPreserveCardinality, &preserve_cardinality_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kForceSynchronous, &force_synchronous_));
}
void MapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output =
new Dataset(ctx, input, std::move(captured_func), output_types_,
output_shapes_, preserve_cardinality_, force_synchronous_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("MapDataset").Device(DEVICE_CPU), MapDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalMapDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle"),
MapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("MapDataset");
}
}
} | #include "tensorflow/core/kernels/data/map_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "map_dataset";
class MapDatasetOpTest : public DatasetOpsTestBase {};
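// Range(0, 10, 3) produces {0, 3, 6, 9}; applying XTimesTwo through two nested
// map datasets yields {0, 12, 24, 36}, which is what GetNextTestCases expects.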
MapDatasetParams MapDatasetParams1() {
auto map_dataset_params_0 = MapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
true,
"map_dataset_0");
return MapDatasetParams(
std::move(map_dataset_params_0),
{},
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
true,
"map_dataset_1");
}
MapDatasetParams MapDatasetParams2() {
auto batch_dataset_params =
BatchDatasetParams(RangeDatasetParams(10, 0, -3),
2,
false,
true,
{DT_INT64},
{PartialTensorShape({2})},
"batch_dataset");
return MapDatasetParams(
std::move(batch_dataset_params),
{},
FunctionDefHelper::FunctionRef("XAddX", {{"T", DT_INT64}}),
{test::function::XAddX()},
{},
{DT_INT64},
{PartialTensorShape({1})},
true,
false,
kNodeName);
}
MapDatasetParams MapDatasetParams3() {
return MapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
FunctionDefHelper::FunctionRef("XTimesFour", {{"T", DT_INT64}}),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
true,
kNodeName);
}
std::vector<GetNextTestCase<MapDatasetParams>> GetNextTestCases() {
return {{MapDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})},
{MapDatasetParams2(),
CreateTensors<int64_t>(TensorShape({2}), {{20, 14}, {8, 2}})},
{MapDatasetParams3(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})}};
}
ITERATOR_GET_NEXT_TEST_P(MapDatasetOpTest, MapDatasetParams, GetNextTestCases())
TEST_F(MapDatasetOpTest, DatasetNodeName) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(MapDatasetOpTest, DatasetTypeString) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(MapDatasetOp::kDatasetType)));
}
TEST_F(MapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(MapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<MapDatasetParams>> CardinalityTestCases() {
return {{MapDatasetParams1(),
4},
{MapDatasetParams2(),
kUnknownCardinality},
{MapDatasetParams3(),
4}};
}
DATASET_CARDINALITY_TEST_P(MapDatasetOpTest, MapDatasetParams,
CardinalityTestCases())
TEST_F(MapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(MapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(MapDatasetOpTest, IteratorPrefix) {
auto dataset_params = MapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
MapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<MapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{MapDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})},
{MapDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({2}), {{20, 14}, {8, 2}})},
{MapDatasetParams3(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {12}, {24}, {36}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(MapDatasetOpTest, MapDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} |
1,560 | cpp | tensorflow/tensorflow | range_dataset_op | tensorflow/core/kernels/data/range_dataset_op.cc | tensorflow/core/kernels/data/range_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_RANGE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_RANGE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class RangeDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Range";
static constexpr const char* const kStart = "start";
static constexpr const char* const kStop = "stop";
static constexpr const char* const kStep = "step";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kReplicateOnSplit = "replicate_on_split";
explicit RangeDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
class RangeSplitProvider;
DataTypeVector output_types_;
bool replicate_on_split_ = false;
};
}
}
#endif
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace data {
constexpr const char* const RangeDatasetOp::kDatasetType;
constexpr const char* const RangeDatasetOp::kStart;
constexpr const char* const RangeDatasetOp::kStop;
constexpr const char* const RangeDatasetOp::kStep;
constexpr const char* const RangeDatasetOp::kOutputTypes;
constexpr const char* const RangeDatasetOp::kOutputShapes;
constexpr const char* const RangeDatasetOp::kReplicateOnSplit;
namespace {
constexpr char kNext[] = "next";
constexpr char kHasSplitProvider[] = "has_split_provider";
constexpr char kSlash[] = "/";
constexpr char kSplitProvider[] = "split_provider";
Status ConvertOutputTypes(const tensorflow::DataTypeVector& output_dtypes,
std::vector<Tensor>* out_tensors, int64 value) {
switch (output_dtypes[0]) {
#define HANDLE_TYPE(type) \
case DataTypeToEnum<type>::value: { \
out_tensors->emplace_back(static_cast<type>(value)); \
break; \
}
TF_CALL_NUMBER_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::InvalidArgument("Unsupported data type: ",
DataTypeString(output_dtypes[0]));
}
return absl::OkStatus();
}
int64_t sgn(int64_t val) { return (0 < val) - (val < 0); }
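// Number of elements produced by range(start, stop, step): 0 for an empty
// range, otherwise ceil(|stop - start| / |step|); a stop at or beyond
// kint64max is treated as an infinite range. E.g. RangeCardinality(0, 10, 3)
// and RangeCardinality(10, 0, -3) are both 4.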
int64_t RangeCardinality(int64_t start, int64_t stop, int64_t step) {
if (stop >= tsl::kint64max) {
return kInfiniteCardinality;
}
if (sgn(stop - start) * sgn(step) <= 0) {
return 0;
} else if (step > 0) {
return (stop - start - 1) / step + 1;
} else {
return (start - stop - 1) / -step + 1;
}
}
class RangeCounter {
public:
RangeCounter(int64_t start, int64_t stop, int64_t step)
: start_(start), stop_(stop), step_(step), next_(start) {}
int64_t GetNext(bool* end_of_counter) {
mutex_lock l(mu_);
if ((step_ > 0 && next_ >= stop_) || (step_ < 0 && next_ <= stop_)) {
*end_of_counter = true;
return -1;
}
*end_of_counter = false;
int64_t result = next_;
next_ += step_;
return result;
}
int64_t Peek() const {
mutex_lock l(mu_);
return next_;
}
void Reset() {
mutex_lock l(mu_);
next_ = start_;
}
void SetNext(int64_t value) {
mutex_lock l(mu_);
next_ = value;
}
int64_t Cardinality() const { return RangeCardinality(start_, stop_, step_); }
private:
const int64_t start_;
const int64_t stop_;
const int64_t step_;
mutable mutex mu_;
int64_t next_ TF_GUARDED_BY(mu_);
};
}
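// Exposes the values of the range one at a time as splits, backed by a
// RangeCounter; used when the dataset is driven through split providers
// rather than by a plain iterator (see Iterator::Initialize below).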
class RangeDatasetOp::RangeSplitProvider : public SplitProvider {
public:
RangeSplitProvider(int64_t start, int64_t stop, int64_t step)
: counter_(start, stop, step) {}
Status GetNext(Tensor* split, bool* end_of_splits) override {
int64_t next = counter_.GetNext(end_of_splits);
if (*end_of_splits) {
return absl::OkStatus();
}
*split = Tensor(DT_INT64, TensorShape{});
split->scalar<int64_t>()() = next;
return absl::OkStatus();
}
Status Reset() override {
counter_.Reset();
return absl::OkStatus();
}
Status Save(std::function<std::string(std::string)> key_name_fn,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_name_fn(kNext), counter_.Peek()));
return absl::OkStatus();
}
Status Restore(std::function<std::string(std::string)> key_name_fn,
IteratorStateReader* reader) override {
int64_t next;
TF_RETURN_IF_ERROR(reader->ReadScalar(key_name_fn(kNext), &next));
counter_.SetNext(next);
return absl::OkStatus();
}
int64_t Cardinality() const override { return counter_.Cardinality(); }
private:
RangeCounter counter_;
};
class RangeDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t start, int64_t stop, int64_t step,
DataTypeVector output_dtypes, bool replicate_on_split)
: DatasetBase(DatasetContext(ctx)),
start_(start),
stop_(stop),
step_(step),
output_dtypes_(output_dtypes),
replicate_on_split_(replicate_on_split) {}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return output_dtypes_;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({PartialTensorShape({})});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(start_, stop_, step_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return RangeCardinality(start_, stop_, step_);
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<RangeSplitProvider>(start_, stop_, step_));
return absl::OkStatus();
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->clear();
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return ConvertOutputTypes(output_dtypes(), out_tensors,
start_ + (index * step_));
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* start = nullptr;
Node* stop = nullptr;
Node* step = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(start_, &start));
TF_RETURN_IF_ERROR(b->AddScalar(stop_, &stop));
TF_RETURN_IF_ERROR(b->AddScalar(step_, &step));
AttrValue replicate_on_split;
b->BuildAttrValue(replicate_on_split_, &replicate_on_split);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {start, stop, step},
{std::make_pair(kReplicateOnSplit, replicate_on_split)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty() || dataset()->replicate_on_split_) {
counter_ = std::make_unique<RangeCounter>(
dataset()->start_, dataset()->stop_, dataset()->step_);
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
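      // Under global shuffling, element order is delegated to the
      // GlobalShuffleIterator instead of the local counter / split provider.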
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
int64_t value;
if (split_provider_ != nullptr) {
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
value = split.scalar<int64_t>()();
} else {
value = counter_->GetNext(end_of_sequence);
if (*end_of_sequence) {
return absl::OkStatus();
}
}
out_tensors->reserve(1);
return ConvertOutputTypes(output_dtypes(), out_tensors, value);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (split_provider_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kHasSplitProvider, true));
TF_RETURN_IF_ERROR(split_provider_->Save(
[this](const std::string& key) {
return SplitProviderKeyNameFn(key);
},
writer));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNext, counter_->Peek()));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(ctx);
}
if (reader->Contains(prefix(), kHasSplitProvider)) {
TF_RETURN_IF_ERROR(split_provider_->Restore(
[this](const std::string& key) {
return SplitProviderKeyNameFn(key);
},
reader));
} else {
int64_t next;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNext, &next));
counter_->SetNext(next);
}
return absl::OkStatus();
}
std::string SplitProviderKeyNameFn(const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
}
private:
std::unique_ptr<RangeCounter> counter_;
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const int64_t start_;
const int64_t stop_;
const int64_t step_;
const DataTypeVector output_dtypes_;
const bool replicate_on_split_;
};
RangeDatasetOp::RangeDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
if (ctx->HasAttr(kReplicateOnSplit)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_));
}
}
void RangeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
int64_t start;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kStart, &start));
int64_t stop;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kStop, &stop));
int64_t step;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kStep, &step));
OP_REQUIRES(ctx, step != 0,
errors::InvalidArgument("step must be a non-zero integer."));
*output =
new Dataset(ctx, start, stop, step, output_types_, replicate_on_split_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RangeDataset").Device(DEVICE_CPU),
RangeDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
class RangeDatasetOpTest : public DatasetOpsTestBase {};
RangeDatasetParams PositiveStepRangeDatasetParams() {
return RangeDatasetParams(0, 10, 3);
}
RangeDatasetParams NegativeStepRangeDatasetParams() {
return RangeDatasetParams(10, 0, -3);
}
RangeDatasetParams Int32OverflowRangeDatasetParams() {
return RangeDatasetParams(2'147'483'647LL, 2'147'483'649LL,
1);
}
RangeDatasetParams UnsignedInt32OverflowRangeDatasetParams() {
return RangeDatasetParams(4'294'967'295LL, 4'294'967'297LL,
1);
}
RangeDatasetParams ZeroStepRangeDatasetParams() {
return RangeDatasetParams(10, 0, 0);
}
RangeDatasetParams RangeDatasetParams1() {
return RangeDatasetParams(0, 10, 3,
{DT_INT32});
}
RangeDatasetParams RangeDatasetParams2() {
return RangeDatasetParams(0, 10, 3,
{DT_INT64});
}
std::vector<GetNextTestCase<RangeDatasetParams>> GetNextTestCases() {
return {{PositiveStepRangeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{NegativeStepRangeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})},
          {Int32OverflowRangeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}),
{{2'147'483'647LL}, {2'147'483'648LL}})},
          {UnsignedInt32OverflowRangeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}),
{{4'294'967'295LL}, {4'294'967'296LL}})}};
}
ITERATOR_GET_NEXT_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
GetNextTestCases())
TEST_F(RangeDatasetOpTest, DatasetNodeName) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(range_dataset_params.node_name()));
}
TEST_F(RangeDatasetOpTest, DatasetTypeString) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(RangeDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<RangeDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{RangeDatasetParams1(),
{DT_INT32}},
{RangeDatasetParams2(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
DatasetOutputDtypesTestCases())
TEST_F(RangeDatasetOpTest, DatasetOutputShapes) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<RangeDatasetParams>> CardinalityTestCases() {
return {{PositiveStepRangeDatasetParams(),
4},
{NegativeStepRangeDatasetParams(),
4}};
}
DATASET_CARDINALITY_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<RangeDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{RangeDatasetParams1(),
{DT_INT32}},
{RangeDatasetParams2(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
IteratorOutputDtypesTestCases())
TEST_F(RangeDatasetOpTest, IteratorOutputShapes) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(RangeDatasetOpTest, IteratorPrefix) {
auto range_dataset_params = PositiveStepRangeDatasetParams();
TF_ASSERT_OK(Initialize(range_dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
RangeDatasetOp::kDatasetType, range_dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<RangeDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{PositiveStepRangeDatasetParams(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{NegativeStepRangeDatasetParams(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(RangeDatasetOpTest, RangeDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(RangeDatasetOpTest, ZeroStep) {
auto range_dataset_params = ZeroStepRangeDatasetParams();
EXPECT_EQ(Initialize(range_dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(RangeDatasetOpTest, SplitProviderPositiveStep) {
auto params = RangeDatasetParams(0, 10, 3,
{DT_INT64});
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 2, 1,
CreateTensors<int64_t>(TensorShape({}), {{3}, {9}})));
}
TEST_F(RangeDatasetOpTest, SplitProviderNegativeStep) {
auto params = RangeDatasetParams(10, 0, -3,
{DT_INT64});
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 2, 0,
CreateTensors<int64_t>(TensorShape({}), {{10}, {4}})));
}
TEST_F(RangeDatasetOpTest, SplitProviderEmpty) {
auto params = RangeDatasetParams(0, 0, 1,
{DT_INT64});
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 2,
CreateTensors<int64_t>(TensorShape({}), {})));
}
}
}
} |
1,561 | cpp | tensorflow/tensorflow | map_defun_op | tensorflow/core/kernels/data/map_defun_op.cc | tensorflow/core/kernels/data/map_defun_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_MAP_DEFUN_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_MAP_DEFUN_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class MapDefunOp : public AsyncOpKernel {
public:
static constexpr const char* const kArguments = "arguments";
static constexpr const char* const kCapturedInputs = "captured_inputs";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kTcaptured = "Tcaptured";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kMaxIntraOpParallelism =
"max_intra_op_parallelism";
explicit MapDefunOp(OpKernelConstruction* ctx);
~MapDefunOp() override = default;
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override;
private:
struct ComputeOptions;
class MapFunctionCallFrame;
void SetRunOptions(OpKernelContext* ctx,
FunctionLibraryRuntime::Options* opts,
ComputeOptions* compute_opts, bool always_collect_stats);
Status SetupArgs(OpKernelContext* ctx, ComputeOptions** compute_opts);
Status SetupOutputs(OpKernelContext* ctx, ComputeOptions* opts);
FunctionLibraryRuntime::Handle func_handle_;
std::vector<PartialTensorShape> output_shapes_;
int max_intra_op_parallelism_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/map_defun_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/batch_util.h"
#include "tensorflow/core/util/reffed_status_callback.h"
namespace tensorflow {
namespace data {
constexpr const char* const MapDefunOp::kArguments;
constexpr const char* const MapDefunOp::kCapturedInputs;
constexpr const char* const MapDefunOp::kTarguments;
constexpr const char* const MapDefunOp::kTcaptured;
constexpr const char* const MapDefunOp::kOutputTypes;
constexpr const char* const MapDefunOp::kOutputShapes;
constexpr const char* const MapDefunOp::kFunc;
constexpr const char* const MapDefunOp::kMaxIntraOpParallelism;
constexpr char kOutput[] = "output";
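// Per-invocation state shared by all function calls launched for one batch:
// the batched arguments, their per-element shapes, the captured inputs, the
// batch size, and the (lazily allocated) output list guarded by a mutex.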
struct MapDefunOp::ComputeOptions {
OpInputList args;
const std::vector<TensorShape> arg_shapes;
OpInputList captured_inputs;
const int64_t batch_size;
std::function<void(std::function<void()>)> runner;
std::vector<PartialTensorShape> output_shapes TF_GUARDED_BY(mu);
OpOutputList output TF_GUARDED_BY(mu);
mutex mu;
ComputeOptions(OpKernelContext* ctx, OpInputList args,
OpInputList captured_inputs,
std::vector<TensorShape> arg_shapes, int64_t batch_size,
const std::vector<PartialTensorShape>& output_shapes_attr,
int max_parallelism)
: args(args),
arg_shapes(std::move(arg_shapes)),
captured_inputs(captured_inputs),
batch_size(batch_size),
output_shapes(output_shapes_attr) {
if (max_parallelism >= 1) {
runner = RunnerWithMaxParallelism(*ctx->runner(), max_parallelism);
}
}
};
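// Call frame that presents the i-th slice of each batched argument (plus the
// captured inputs) to the function and writes each return value into the
// i-th slice of the corresponding output tensor.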
class MapDefunOp::MapFunctionCallFrame : public CallFrameInterface {
public:
MapFunctionCallFrame(ComputeOptions* compute_opts, OpKernel* kernel,
size_t iter)
: compute_opts_(compute_opts),
kernel_(kernel),
iter_(iter),
sliced_args_(compute_opts_->args.size()) {}
~MapFunctionCallFrame() override = default;
size_t num_args() const override {
return compute_opts_->args.size() + compute_opts_->captured_inputs.size();
}
size_t num_retvals() const override {
return static_cast<size_t>(kernel_->num_outputs());
}
Status GetArg(int index, const Tensor** val) override {
if (index < 0 || index >= compute_opts_->args.size() +
compute_opts_->captured_inputs.size()) {
return errors::InvalidArgument("Mismatch in number of function inputs.");
}
if (index >= compute_opts_->args.size()) {
*val =
&compute_opts_->captured_inputs[index - compute_opts_->args.size()];
return absl::OkStatus();
}
mutex_lock l(mu_);
bool result = sliced_args_[index].CopyFrom(
compute_opts_->args[index].Slice(iter_, iter_ + 1),
compute_opts_->arg_shapes.at(index));
if (!result) {
return errors::Internal("GetArg failed.");
} else if (!sliced_args_[index].IsAligned()) {
sliced_args_[index] = tensor::DeepCopy(sliced_args_[index]);
}
*val = &sliced_args_[index];
return absl::OkStatus();
}
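  // Validates the return value's dtype and shape, allocates the output tensor
  // on first use if its shape was not fully defined, and copies the value into
  // slice `iter_` of that output.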
Status SetRetval(int index, const Tensor& val) override {
if (index < 0 || index >= kernel_->num_outputs()) {
return errors::InvalidArgument("Mismatch in number of function outputs.");
}
if (val.dtype() != kernel_->output_type(index)) {
return errors::InvalidArgument(
"Mismatch in function return type and expected output type for "
"output: ",
index);
}
Tensor* out;
{
mutex_lock l(compute_opts_->mu);
if (!compute_opts_->output_shapes.at(index).IsCompatibleWith(
val.shape())) {
return errors::InvalidArgument(
"Mismatch in function retval shape, ", val.shape(),
", and expected output shape, ",
compute_opts_->output_shapes.at(index).DebugString(), ".");
}
if (!compute_opts_->output_shapes.at(index).IsFullyDefined()) {
compute_opts_->output_shapes.at(index) = val.shape();
TensorShape actual_shape = val.shape();
actual_shape.InsertDim(0, compute_opts_->batch_size);
TF_RETURN_IF_ERROR(
compute_opts_->output.allocate(index, actual_shape, &out));
} else {
out = (compute_opts_->output)[index];
}
}
return batch_util::CopyElementToSlice(val, out, iter_);
}
private:
ComputeOptions* const compute_opts_;
const OpKernel* kernel_;
const size_t iter_;
mutex mu_;
std::vector<Tensor> sliced_args_ TF_GUARDED_BY(mu_);
};
MapDefunOp::MapDefunOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {
auto func_lib = ctx->function_library();
OP_REQUIRES(ctx, func_lib != nullptr,
errors::Internal("No function library."));
const NameAttrList* func;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kFunc, &func));
OP_REQUIRES_OK(ctx,
func_lib->Instantiate(func->name(), AttrSlice(&func->attr()),
&func_handle_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kMaxIntraOpParallelism, &max_intra_op_parallelism_));
OP_REQUIRES(ctx, ctx->num_inputs() >= 0,
errors::InvalidArgument("Must have at least one input."));
OP_REQUIRES(ctx, ctx->num_outputs() >= 0,
errors::InvalidArgument("Must have at least one output."));
OP_REQUIRES(ctx, ctx->num_outputs() == output_shapes_.size(),
errors::InvalidArgument(
"Length of output_shapes and output_types must match."));
}
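// Launches one function invocation per batch element, each with its own
// cancellation manager. A reference-counted status callback aggregates the
// per-call statuses and completes the op once every call has finished.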
void MapDefunOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
ComputeOptions* compute_opts = nullptr;
OP_REQUIRES_OK_ASYNC(ctx, SetupArgs(ctx, &compute_opts), done);
Status s = SetupOutputs(ctx, compute_opts);
if (!s.ok()) delete compute_opts;
OP_REQUIRES_OK_ASYNC(ctx, s, done);
FunctionLibraryRuntime::Options opts;
SetRunOptions(ctx, &opts, compute_opts, false);
StatusCallback callback = std::bind(
[](OpKernelContext* ctx, ComputeOptions* compute_opts, DoneCallback& done,
const Status& status) {
delete compute_opts;
ctx->SetStatus(status);
done();
},
ctx, compute_opts, std::move(done), std::placeholders::_1);
auto* refcounted = new ReffedStatusCallback(std::move(callback));
CancellationManager* parent_mgr = ctx->cancellation_manager();
for (size_t i = 0; i < static_cast<size_t>(compute_opts->batch_size); ++i) {
CancellationManager* c_mgr = new CancellationManager(parent_mgr);
opts.cancellation_manager = c_mgr;
auto* call_frame = new MapFunctionCallFrame(compute_opts, this, i);
refcounted->Ref();
ctx->function_library()->Run(
opts, func_handle_, call_frame,
[call_frame, refcounted, c_mgr](const Status& func_status) {
delete c_mgr;
delete call_frame;
refcounted->UpdateStatus(func_status);
refcounted->Unref();
});
}
refcounted->Unref();
}
void MapDefunOp::SetRunOptions(OpKernelContext* ctx,
FunctionLibraryRuntime::Options* opts,
ComputeOptions* compute_opts,
bool always_collect_stats) {
opts->rendezvous = ctx->rendezvous();
if (always_collect_stats) {
opts->stats_collector = ctx->stats_collector();
}
if (max_intra_op_parallelism_ >= 1) {
opts->runner = &compute_opts->runner;
} else {
opts->runner = ctx->runner();
}
opts->run_all_kernels_inline = ctx->run_all_kernels_inline();
}
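// Validates that every argument has rank >= 1 and the same leading (batch)
// dimension, then records the per-element argument shapes with that leading
// dimension removed.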
Status MapDefunOp::SetupArgs(OpKernelContext* ctx,
ComputeOptions** compute_opts) {
OpInputList arguments;
TF_RETURN_IF_ERROR(ctx->input_list(kArguments, &arguments));
OpInputList captured_inputs;
TF_RETURN_IF_ERROR(ctx->input_list(kCapturedInputs, &captured_inputs));
int64_t batch_size = arguments[0].dims() > 0 ? arguments[0].dim_size(0) : -1;
for (size_t i = 0; i < arguments.size(); ++i) {
if (arguments[i].dims() == 0) {
return errors::InvalidArgument(
"All inputs must have rank at least 1. Input ", i,
" has a rank of 0.");
} else if (arguments[i].dim_size(0) != batch_size) {
return errors::InvalidArgument(
"All inputs must have the same dimension 0. Input ", i,
" has leading dimension ", ctx->input(i).dim_size(0),
", while all previous inputs have leading dimension ", batch_size);
}
}
std::vector<TensorShape> arg_shapes;
arg_shapes.reserve(arguments.size());
for (size_t i = 0; i < arguments.size(); ++i) {
arg_shapes.push_back(arguments[i].shape());
arg_shapes.at(i).RemoveDim(0);
}
*compute_opts =
new ComputeOptions(ctx, arguments, captured_inputs, std::move(arg_shapes),
batch_size, output_shapes_, max_intra_op_parallelism_);
return absl::OkStatus();
}
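// Pre-allocates output tensors whose shapes are fully defined (with the batch
// dimension prepended); outputs with unknown shapes are allocated lazily in
// MapFunctionCallFrame::SetRetval.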
Status MapDefunOp::SetupOutputs(OpKernelContext* ctx, ComputeOptions* opts) {
mutex_lock l(opts->mu);
TF_RETURN_IF_ERROR(ctx->output_list(kOutput, &opts->output));
for (size_t i = 0; i < output_types().size(); ++i) {
if (output_shapes_.at(i).IsFullyDefined()) {
Tensor* out = nullptr;
TensorShape output_shape;
output_shapes_.at(i).AsTensorShape(&output_shape);
output_shape.InsertDim(0, opts->batch_size);
TF_RETURN_IF_ERROR(opts->output.allocate(i, output_shape, &out));
}
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("MapDefun").Device(DEVICE_CPU), MapDefunOp);
}
}
} | #include "tensorflow/core/kernels/data/map_defun_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "map_defun";
constexpr char kOpName[] = "MapDefun";
class MapDefunOpParams : public DatasetParams {
public:
MapDefunOpParams(std::vector<Tensor> arguments,
std::vector<Tensor> captured_inputs,
DataTypeVector type_arguments, DataTypeVector type_captured,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
int max_intra_op_parallelism, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
arguments_(std::move(arguments)),
captured_inputs_(std::move(captured_inputs)),
type_arguments_(std::move(type_arguments)),
type_captured_(std::move(type_captured)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
max_intra_op_parallelism_(max_intra_op_parallelism) {}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = arguments_;
input_tensors.insert(input_tensors.end(), captured_inputs_.begin(),
captured_inputs_.end());
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(arguments_.size() + captured_inputs_.size());
for (int i = 0; i < arguments_.size(); ++i) {
input_names->emplace_back(
strings::StrCat(MapDefunOp::kArguments, "_", i));
}
for (int i = 0; i < captured_inputs_.size(); ++i) {
input_names->emplace_back(
strings::StrCat(MapDefunOp::kCapturedInputs, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{MapDefunOp::kTarguments, type_arguments_},
{MapDefunOp::kTcaptured, type_captured_},
{MapDefunOp::kOutputShapes, output_shapes_},
{MapDefunOp::kOutputTypes, output_dtypes_},
{MapDefunOp::kFunc, func_},
{MapDefunOp::kMaxIntraOpParallelism, max_intra_op_parallelism_}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override { return "MapDef"; }
private:
std::vector<Tensor> arguments_;
std::vector<Tensor> captured_inputs_;
DataTypeVector type_arguments_;
DataTypeVector type_captured_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
int max_intra_op_parallelism_;
};
class MapDefunOpTest : public DatasetOpsTestBase {
protected:
Status CreateMapDefunOpKernel(const MapDefunOpParams& params,
std::unique_ptr<OpKernel>* map_defun_kernel) {
    std::vector<string> input_names;
    TF_RETURN_IF_ERROR(params.GetInputNames(&input_names));
    AttributeVector attributes;
    TF_RETURN_IF_ERROR(params.GetAttributes(&attributes));
    NodeDef node_def =
        test::function::NDef(kNodeName, kOpName, input_names, attributes);
TF_RETURN_IF_ERROR(CreateOpKernel(node_def, map_defun_kernel));
return absl::OkStatus();
}
Status CreateMapDefunContext(OpKernel* const op_kernel,
gtl::InlinedVector<TensorValue, 4>* const inputs,
std::unique_ptr<OpKernelContext>* context) {
TF_RETURN_IF_ERROR(CheckOpKernelInput(*op_kernel, *inputs));
TF_RETURN_IF_ERROR(CreateOpKernelContext(op_kernel, inputs, context));
return absl::OkStatus();
}
};
struct TestCase {
MapDefunOpParams map_defun_op_params;
std::vector<Tensor> expected_outputs;
};
TestCase TestCase1() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{},
{DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}})},
{test::function::XTimesTwo()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}), {0, 2, 4, 6, 8, 10})}};
}
TestCase TestCase2() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5}),
CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 10, 20, 30, 40, 50})},
{},
{DT_INT64, DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}), {0, 11, 22, 33, 44, 55})}};
}
TestCase TestCase3() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{CreateTensor<int64_t>(TensorShape({3, 2}),
{10, 101, 12, 103, 14, 105})}};
}
TestCase InvalidOutputTypes() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_FLOAT},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
TestCase InvalidOutputShapes() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5})},
{CreateTensor<int64_t>(TensorShape({2}), {10, 100})},
{DT_INT64},
{DT_INT64},
{DT_INT64},
{PartialTensorShape({2, 2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
TestCase InvalidInputs() {
return {
MapDefunOpParams(
{CreateTensor<int64_t>(TensorShape({3, 2}),
{0, 1, 2, 3, 4, 5}),
CreateTensor<int64_t>(TensorShape({2, 2}),
{0, 1, 2, 3})},
{},
{DT_INT64, DT_INT64},
{},
{DT_INT64},
{PartialTensorShape({2})},
{FunctionDefHelper::FunctionRef("XAddY", {{"T", DT_INT64}})},
{test::function::XAddY()},
2, kNodeName),
{}};
}
class ParameterizedMapDefunOpTest
: public MapDefunOpTest,
public ::testing::WithParamInterface<TestCase> {};
TEST_P(ParameterizedMapDefunOpTest, NormalTests) {
TestCase test_case = GetParam();
TF_ASSERT_OK(InitializeRuntime(test_case.map_defun_op_params));
auto input_tensors = test_case.map_defun_op_params.GetInputTensors();
gtl::InlinedVector<TensorValue, 4> input_values;
for (auto& input : input_tensors) {
input_values.push_back(TensorValue(&input));
}
std::unique_ptr<OpKernel> map_defun_kernel;
TF_ASSERT_OK(
CreateMapDefunOpKernel(test_case.map_defun_op_params, &map_defun_kernel));
std::unique_ptr<OpKernelContext> context;
TF_ASSERT_OK(
CreateMapDefunContext(map_defun_kernel.get(), &input_values, &context));
TF_ASSERT_OK(RunOpKernel(map_defun_kernel.get(), context.get()));
EXPECT_EQ(context->num_outputs(), test_case.expected_outputs.size());
for (int i = 0; i < context->num_outputs(); ++i) {
TF_EXPECT_OK(ExpectEqual(*context->mutable_output(i),
test_case.expected_outputs[i]));
}
}
INSTANTIATE_TEST_SUITE_P(MapDefunOpTest, ParameterizedMapDefunOpTest,
::testing::ValuesIn(std::vector<TestCase>(
{TestCase1(), TestCase2(), TestCase3()})));
TEST_F(MapDefunOpTest, InvalidArguments) {
std::vector<TestCase> test_cases = {InvalidOutputTypes(),
InvalidOutputShapes(), InvalidInputs()};
for (auto& test_case : test_cases) {
TF_ASSERT_OK(InitializeRuntime(test_case.map_defun_op_params));
auto input_tensors = test_case.map_defun_op_params.GetInputTensors();
gtl::InlinedVector<TensorValue, 4> input_values;
for (auto& input : input_tensors) {
input_values.push_back(TensorValue(&input));
}
std::unique_ptr<OpKernel> map_defun_kernel;
TF_ASSERT_OK(CreateMapDefunOpKernel(test_case.map_defun_op_params,
&map_defun_kernel));
std::unique_ptr<OpKernelContext> context;
TF_ASSERT_OK(
CreateMapDefunContext(map_defun_kernel.get(), &input_values, &context));
EXPECT_EQ(RunOpKernel(map_defun_kernel.get(), context.get()).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,562 | cpp | tensorflow/tensorflow | shuffle_dataset_op | tensorflow/core/kernels/data/shuffle_dataset_op.cc | tensorflow/core/kernels/data/shuffle_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_SHUFFLE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_SHUFFLE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ShuffleDatasetOpBase : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kBufferSize = "buffer_size";
static constexpr const char* const kSeed = "seed";
static constexpr const char* const kSeed2 = "seed2";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kReshuffleEachIteration =
"reshuffle_each_iteration";
explicit ShuffleDatasetOpBase(OpKernelConstruction* ctx);
protected:
class ShuffleDatasetBase;
};
class ShuffleDatasetOp : public ShuffleDatasetOpBase {
public:
static constexpr const char* const kDatasetType = "Shuffle";
explicit ShuffleDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
class DatasetV2;
class DatasetV3;
int op_version_ = 0;
bool reshuffle_each_iteration_ = true;
};
class ShuffleAndRepeatDatasetOp : public ShuffleDatasetOpBase {
public:
static constexpr const char* const kDatasetType = "ShuffleAndRepeat";
static constexpr const char* const kCount = "count";
explicit ShuffleAndRepeatDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
class DatasetV2;
int op_version_ = 0;
bool reshuffle_each_iteration_ = true;
};
}
}
#endif
#include "tensorflow/core/kernels/data/shuffle_dataset_op.h"
#include <cstdint>
#include <deque>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShuffleDatasetOpBase::kInputDataset;
constexpr const char* const ShuffleDatasetOpBase::kBufferSize;
constexpr const char* const ShuffleDatasetOpBase::kSeed;
constexpr const char* const ShuffleDatasetOpBase::kSeed2;
constexpr const char* const ShuffleDatasetOpBase::kOutputTypes;
constexpr const char* const ShuffleDatasetOpBase::kOutputShapes;
constexpr const char* const
ShuffleDatasetOpBase::kReshuffleEachIteration;
constexpr const char* const ShuffleDatasetOp::kDatasetType;
constexpr const char* const
ShuffleAndRepeatDatasetOp::kDatasetType;
constexpr const char* const ShuffleAndRepeatDatasetOp::kCount;
const int64_t kLogIntervalMicros = 10 * 1000000;
const int64_t kMaxEpochsInBuffer = 3;
constexpr char kNumRandomSamples[] = "num_random_samples";
constexpr char kDataProduced[] = "data_produced";
constexpr char kEndOfInputSequence[] = "end_of_input_sequence";
constexpr char kEpoch[] = "epoch";
constexpr char kNumElements[] = "num_elements";
constexpr char kSlicesSize[] = "slices_size";
constexpr char kSlicesStart[] = "slices_start";
constexpr char kSlicesEnd[] = "slices_end";
constexpr char kSlicesReachedEndOfSequence[] = "slices_reached_end_of_sequence";
constexpr char kSeedGenerator[] = "SeedGenerator";
constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples";
constexpr char kShuffleDatasetV1[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3";
constexpr char kShuffleAndRepeatDatasetV1[] = "ShuffleAndRepeatDataset";
constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2";
ShuffleDatasetOpBase::ShuffleDatasetOpBase(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
class ShuffleDatasetOpBase::ShuffleDatasetBase : public DatasetBase {
public:
ShuffleDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
int64_t buffer_size,
std::shared_ptr<SeedGenerator> seed_generator,
int64_t count)
: DatasetBase(DatasetContext(ctx)),
input_(input),
buffer_size_(buffer_size),
seed_generator_(std::move(seed_generator)),
count_(count),
traceme_metadata_(
{{"buffer_size",
strings::Printf("%lld", static_cast<long long>(buffer_size))}}) {
input_->Ref();
}
~ShuffleDatasetBase() override { input_->Unref(); }
virtual string op_type() const = 0;
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (count_ == -1 || input_->Cardinality(options) == kInfiniteCardinality) {
return kInfiniteCardinality;
} else if (input_->Cardinality(options) == kUnknownCardinality) {
return kUnknownCardinality;
} else {
return input_->Cardinality(options) * count_;
}
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
{
mutex_lock l(mu_);
if (shuffled_indices_.empty()) {
InitializeRandomAccessIndices();
}
}
int64 shuffled_index;
{
tf_shared_lock l(mu_);
shuffled_index = shuffled_indices_[index];
}
TF_RETURN_IF_ERROR(input_->Get(ctx, shuffled_index, out_tensors));
return absl::OkStatus();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(buffer_size_, seed_generator_->seed(),
seed_generator_->seed2(), count_);
return name_utils::DatasetDebugString(op_type(), params);
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, name_utils::IteratorPrefix(op_type(), prefix)},
seed_generator_.get());
}
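  // Lazily builds the index permutation used by Get(): an in-place
  // Fisher-Yates shuffle of [0, cardinality), driven by the dataset's seeds.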
void InitializeRandomAccessIndices() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const int64 cardinality = Cardinality();
shuffled_indices_ = std::vector<std::int64_t>(cardinality);
std::iota(shuffled_indices_.begin(), shuffled_indices_.end(), 0);
int64_t shuffled_index = 0;
random::PhiloxRandom parent_generator =
random::PhiloxRandom(seed_generator_->seed(), seed_generator_->seed2());
random::SingleSampleAdapter<random::PhiloxRandom> generator =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator);
while (shuffled_index < cardinality) {
int64_t offset = generator() % (cardinality - shuffled_index);
std::swap(shuffled_indices_[shuffled_index + offset],
shuffled_indices_[shuffled_index]);
shuffled_index += 1;
}
}
protected:
class Iterator : public DatasetIterator<ShuffleDatasetBase> {
public:
explicit Iterator(const Params& params, SeedGenerator* seed_generator)
: DatasetIterator<ShuffleDatasetBase>(params),
seed_generator_(seed_generator),
parent_generator_(seed_generator->seed(), seed_generator->seed2()),
generator_(&parent_generator_) {
if (params.dataset->buffer_size_ == kUnknownCardinality) {
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>();
} else {
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>(
params.dataset->buffer_size_);
}
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
if (ctx->symbolic_checkpoint()) {
for (int64_t i = 0; i < buffer_->size(); ++i) {
checkpoint_indices_.insert(i);
}
}
return absl::OkStatus();
}
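    // Fills the shuffle buffer as needed, then returns a uniformly random
    // element from the oldest (front) slice, compacting that slice as its
    // elements are consumed.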
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(FillBuffer(ctx));
if (num_elements_ == 0) {
DCHECK(input_impl_ == nullptr);
*end_of_sequence = true;
return absl::OkStatus();
}
*end_of_sequence = false;
ClearEmptySlices();
DCHECK(!slices_.empty());
int64_t offset =
Random() % (slices_.front()->end - slices_.front()->start);
int64_t index = (slices_.front()->start + offset) % buffer_->size();
*out_tensors = std::move(buffer_->at(index));
this->RecordBufferDequeue(ctx, *out_tensors);
std::swap(buffer_->at(index),
buffer_->at(slices_.front()->start % buffer_->size()));
checkpoint_indices_.insert(index);
checkpoint_indices_.insert(slices_.front()->start % buffer_->size());
slices_.front()->start++;
num_elements_--;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
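    // Re-creates the Philox generator from the stored seeds and skips the
    // samples already drawn, so restoring a checkpoint reproduces the same
    // shuffle order.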
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seed_, seed2_);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kEpochNumRandomSamples,
seed_generator_->num_random_samples()));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNumRandomSamples,
num_random_samples_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kSeed, seed_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kSeed2, seed2_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kEndOfInputSequence, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(this->SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kEpoch, epoch_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNumElements, num_elements_));
const std::string key_prefix = absl::StrCat(prefix(), kColon, "buffer");
if (ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(UpdateCheckpointElements(
writer, key_prefix, *buffer_, checkpoint_indices_));
checkpoint_indices_.clear();
} else {
TF_RETURN_IF_ERROR(
WriteElementsToCheckpoint(writer, key_prefix, *buffer_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kSlicesSize, slices_.size()));
for (size_t i = 0; i < slices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrJoin(std::make_tuple(kSlicesStart, i), "_"),
slices_[i]->start));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrJoin(std::make_tuple(kSlicesEnd, i), "_"),
slices_[i]->end));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(),
absl::StrJoin(std::make_tuple(kSlicesReachedEndOfSequence, i), "_"),
static_cast<int64_t>(slices_[i]->reached_end_of_sequence)));
}
if (data_produced_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->prefix(), kDataProduced, ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t num_random_samples;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kEpochNumRandomSamples,
&num_random_samples));
seed_generator_->set_num_random_samples(num_random_samples);
seed_generator_->Reset();
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNumRandomSamples,
&num_random_samples_));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kSeed, &seed_));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kSeed2, &seed2_));
ResetRngs();
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kEndOfInputSequence, &input_empty));
if (static_cast<bool>(!input_empty)) {
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this, this->prefix(), &input_impl_));
TF_RETURN_IF_ERROR(this->RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(this->prefix(), kEpoch, &epoch_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(this->prefix(), kNumElements, &num_elements_));
size_t slices_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(this->prefix(), kSlicesSize, &temp));
slices_size = static_cast<size_t>(temp);
}
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>();
TF_RETURN_IF_ERROR(ReadElementsFromCheckpoint(
ctx, reader, absl::StrCat(prefix(), kColon, "buffer"),
buffer_.get()));
if (ctx->symbolic_checkpoint()) {
DCHECK(checkpoint_indices_.empty());
for (size_t i = 0; i < buffer_->size(); ++i) {
checkpoint_indices_.insert(i);
}
}
for (const auto& element : *buffer_) {
RecordBufferEnqueue(ctx, element);
}
if (!IsShuffleAll()) {
buffer_->resize(dataset()->buffer_size_);
}
slices_.clear();
for (size_t i = 0; i < slices_size; ++i) {
int64_t start;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->prefix(),
absl::StrJoin(std::make_tuple(kSlicesStart, i), "_"), &start));
int64_t end;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->prefix(), absl::StrJoin(std::make_tuple(kSlicesEnd, i), "_"),
&end));
int64_t reached_end_of_sequence;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(),
absl::StrJoin(std::make_tuple(kSlicesReachedEndOfSequence, i), "_"),
&reached_end_of_sequence));
slices_.push_back(std::make_unique<Slice>(
start, end, static_cast<bool>(reached_end_of_sequence)));
}
data_produced_ = reader->Contains(this->prefix(), kDataProduced);
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return this->dataset()->traceme_metadata_;
}
private:
struct Slice {
Slice(int64_t start, int64_t end, bool reached_end_of_sequence)
: start(start),
end(end),
reached_end_of_sequence(reached_end_of_sequence) {}
int64_t start;
int64_t end;
bool reached_end_of_sequence = false;
};
random::SingleSampleAdapter<random::PhiloxRandom>::ResultType Random()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
num_random_samples_++;
auto out = generator_();
return out;
}
bool IsServingSliceComplete() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (auto& slice : slices_) {
if (slice->start != slice->end) {
return slice->reached_end_of_sequence;
}
}
return false;
}
bool IsShuffleAll() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dataset()->buffer_size_ == kUnknownCardinality;
}
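    // Pulls elements from the input into the shuffle buffer, starting new
    // epochs as the input is exhausted, until ShouldFillBuffer() reports that
    // enough elements are buffered; logs progress periodically for large
    // buffers.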
Status FillBuffer(IteratorContext* ctx) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t start_micros = EnvTime::NowMicros();
int64_t num_log_entries = 0;
while (ShouldFillBuffer()) {
if (EnvTime::NowMicros() >
((num_log_entries + 1) * kLogIntervalMicros) + start_micros) {
num_log_entries++;
LOG_EVERY_N_SEC(INFO, 10)
<< dataset()->metadata().name() << ": "
<< "Filling up shuffle buffer (this may take a while): "
<< num_elements_ << " of " << BufferSizeString();
}
if (!input_impl_) {
TF_RETURN_IF_ERROR(PrepareNextEpoch(ctx));
}
std::vector<Tensor> input_element;
bool end_of_input_sequence = false;
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, &input_element, &end_of_input_sequence));
if (end_of_input_sequence) {
slices_.back()->reached_end_of_sequence = true;
}
if (!end_of_input_sequence) {
AddToShuffleBuffer(ctx, std::move(input_element));
continue;
}
input_impl_.reset();
if (ctx->split_providers().empty() && !data_produced_ &&
this->dataset()->count_ == -1) {
return absl::OkStatus();
}
}
if (num_log_entries > 0) {
LOG(INFO) << "Shuffle buffer filled.";
}
return absl::OkStatus();
}
bool ShouldFillBuffer() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!input_impl_ && dataset()->count_ != -1 &&
epoch_ >= dataset()->count_) {
return false;
}
if (slices_.size() > kMaxEpochsInBuffer && num_elements_ > 0) {
return false;
}
if (IsShuffleAll() && (slices_.empty() || !IsServingSliceComplete())) {
return true;
}
return num_elements_ < buffer_->size();
}
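    // Starts the next epoch: opens a new slice continuing from the previous
    // slice's end, resets any split providers, and re-creates the input
    // iterator.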
Status PrepareNextEpoch(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (epoch_ == 0) {
slices_.push_back(std::make_unique<Slice>(0, 0, false));
} else {
int64_t n = slices_.back()->end;
slices_.push_back(std::make_unique<Slice>(n, n, false));
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
}
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this, this->prefix(), &input_impl_));
epoch_++;
return absl::OkStatus();
}
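    // Adds an element to the buffer, growing the buffer only in shuffle-all
    // mode (when it is already full), extends the current slice, and records
    // the touched index for checkpointing.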
void AddToShuffleBuffer(IteratorContext* ctx, std::vector<Tensor>&& element)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
data_produced_ = true;
if (num_elements_ == 0) {
VLOG(1) << "Starting to fill up shuffle buffer of size: "
<< BufferSizeString();
}
this->RecordBufferEnqueue(ctx, element);
if (num_elements_ == buffer_->size()) {
DCHECK(IsShuffleAll());
checkpoint_indices_.insert(buffer_->size());
buffer_->push_back(element);
} else {
size_t index = slices_.back()->end % buffer_->size();
checkpoint_indices_.insert(index);
buffer_->at(index) = std::move(element);
}
num_elements_++;
slices_.back()->end++;
}
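    // Drops fully consumed slices from the front of the queue and reseeds the
    // RNG so each remaining epoch is shuffled with fresh seeds.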
void ClearEmptySlices() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
while (slices_.front()->start == slices_.front()->end) {
slices_.pop_front();
num_random_samples_ = 0;
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
}
}
std::string BufferSizeString() {
return absl::StrCat(dataset()->buffer_size_);
}
mutex mu_;
SeedGenerator* const seed_generator_ TF_GUARDED_BY(mu_);
std::unique_ptr<std::vector<std::vector<Tensor>>> buffer_
TF_GUARDED_BY(mu_);
absl::flat_hash_set<int64_t> checkpoint_indices_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_) = nullptr;
int64_t epoch_ TF_GUARDED_BY(mu_) = 0;
int64_t num_elements_ TF_GUARDED_BY(mu_) = 0;
int64_t seed_ TF_GUARDED_BY(mu_) = 0;
int64_t seed2_ TF_GUARDED_BY(mu_) = 0;
std::deque<std::unique_ptr<Slice>> slices_ TF_GUARDED_BY(mu_);
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
bool data_produced_ TF_GUARDED_BY(mu_) = false;
};
const DatasetBase* const input_;
const int64_t buffer_size_;
const std::shared_ptr<SeedGenerator> seed_generator_;
const int64_t count_;
const TraceMeMetadata traceme_metadata_;
mutable mutex mu_;
mutable std::vector<std::int64_t> shuffled_indices_ TF_GUARDED_BY(mu_);
};
class ShuffleDatasetOp::Dataset : public ShuffleDatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~Dataset() override {
manager_->Unref();
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
AttrValue reshuffle_each_iteration;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{input_graph_node, buffer_size_node, seed_node, seed2_node},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
class ShuffleDatasetOp::DatasetV2 : public ShuffleDatasetBase {
public:
DatasetV2(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()) {}
~DatasetV2() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{input_graph_node, buffer_size_node, resource_handle_node},
{},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
};
class ShuffleDatasetOp::DatasetV3 : public ShuffleDatasetBase {
public:
DatasetV3(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~DatasetV3() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed | #include "tensorflow/core/kernels/data/shuffle_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kShuffleNodeName[] = "shuffle_dataset";
constexpr char kShuffleAndRepeatNodeName[] = "shuffle_and_repeat_dataset";
class ShuffleDatasetParams : public DatasetParams {
public:
template <typename T>
ShuffleDatasetParams(T input_dataset_params, int64_t buffer_size,
int64_t seed, int64_t seed2, int64_t count,
bool reshuffle_each_iteration,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
buffer_size_(buffer_size),
seed_(seed),
seed2_(seed2),
count_(count),
reshuffle_each_iteration_(reshuffle_each_iteration) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = {
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<int64_t>(TensorShape({}), {seed_}),
CreateTensor<int64_t>(TensorShape({}), {seed2_})};
if (count_ != 1) {
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {count_}));
}
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(ShuffleDatasetOpBase::kInputDataset);
input_names->emplace_back(ShuffleDatasetOpBase::kBufferSize);
input_names->emplace_back(ShuffleDatasetOpBase::kSeed);
input_names->emplace_back(ShuffleDatasetOpBase::kSeed2);
if (count_ != 1) {
input_names->emplace_back(ShuffleAndRepeatDatasetOp::kCount);
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("reshuffle_each_iteration",
reshuffle_each_iteration_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
if (count_ != 1) {
return ShuffleAndRepeatDatasetOp::kDatasetType;
}
return ShuffleDatasetOp::kDatasetType;
}
int64_t count() const { return count_; }
private:
int64_t buffer_size_;
int64_t seed_;
int64_t seed2_;
int64_t count_;
bool reshuffle_each_iteration_;
};
class ShuffleDatasetOpTest : public DatasetOpsTestBase {};
ShuffleDatasetParams ShuffleDatasetParams1() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
3,
1,
2,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams2() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
10,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams3() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
2,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams4() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
10,
2,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams5() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
1,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams6() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
10,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams7() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
10,
1,
2,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams8() {
return ShuffleDatasetParams(RangeDatasetParams(0, 3, 1),
10,
1,
2,
-1,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleDatasetParamsWithUnknownCardinality() {
return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
-2,
1,
2,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParamsWithInvalidBufferSize() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
-1,
1,
2,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleNodeName);
}
ShuffleDatasetParams ShuffleAndRepeatDatasetParamsWithInvalidBufferSize() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
-1,
1,
2,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleAndRepeatDatasetParamsWithInvalidCount() {
return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
10,
1,
2,
0,
false,
{DT_INT64},
{PartialTensorShape({})},
kShuffleAndRepeatNodeName);
}
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<Tensor> expected_shuffle_outputs;
std::vector<Tensor> expected_reshuffle_outputs;
};
std::vector<GetNextTestCase<ShuffleDatasetParams>> GetNextTestCases() {
return {
{ShuffleDatasetParams1(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}}),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}})},
{ShuffleDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {6}, {0}, {5}, {2}, {7}, {4}, {3}, {9}, {8}})},
{ShuffleDatasetParams3(),
CreateTensors<int64_t>(
TensorShape({}), {{0}, {2}, {1}, {3}, {5}, {6}, {4}, {7}, {8}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {0}, {2}, {3}, {4}, {5}, {6}, {7}, {9}, {8}})},
{ShuffleDatasetParams4(),
CreateTensors<int64_t>(
TensorShape({}), {{3}, {0}, {8}, {1}, {5}, {4}, {7}, {2}, {6}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{4}, {6}, {9}, {0}, {1}, {8}, {2}, {7}, {3}, {5}})},
{ShuffleDatasetParams5(),
CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{ShuffleDatasetParams6(),
{},
{}},
{ShuffleDatasetParams7(),
CreateTensors<int64_t>(
TensorShape({}), {{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}}),
CreateTensors<int64_t>(
TensorShape({}),
{{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}})},
{ShuffleDatasetParams8(),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}}),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})},
{ShuffleDatasetParamsWithUnknownCardinality(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {6}, {0}, {5}, {2}, {7}, {4}, {3}, {9}, {8}})}};
}
class ParameterizedGetNextTest : public ShuffleDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<ShuffleDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> shuffled_out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
shuffled_out_tensors.insert(shuffled_out_tensors.end(), next.begin(),
next.end());
if (test_case.dataset_params.count() == -1 &&
shuffled_out_tensors.size() ==
test_case.expected_shuffle_outputs.size()) {
break;
}
}
end_of_sequence = false;
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
std::vector<Tensor> reshuffled_out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
reshuffled_out_tensors.insert(reshuffled_out_tensors.end(), next.begin(),
next.end());
if (test_case.dataset_params.count() == -1 &&
reshuffled_out_tensors.size() ==
test_case.expected_shuffle_outputs.size()) {
break;
}
}
TF_EXPECT_OK(ExpectEqual(shuffled_out_tensors,
test_case.expected_shuffle_outputs,
true));
TF_EXPECT_OK(ExpectEqual(reshuffled_out_tensors,
test_case.expected_reshuffle_outputs,
true));
}
INSTANTIATE_TEST_CASE_P(ShuffleDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
std::vector<DatasetNodeNameTestCase<ShuffleDatasetParams>>
DatasetNodeNameTestCases() {
return {{ShuffleDatasetParams1(),
kShuffleNodeName},
{ShuffleDatasetParams7(),
kShuffleAndRepeatNodeName}};
}
DATASET_NODE_NAME_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
DatasetNodeNameTestCases())
std::vector<DatasetTypeStringTestCase<ShuffleDatasetParams>>
DatasetTypeStringTestCases() {
return {{ShuffleDatasetParams1(),
name_utils::OpName(
ShuffleDatasetOp::kDatasetType)},
{ShuffleDatasetParams7(),
name_utils::OpName(ShuffleAndRepeatDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
DatasetTypeStringTestCases())
TEST_F(ShuffleDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShuffleDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<ShuffleDatasetParams>> CardinalityTestCases() {
return {{ShuffleDatasetParams1(),
10},
{ShuffleDatasetParams2(),
10},
{ShuffleDatasetParams3(),
10},
{ShuffleDatasetParams4(),
10},
{ShuffleDatasetParams5(),
10},
{ShuffleDatasetParams6(),
0},
{ShuffleDatasetParams7(),
20},
{ShuffleDatasetParams8(),
kInfiniteCardinality},
{ShuffleDatasetParamsWithUnknownCardinality(),
10}};
}
DATASET_CARDINALITY_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
CardinalityTestCases())
TEST_F(ShuffleDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShuffleDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(ShuffleDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ShuffleDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<Tensor> expected_shuffle_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<ShuffleDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ShuffleDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}})},
{ShuffleDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}})},
{ShuffleDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {2}, {1}, {3}, {5}, {6}, {4}, {7}, {8}, {9}})},
{ShuffleDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{3}, {0}, {8}, {1}, {5}, {4}, {7}, {2}, {6}, {9}})},
{ShuffleDatasetParams5(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{ShuffleDatasetParams6(),
{0, 4, 11},
{}},
{ShuffleDatasetParams7(),
{0, 5, 22},
CreateTensors<int64_t>(
TensorShape({}),
{{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}})},
{ShuffleDatasetParams8(),
{0, 5, 20},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})},
{ShuffleDatasetParamsWithUnknownCardinality(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}})}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public ShuffleDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<ShuffleDatasetParams>> {};
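// Saves and restores the iterator at each breakpoint and verifies that the
// full sequence of produced elements still matches the expected shuffled
// outputs.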
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_shuffle_outputs,
true));
}
INSTANTIATE_TEST_CASE_P(ShuffleDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(ShuffleDatasetOpTest, InvalidArguments) {
std::vector<ShuffleDatasetParams> dataset_params_vec(
{ShuffleDatasetParamsWithInvalidBufferSize(),
ShuffleAndRepeatDatasetParamsWithInvalidBufferSize(),
ShuffleAndRepeatDatasetParamsWithInvalidCount()});
for (const auto& dataset_params : dataset_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,563 | cpp | tensorflow/tensorflow | shard_dataset_op | tensorflow/core/kernels/data/shard_dataset_op.cc | tensorflow/core/kernels/data/shard_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_SHARD_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_SHARD_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ShardDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Shard";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kNumShards = "num_shards";
static constexpr const char* const kIndex = "index";
static constexpr const char* const kRequireNonEmpty = "require_non_empty";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit ShardDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
bool require_non_empty_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShardDatasetOp::kDatasetType;
constexpr const char* const ShardDatasetOp::kInputDataset;
constexpr const char* const ShardDatasetOp::kNumShards;
constexpr const char* const ShardDatasetOp::kIndex;
constexpr const char* const ShardDatasetOp::kRequireNonEmpty;
constexpr const char* const ShardDatasetOp::kOutputTypes;
constexpr const char* const ShardDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kNextIndex[] = "next_index";
constexpr char kFileShardErrorMessage[] =
"If you are using datasets with distribution strategy, consider setting "
"the auto sharding policy to either DATA or OFF using the "
"`experimental_distribute.auto_shard_policy` option of `tf.data.Options()`."
" Or, split your input files into a larger number of small files such that "
"number of files is greater than number of shards/workers.";
class ShardDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t num_shards, int64_t index,
bool require_non_empty, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)),
num_shards_(num_shards),
index_(index),
input_(input),
require_non_empty_(require_non_empty),
traceme_metadata_(
{{"index", strings::Printf("%lld", static_cast<long long>(index))},
{"num_shards",
strings::Printf("%lld", static_cast<long long>(num_shards))}}) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(num_shards_, index_);
return name_utils::DatasetDebugString(kDatasetType, params);
}
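  // Each shard receives n / num_shards_ elements, plus one extra element when
  // index_ < n % num_shards_ (e.g. 10 input elements, 5 shards -> 2 each).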
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / num_shards_ + (index_ < n % num_shards_ ? 1 : 0);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
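  // Random access: element i of this shard is element index_ + num_shards_ * i
  // of the input dataset.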
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index_ + (num_shards_ * index), out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* num_shards = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(num_shards_, &num_shards));
Node* index = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(index_, &index));
AttrValue require_non_empty_attr;
b->BuildAttrValue(require_non_empty_, &require_non_empty_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, num_shards, index},
{{kRequireNonEmpty, require_non_empty_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), next_index_(0), element_count_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (dataset()->num_shards_ == kShardHint) {
return errors::FailedPrecondition(
"`tf.data.Dataset.shard(SHARD_HINT, ...)` can only be used in "
"`tf.distribute.Strategy.experimental_distribute_dataset()` with "
"`tf.data.experimental.AutoShardPolicy.HINT` policy, or tf.data "
"service with "
"`tf.data.experimental.service.ShardingPolicy.HINT` processing "
"mode.");
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
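    // Skips ahead to the next element that belongs to this shard and returns
    // it. When an index mapper is present (global shuffling), delegates to
    // the random-access path in Get() instead.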
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
*end_of_sequence = false;
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (ctx->index_mapper() != nullptr) {
return Get(ctx, out_tensors, end_of_sequence);
}
int num_to_skip =
(dataset()->index_ - next_index_) % dataset()->num_shards_;
if (num_to_skip < 0) {
num_to_skip += dataset()->num_shards_;
}
int num_skipped;
TF_RETURN_IF_ERROR(
input_impl_->Skip(ctx, num_to_skip, end_of_sequence, &num_skipped));
next_index_ += num_skipped;
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
std::vector<Tensor> result;
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &result, end_of_sequence));
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
next_index_++;
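      // On the first pass, verify that the input has at least num_shards_
      // elements; otherwise FILE-based sharding would leave some shards empty.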
if (dataset()->require_non_empty_ &&
next_index_ < dataset()->num_shards_) {
int num_skipped;
Status s = input_impl_->Skip(ctx, dataset()->num_shards_ - next_index_,
end_of_sequence, &num_skipped);
if (*end_of_sequence || errors::IsOutOfRange(s)) {
return absl::InvalidArgumentError(absl::StrCat(
"Could not apply FILE based sharding: the dataset only has ",
next_index_, " file(s), which is not enough for the required ",
dataset()->num_shards_, " shards/workers. ",
kFileShardErrorMessage));
} else if (!s.ok()) {
return s;
}
next_index_ = dataset()->num_shards_;
}
*out_tensors = std::move(result);
return absl::OkStatus();
}
Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
auto merge_checkpoint = gtl::MakeCleanup([&ctx_with_index_mapper] {
ctx_with_index_mapper.MergeCheckpoint();
});
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
if (*end_of_sequence && dataset()->require_non_empty_ &&
element_count_ == 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Could not apply FILE based sharding: The dataset does not have "
"enough file(s) for the required ",
dataset()->num_shards_, " shards/workers. ",
kFileShardErrorMessage));
}
++element_count_;
return absl::OkStatus();
}
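    // Composes the parent mapping with the shard mapping: a shard-local
    // position maps to output_index * num_shards_ + index_ in the input.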
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t num_shards = dataset()->num_shards_;
int64_t shard_index = dataset()->index_;
return [parent_index_mapper, num_shards,
shard_index](size_t element_position) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t output_index,
parent_index_mapper(element_position));
return output_index * num_shards + shard_index;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(
std::move(args), 1.0 / static_cast<double>(dataset()->num_shards_));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNextIndex, next_index_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (ctx->restored_element_count().has_value()) {
element_count_ = *ctx->restored_element_count();
return RestoreInput(ctx, reader, input_impl_);
}
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kNextIndex, &next_index_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t next_index_ TF_GUARDED_BY(mu_);
size_t element_count_ TF_GUARDED_BY(mu_);
};
const int64_t num_shards_;
const int64_t index_;
const DatasetBase* const input_;
const bool require_non_empty_;
const TraceMeMetadata traceme_metadata_;
absl::Status random_indexing_compatible_;
};
ShardDatasetOp::ShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kRequireNonEmpty, &require_non_empty_));
}
void ShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t index = 0;
int64_t num_shards = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kNumShards, &num_shards));
OP_REQUIRES(
ctx, num_shards > 0 || num_shards == kShardHint,
errors::InvalidArgument("Number of shards must be greater than zero "
"(currently num_shards = ",
num_shards, ")."));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kIndex, &index));
OP_REQUIRES(
ctx, (index >= 0 && index < num_shards) || num_shards == kShardHint,
errors::InvalidArgument("Index must be between 0 and ", num_shards - 1,
" (currently index = ", index, ")."));
*output = new Dataset(ctx, num_shards, index, require_non_empty_, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ShardDataset").Device(DEVICE_CPU),
ShardDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "shard_dataset";
class ShardDatasetParams : public DatasetParams {
public:
template <typename T>
ShardDatasetParams(T input_dataset_params, int64_t num_shards, int64_t index,
bool require_non_empty, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_shards_(num_shards),
index_(index),
require_non_empty_(require_non_empty) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return CreateTensors<int64_t>(TensorShape({}), {{num_shards_}, {index_}});
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(ShardDatasetOp::kInputDataset);
input_names->emplace_back(ShardDatasetOp::kNumShards);
input_names->emplace_back(ShardDatasetOp::kIndex);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("require_non_empty", require_non_empty_);
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return ShardDatasetOp::kDatasetType; }
private:
int64_t num_shards_;
int64_t index_;
bool require_non_empty_;
};
class ShardDatasetOpTest : public DatasetOpsTestBase {};
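// The positional arguments in the params below are: input dataset,
// num_shards, index, require_non_empty, output dtypes, output shapes, and
// node name.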
ShardDatasetParams ShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
0,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
7,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams5() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
4,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams6() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams7() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParamsWithNoElemForEachShard() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
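// Expected outputs follow directly from the sharding rule, e.g. range(0, 10)
// with num_shards = 5 and index = 2 yields elements {2, 7}.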
std::vector<GetNextTestCase<ShardDatasetParams>> GetNextTestCases() {
return {
{ShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{}},
{ShardDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_GET_NEXT_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
GetNextTestCases())
TEST_F(ShardDatasetOpTest, DatasetNodeName) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ShardDatasetOpTest, DatasetTypeString) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ShardDatasetOp::kDatasetType)));
}
TEST_F(ShardDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<ShardDatasetParams>> CardinalityTestCases() {
return {{ShardDatasetParams1(),
2},
{ShardDatasetParams2(),
2},
{ShardDatasetParams3(),
0},
{ShardDatasetParams4(),
1},
{ShardDatasetParams5(),
2},
{ShardDatasetParams6(),
2},
{ShardDatasetParams7(),
1}};
}
DATASET_CARDINALITY_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
CardinalityTestCases())
TEST_F(ShardDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShardDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ShardDatasetOpTest, IteratorPrefix) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ShardDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ShardDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{ShardDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{0, 1},
{}},
{ShardDatasetParams4(),
{0, 5},
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
{0, 5},
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
IteratorSaveAndRestoreTestCases())
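// With require_non_empty = true and fewer input elements than shards,
// GetNext must fail with InvalidArgument.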
TEST_F(ShardDatasetOpTest, NoElemForEachShard) {
auto dataset_params = InvalidShardDatasetParamsWithNoElemForEachShard();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(ShardDatasetOpTest, InvalidArguments) {
std::vector<ShardDatasetParams> invalid_dataset_params = {
InvalidShardDatasetParams1(), InvalidShardDatasetParams2(),
InvalidShardDatasetParams3(), InvalidShardDatasetParams4()};
for (const auto& dataset_params : invalid_dataset_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,564 | cpp | tensorflow/tensorflow | options_dataset_op | tensorflow/core/kernels/data/options_dataset_op.cc | tensorflow/core/kernels/data/options_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_OPTIONS_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_OPTIONS_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class OptionsDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Options";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kSerializedOptions = "serialized_options";
explicit OptionsDatasetOp(OpKernelConstruction* ctx);
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
tstring serialized_options_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
constexpr const char* const OptionsDatasetOp::kDatasetType;
constexpr const char* const OptionsDatasetOp::kInputDataset;
constexpr const char* const OptionsDatasetOp::kOutputTypes;
constexpr const char* const OptionsDatasetOp::kOutputShapes;
constexpr const char* const OptionsDatasetOp::kSerializedOptions;
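// Dataset that attaches the deserialized `Options` proto to its input and
// otherwise forwards all queries (dtypes, shapes, cardinality, Get) to it.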
class OptionsDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const string& serialized_options)
: DatasetBase(DatasetContext(ctx)),
input_(input),
serialized_options_(serialized_options) {
input_->Ref();
Options options;
OP_REQUIRES(ctx, options.ParseFromString(serialized_options),
errors::InvalidArgument(absl::StrCat(
"Could not parse ", OptionsDatasetOp::kSerializedOptions,
" as valid Options.")));
set_options(options);
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
DCHECK(false) << "OptionsDatasetOp::Dataset::MakeIteratorInternal is not "
"expected to be called because it is supposed to forward "
"the iterator to its input dataset(s).";
LOG(ERROR) << "Datasets of type " << type_string()
<< " forwards its iterator to its input dataset. "
"`MakeIteratorInternal` is not implemented.";
return nullptr;
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return input_->Get(ctx, index, out_tensors);
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
AttrValue serialized_options_attr;
b->BuildAttrValue(serialized_options_, &serialized_options_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_graph_node},
{std::make_pair(kSerializedOptions, serialized_options_attr)}, output));
return absl::OkStatus();
}
private:
const DatasetBase* input_;
const tstring serialized_options_;
absl::Status random_indexing_compatible_;
};
void OptionsDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
*output = new Dataset(ctx, input, serialized_options_);
}
OptionsDatasetOp::OptionsDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSerializedOptions, &serialized_options_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptionsDataset").Device(DEVICE_CPU).Priority(2),
OptionsDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptionsDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
OptionsDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptions[] = R"proto(
deterministic: true
slack: true
optimization_options { apply_default_optimizations: true autotune: true }
)proto";
class OptionsDatasetOpTest : public DatasetOpsTestBase {};
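// Each params case wraps a RangeDataset together with the serialized Options
// proto defined in kOptions above; the positional arguments are: input
// dataset, serialized options, output dtypes, output shapes, and node name.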
OptionsDatasetParams OptionsDatasetParams0() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams OptionsDatasetParams1() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(10, 0, -3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_1");
}
OptionsDatasetParams OptionsDatasetParams2() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 5, 1),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_2");
}
std::vector<GetNextTestCase<OptionsDatasetParams>> GetNextTestCases() {
return {{OptionsDatasetParams0(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{OptionsDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})},
{OptionsDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
ITERATOR_GET_NEXT_TEST_P(OptionsDatasetOpTest, OptionsDatasetParams,
GetNextTestCases())
TEST_F(OptionsDatasetOpTest, DatasetOptions) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
Options expected_options;
protobuf::TextFormat::ParseFromString(kOptions, &expected_options);
TF_ASSERT_OK(CheckDatasetOptions(expected_options));
}
TEST_F(OptionsDatasetOpTest, DatasetNodeName) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(OptionsDatasetOpTest, DatasetTypeString) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(OptionsDatasetOp::kDatasetType)));
}
TEST_F(OptionsDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(OptionsDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(OptionsDatasetOpTest, DatasetCardinality) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(4));
}
TEST_F(OptionsDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(OptionsDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(OptionsDatasetOpTest, IteratorPrefix) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
RangeDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
}
}
} |
1,565 | cpp | tensorflow/tensorflow | parallel_filter_dataset_op | tensorflow/core/kernels/data/parallel_filter_dataset_op.cc | tensorflow/core/kernels/data/parallel_filter_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_FILTER_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_FILTER_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ParallelFilterDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "ParallelFilter";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kNumParallelCalls = "num_parallel_calls";
static constexpr const char* const kPredicate = "predicate";
static constexpr const char* const kDeterministic = "deterministic";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit ParallelFilterDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
DeterminismPolicy deterministic_;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
};
}
}
#endif
#include "tensorflow/core/kernels/data/parallel_filter_dataset_op.h"
#include <deque>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelFilterDatasetOp::kDatasetType;
constexpr const char* const ParallelFilterDatasetOp::kInputDataset;
constexpr const char* const ParallelFilterDatasetOp::kOtherArguments;
constexpr const char* const ParallelFilterDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelFilterDatasetOp::kPredicate;
constexpr const char* const ParallelFilterDatasetOp::kDeterministic;
constexpr const char* const ParallelFilterDatasetOp::kTarguments;
constexpr const char* const ParallelFilterDatasetOp::kOutputTypes;
constexpr const char* const ParallelFilterDatasetOp::kOutputShapes;
constexpr char kComponent[] = "component";
constexpr char kReturnValues[] = "return_values";
constexpr char kPredicateValues[] = "predicate_values";
constexpr char kInvocationResults[] = "invocation_results";
constexpr char kSize[] = "size";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kErrorCode[] = "code";
constexpr char kErrorMessage[] = "error_message";
class ParallelFilterDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
int64_t num_parallel_calls, DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
num_parallel_calls_(num_parallel_calls),
deterministic_(deterministic),
captured_func_(std::move(captured_func)) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
Node* num_parallel_calls = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_),
&num_parallel_calls));
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
AttrValue predicate_attr;
b->BuildAttrValue(captured_func_->func(), &predicate_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {{0, input_graph_node}}, {{1, other_arguments}},
{{kDeterministic, deterministic_attr},
{kPredicate, predicate_attr},
{kTarguments, other_arguments_types_attr}},
output));
return absl::OkStatus();
}
private:
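  // Iterator that keeps up to `num_parallel_calls` predicate invocations in
  // flight. A runner thread fills a deque of InvocationResults; consumers
  // discard results whose predicate evaluated to false and return the
  // remaining elements, preserving input order when determinism is required.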
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()),
autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}
~Iterator() override {
CancelThreads(true);
input_impl_.reset();
if (deregister_fn_) deregister_fn_();
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
IteratorContext(params), this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<InvocationResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(ctx, &result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelFilterConsume",
{{"element_id", result->uid}});
});
return ProcessResult(ctx, result, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncUnknownRatioNode(
std::move(args),
{model::MakeParameter("parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
if (num_calls_ != 0) {
return errors::FailedPrecondition(
"Unexpected outstanding calls encountered.");
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(absl::StrCat(prefix(), "::", kInvocationResults),
kSize, invocation_results_.size()));
for (size_t i = 0; i < invocation_results_.size(); i++) {
const auto& result = *(invocation_results_[i]);
std::string element_prefix =
absl::StrCat(prefix(), "::", kInvocationResults, "::", i);
TF_RETURN_IF_ERROR(
WriteStatusLocked(writer, element_prefix, result.status));
TF_RETURN_IF_ERROR(WriteComponentsLocked(
writer, absl::StrCat(element_prefix, "::", kReturnValues),
result.return_values));
TF_RETURN_IF_ERROR(WriteComponentsLocked(
writer, absl::StrCat(element_prefix, "::", kPredicateValues),
result.predicate_values));
if (result.end_of_input) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kEndOfInput, ""));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t invocation_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", kInvocationResults),
kSize, &invocation_results_size));
DCHECK(invocation_results_.empty());
for (size_t i = 0; i < invocation_results_size; i++) {
invocation_results_.push_back(std::make_shared<InvocationResult>());
auto& result = *invocation_results_.back();
std::string element_prefix =
absl::StrCat(prefix(), "::", kInvocationResults, "::", i);
TF_RETURN_IF_ERROR(
ReadStatusLocked(reader, element_prefix, &result.status));
TF_RETURN_IF_ERROR(ReadComponentsLocked(
ctx, reader, absl::StrCat(element_prefix, "::", kReturnValues),
&result.return_values));
TF_RETURN_IF_ERROR(ReadComponentsLocked(
ctx, reader, absl::StrCat(element_prefix, "::", kPredicateValues),
&result.predicate_values));
result.end_of_input = reader->Contains(element_prefix, kEndOfInput);
RecordBufferEnqueue(ctx, result.return_values);
result.notification.Notify();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
data::TraceMeMetadata result;
result.push_back(
std::make_pair("autotune", autotune_ ? "true" : "false"));
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
struct InvocationResult {
InvocationResult() : uid(tensorflow::EnvTime::NowNanos()) {}
Notification notification;
Status status;
std::vector<Tensor> return_values;
std::vector<Tensor> predicate_values;
bool end_of_input = false;
const int64_t uid;
};
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto ctx_copy = std::make_shared<IteratorContext>(*ctx);
runner_thread_ = ctx->StartThread(
"tf_data_parallel_filter",
std::bind(&Iterator::RunnerThread, this, ctx_copy));
}
}
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->notification.Notify();
cond_var_->notify_all();
}
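    // Reads the next input element and evaluates the predicate on it
    // asynchronously, recording the element and the predicate value in
    // `result`.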
void CallFunction(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelFilterProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
result->status = input_impl_->GetNext(ctx.get(), &input_element,
&result->end_of_input);
if (result->end_of_input || !result->status.ok()) {
CallCompleted(ctx, result);
return;
}
result->return_values = input_element;
auto done = [this, ctx, result](Status status) {
result->status.Update(status);
if (status.ok() && (result->predicate_values.size() != 1 ||
result->predicate_values[0].dtype() != DT_BOOL ||
result->predicate_values[0].NumElements() != 1)) {
result->status.Update(errors::InvalidArgument(
"Filter predicate `predicate` must return a scalar bool."));
}
RecordBufferEnqueue(ctx.get(), result->return_values);
CallCompleted(ctx, result);
};
if (dataset()->captured_func_->use_inter_op_parallelism()) {
instantiated_captured_func_->RunAsync(
ctx.get(), std::move(input_element), &result->predicate_values,
std::move(done), model_node());
} else {
auto fn = std::bind(
[this, ctx, result](std::vector<Tensor> input_element) {
return instantiated_captured_func_->Run(
ctx.get(), std::move(input_element),
&result->predicate_values, model_node());
},
std::move(input_element));
(*ctx->runner())(
[this, ctx, fn = std::move(fn), done = std::move(done)]() {
Status s;
if (IsRecording(ctx.get())) {
s = fn();
} else {
RecordStart(ctx.get());
s = fn();
RecordStop(ctx.get());
}
done(s);
});
}
}
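    // Converts a completed invocation into iterator output. OutOfRange errors
    // raised by the predicate are reported as InvalidArgument so they are not
    // mistaken for end of sequence.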
Status ProcessResult(IteratorContext* ctx,
const std::shared_ptr<InvocationResult>& result,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {
if (!result->end_of_input && result->status.ok()) {
*out_tensors = std::move(result->return_values);
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(result->status)) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
result->status.message());
}
*end_of_sequence = result->end_of_input;
return result->status;
}
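    // Background thread that keeps up to `num_parallel_calls` invocations in
    // flight, dispatching CallFunction for each newly enqueued result.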
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
std::vector<std::shared_ptr<InvocationResult>> new_calls;
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
invocation_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
invocation_results_.push_back(std::make_shared<InvocationResult>());
new_calls.push_back(invocation_results_.back());
num_calls_++;
}
cond_var_->notify_all();
}
for (const auto& call : new_calls) {
CallFunction(ctx, call);
}
new_calls.clear();
}
}
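    // Returns whether GetNext should block. Also discards finished results
    // whose predicate was false and, depending on the determinism policy,
    // selects the next result to hand back in `*result`.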
bool ShouldWait(IteratorContext* ctx,
std::shared_ptr<InvocationResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
auto PredicateReady = [](const InvocationResult* result) -> bool {
return result->status.ok() && !result->end_of_input;
};
auto GetPredicateValue = [](const InvocationResult* result) -> bool {
return result->predicate_values[0].scalar<bool>()();
};
while (!invocation_results_.empty() &&
invocation_results_.front()->notification.HasBeenNotified() &&
PredicateReady(invocation_results_.front().get()) &&
!GetPredicateValue(invocation_results_.front().get())) {
RecordBufferDequeue(ctx, invocation_results_.front()->return_values);
invocation_results_.pop_front();
cond_var_->notify_all();
}
if (!deterministic_) {
for (auto it = invocation_results_.begin();
it != invocation_results_.end(); ++it) {
if ((*it)->notification.HasBeenNotified() &&
(it == invocation_results_.begin() ||
(PredicateReady(it->get()) && GetPredicateValue(it->get())))) {
std::swap(*result, *it);
invocation_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else {
if (!invocation_results_.empty() &&
invocation_results_.front()->notification.HasBeenNotified()) {
std::swap(*result, invocation_results_.front());
invocation_results_.pop_front();
if (!(*result)->end_of_input) {
RecordBufferDequeue(ctx, (*result)->return_values);
}
cond_var_->notify_all();
return false;
}
}
return true;
}
Status WriteComponentsLocked(IteratorStateWriter* writer,
const std::string& prefix,
const std::vector<Tensor>& values)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, kSize, values.size()));
for (size_t j = 0; j < values.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix, absl::StrCat(kComponent, "[", j, "]"), values[j]));
}
return absl::OkStatus();
}
Status ReadComponentsLocked(IteratorContext* ctx,
IteratorStateReader* reader,
const std::string& prefix,
std::vector<Tensor>* values)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t size;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix, kSize, &size));
size_t num_return_values = static_cast<size_t>(size);
if (num_return_values != size) {
return errors::InvalidArgument(prefix, ",", kSize, ": ", size,
" is not a valid value of type size_t.");
}
values->reserve(num_return_values);
for (size_t j = 0; j < num_return_values; j++) {
values->emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix, absl::StrCat(kComponent, "[", j, "]"),
&values->back()));
}
return absl::OkStatus();
}
Status WriteStatusLocked(IteratorStateWriter* writer,
const std::string& key, const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
key, kErrorCode, static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(key, kErrorMessage,
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader, const std::string& key,
Status* status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(key, kErrorCode, &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(
reader->ReadScalar(key, kErrorMessage, &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;
const bool autotune_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<InvocationResult>> invocation_results_
TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
};
const DatasetBase* const input_;
const int64_t num_parallel_calls_;
const DeterminismPolicy deterministic_;
const std::unique_ptr<CapturedFunction> captured_func_;
};
ParallelFilterDatasetOp::ParallelFilterDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kPredicate, {},
&func_metadata_));
OP_REQUIRES(ctx, func_metadata_->short_circuit_info().indices.size() <= 1,
errors::InvalidArgument(
"predicate function has more than one return value."));
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(ctx,
DeterminismPolicy::FromString(deterministic, &deterministic_));
}
void ParallelFilterDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t num_parallel_calls;
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
if (num_parallel_calls == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, num_parallel_calls, deterministic_,
std::move(captured_func));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ParallelFilterDataset").Device(DEVICE_CPU),
ParallelFilterDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ParallelFilterDataset");
}
}
} | #include "tensorflow/core/kernels/data/parallel_filter_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_map_dataset";
class ParallelFilterDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelFilterDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int num_parallel_calls, const std::string& deterministic,
FunctionDefHelper::AttrValueWrapper pred_func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
num_parallel_calls_(num_parallel_calls),
deterministic_(deterministic),
pred_func_(std::move(pred_func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size());
input_names->emplace_back(ParallelFilterDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelFilterDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelFilterDatasetOp::kNumParallelCalls);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {
{"predicate", pred_func_}, {"Targuments", type_arguments_},
{"output_shapes", output_shapes_}, {"output_types", output_dtypes_},
{"deterministic", deterministic_}, {"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelFilterDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int num_parallel_calls_;
std::string deterministic_;
FunctionDefHelper::AttrValueWrapper pred_func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class ParallelFilterDatasetOpTest : public DatasetOpsTestBase {};
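// The valid params factories below slice the same {9, 1} int64 tensor and
// filter it with the IsZero predicate; they vary only num_parallel_calls and
// the determinism policy. The Invalid* factories exercise predicates or
// element shapes that cannot yield a scalar boolean filter condition.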
ParallelFilterDatasetParams ParallelFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kNondeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
2,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
4,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
4,
DeterminismPolicy::kNondeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams ParallelFilterDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
model::kAutotune,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelFilterDatasetParams InputHasNoElementParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("GetUnique",
{{"T", DT_INT64}, {"out_idx", DT_INT32}}),
{test::function::Unique()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return ParallelFilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
1,
DeterminismPolicy::kDeterministic,
FunctionDefHelper::FunctionRef("NonZero", {{"T", DT_INT64}}),
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
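// Only the three leading zeros satisfy IsZero, so every valid configuration
// is expected to produce exactly the elements {0}, {0}, {0}.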
std::vector<GetNextTestCase<ParallelFilterDatasetParams>> GetNextTestCases() {
return {{ParallelFilterDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams2(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams3(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams4(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams5(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{ParallelFilterDatasetParams6(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{InputHasNoElementParams(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams, GetNextTestCases())
TEST_F(ParallelFilterDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelFilterDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelFilterDatasetOp::kDatasetType)));
}
TEST_F(ParallelFilterDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<ParallelFilterDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ParallelFilterDatasetParams1(),
{PartialTensorShape({1})}},
{InputHasNoElementParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ParallelFilterDatasetParams>>
CardinalityTestCases() {
return {{ParallelFilterDatasetParams1(),
kUnknownCardinality},
{InputHasNoElementParams(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams, CardinalityTestCases())
TEST_F(ParallelFilterDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<ParallelFilterDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ParallelFilterDatasetParams1(),
{PartialTensorShape({1})}},
{InputHasNoElementParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ParallelFilterDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelFilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelFilterDatasetOp::kDatasetType,
dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelFilterDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelFilterDatasetParams1(),
{0, 2, 6},
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{InputHasNoElementParams(),
{0, 2, 6},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelFilterDatasetOpTest,
ParallelFilterDatasetParams,
IteratorSaveAndRestoreTestCases())
class ParameterizedInvalidPredicateFuncTest
: public ParallelFilterDatasetOpTest,
public ::testing::WithParamInterface<ParallelFilterDatasetParams> {};
TEST_P(ParameterizedInvalidPredicateFuncTest, InvalidPredicateFunc) {
auto dataset_params = GetParam();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_SUITE_P(
ParallelFilterDatasetOpTest, ParameterizedInvalidPredicateFuncTest,
::testing::ValuesIn({InvalidPredFuncFilterDatasetParams1(),
InvalidPredFuncFilterDatasetParams2(),
InvalidPredFuncFilterDatasetParams3()}));
}
}
} |
1,566 | cpp | tensorflow/tensorflow | prefetch_dataset_op | tensorflow/core/kernels/data/prefetch_dataset_op.cc | tensorflow/core/kernels/data/prefetch_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
namespace tensorflow {
namespace data {
class PrefetchDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Prefetch";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kBufferSize = model::kBufferSize;
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kSlackPeriod = "slack_period";
static constexpr const char* const kLegacyAutotune = "legacy_autotune";
static constexpr const char* const kBufferSizeMin = "buffer_size_min";
explicit PrefetchDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
int64_t slack_period_ = 0;
bool legacy_autotune_ = true;
int64_t buffer_size_min_ = 0;
};
}
}
#endif
#include "tensorflow/core/kernels/data/prefetch_dataset_op.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/mutex.h"
namespace tensorflow {
namespace data {
constexpr const char* const PrefetchDatasetOp::kDatasetType;
constexpr const char* const PrefetchDatasetOp::kInputDataset;
constexpr const char* const PrefetchDatasetOp::kBufferSize;
constexpr const char* const PrefetchDatasetOp::kOutputTypes;
constexpr const char* const PrefetchDatasetOp::kOutputShapes;
constexpr const char* const PrefetchDatasetOp::kSlackPeriod;
constexpr const char* const PrefetchDatasetOp::kLegacyAutotune;
constexpr const char* const PrefetchDatasetOp::kBufferSizeMin;
namespace {
constexpr double kSleepFactor = 0.2;
constexpr char kBuffer[] = "buffer";
constexpr char kStatus[] = "status";
constexpr char kSizeSuffix[] = ".size";
constexpr char kCodeSuffix[] = ".code";
constexpr char kErrorMessageSuffix[] = ".error_message";
}
class PrefetchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t slack_period, bool legacy_autotune, int64_t buffer_size_min)
: DatasetBase(DatasetContext(ctx)),
input_(input),
buffer_size_(buffer_size),
slack_period_(slack_period),
legacy_autotune_(legacy_autotune),
buffer_size_min_(buffer_size_min) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return input_->Get(ctx, index, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
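  // Serialization re-emits buffer_size as a graph input and slack_period,
  // legacy_autotune, and buffer_size_min as attrs so the dataset can be
  // reconstructed from a GraphDef.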
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
AttrValue slack_period_attr;
b->BuildAttrValue(slack_period_, &slack_period_attr);
AttrValue legacy_autotune_attr;
b->BuildAttrValue(legacy_autotune_, &legacy_autotune_attr);
AttrValue buffer_size_min_attr;
b->BuildAttrValue(buffer_size_min_, &buffer_size_min_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, buffer_size},
{std::make_pair(kSlackPeriod, slack_period_attr),
std::make_pair(kLegacyAutotune, legacy_autotune_attr),
std::make_pair(kBufferSizeMin, buffer_size_min_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
buffer_size_min_(params.dataset->buffer_size_min_),
legacy_autotune_(params.dataset->legacy_autotune_),
buffer_size_(std::make_shared<model::SharedState>(
legacy_autotune_ ? 0 : params.dataset->buffer_size_, mu_,
cond_var_)) {
slack_us_ = 0;
}
~Iterator() override {
CancelThreads();
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
auto_tuner_ = std::make_unique<PrefetchAutotuner>(
dataset()->buffer_size_, dataset()->buffer_size_min_,
ctx->ram_budget_manager());
interleave_depth_ = ctx->interleave_depth();
if (buffer_size_->value == model::kAutotune) {
buffer_size_->value = buffer_size_min_;
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(), [this]() { CancelThreads(); },
&deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
if (ctx->warm_start() && !ctx->is_restoring()) {
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
}
ctx->MergeCheckpoint(iter_ctx.checkpoint());
return absl::OkStatus();
}
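    // GetNextInternal blocks until a prefetched element is available, the
    // prefetch thread has finished, or the buffer limit is zero; in the last
    // case it falls through to a synchronous read from the input iterator.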
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const auto& stats_aggregator = ctx->stats_aggregator();
{
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
while (buffer_.empty() && !prefetch_thread_finished_ &&
buffer_limit() != 0) {
if (legacy_autotune_) {
auto_tuner_->RecordEmpty();
buffer_size_->value = auto_tuner_->buffer_limit();
}
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (!buffer_.empty()) {
return Consume(ctx, out_tensors, end_of_sequence);
}
if (prefetch_thread_finished_) {
*end_of_sequence = true;
return absl::OkStatus();
}
DCHECK_EQ(buffer_limit(), 0);
}
mutex_lock input_l(input_mu_);
{
mutex_lock l(*mu_);
if (stats_aggregator) {
stats_aggregator->AddScalar(
stats_utils::BufferSizeScalarName(dataset()->node_name()),
static_cast<float>(buffer_.size()), num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferCapacityScalarName(dataset()->node_name()),
static_cast<float>(buffer_limit()), num_elements());
}
}
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
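    // The iterator is modeled as an asynchronous known-ratio node; the
    // buffer_size parameter stays tunable unless it has been pinned to a
    // fixed, nonzero value.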
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
double buffer_size_min = buffer_size_min_;
double buffer_size_max = std::numeric_limits<int64_t>::max();
if (buffer_size_->value != model::kAutotune && buffer_size_->value != 0) {
buffer_size_min = buffer_size_->value;
buffer_size_max = buffer_size_->value;
}
return model::MakeAsyncKnownRatioNode(
std::move(args),
1,
{model::MakeParameter(kBufferSize, buffer_size_, buffer_size_min,
buffer_size_max)},
legacy_autotune_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
mutex_lock input_l(input_mu_);
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBufferSize, buffer_.size()));
for (size_t i = 0; i < buffer_.size(); i++) {
auto& buffer_element = buffer_[i];
TF_RETURN_IF_ERROR(WriteStatus(writer, i, buffer_element.status));
if (buffer_element.status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, kSizeSuffix), buffer_element.value.size()));
for (size_t j = 0; j < buffer_element.value.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, "[", j, "]"), buffer_element.value[j]));
}
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
tsl::mutex_lock l(input_mu_);
return RestoreInput(ctx, reader, input_impl_);
}
mutex_lock input_l(input_mu_);
mutex_lock l(*mu_);
DCHECK(!prefetch_thread_);
DCHECK(buffer_.empty());
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
if (!ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(RestoreBuffer(ctx, reader));
}
if (ctx->warm_start()) {
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
}
cond_var_->notify_all();
return absl::OkStatus();
}
data::TraceMeMetadata GetTraceMeMetadata() const override {
int64_t limit = -1, size = -1;
data::TraceMeMetadata result;
if (mu_->try_lock()) {
limit = buffer_limit();
size = buffer_.size();
if (!buffer_.empty()) {
        std::vector<std::string> shapes;
        shapes.reserve(buffer_.front().value.size());
for (const auto& component : buffer_.front().value) {
shapes.push_back(component.shape().DebugString());
}
result.push_back(std::make_pair("next_element_shapes",
absl::StrJoin(shapes, ",")));
}
mu_->unlock();
}
result.push_back(std::make_pair(
"buffer_limit",
limit == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(limit))));
result.push_back(std::make_pair(
"autotune",
dataset()->buffer_size_ == model::kAutotune ? "true" : "false"));
result.push_back(std::make_pair(
"autotune_mode", legacy_autotune_ ? "legacy" : "performance"));
if (dataset()->slack_period_ > 0) {
result.push_back(std::make_pair(
"slack",
strings::Printf("%lld", static_cast<long long>(slack_us_.load()))));
}
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
struct BufferElement {
explicit BufferElement(IteratorContext* ctx)
: uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
Status status;
std::vector<Tensor> value;
int64_t created_us;
const uint64 uid;
MemoryCheckpoint checkpoint;
};
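    // RestoreBuffer re-reads the saved element count, each element's status,
    // and its tensors, re-registering every restored element with the memory
    // tracker via RecordBufferEnqueue.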
Status RestoreBuffer(IteratorContext* const ctx,
IteratorStateReader* const reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
size_t buffer_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kBufferSize, &temp));
buffer_size = static_cast<size_t>(temp);
}
for (size_t i = 0; i < buffer_size; i++) {
buffer_.emplace_back(ctx);
auto& buffer_element = buffer_.back();
TF_RETURN_IF_ERROR(ReadStatus(reader, i, &buffer_element.status));
if (buffer_element.status.ok()) {
size_t value_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, kSizeSuffix), &temp));
value_size = static_cast<size_t>(temp);
}
buffer_element.value.reserve(value_size);
for (size_t j = 0; j < value_size; j++) {
buffer_element.value.emplace_back();
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, "[", j, "]"),
&buffer_element.value.back()));
}
}
RecordBufferEnqueue(ctx, buffer_element.value);
}
return absl::OkStatus();
}
int64_t buffer_limit() const TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (legacy_autotune_) {
return auto_tuner_->buffer_limit();
}
return buffer_size_->value;
}
void CancelThreads() TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
}
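    // Consume pops the front buffer element, forwards its checkpoint and
    // tensors to the caller, feeds consumption stats back into the legacy
    // autotuner, and wakes the prefetch thread.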
Status Consume(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
double buffer_limit_ = buffer_limit();
stats_aggregator->AddToHistogram(
stats_utils::BufferUtilizationHistogramName(dataset()->node_name()),
{static_cast<float>(buffer_.size()) /
static_cast<float>(buffer_limit_)},
num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferSizeScalarName(dataset()->node_name()),
static_cast<float>(buffer_.size()), num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferCapacityScalarName(dataset()->node_name()),
static_cast<float>(buffer_limit_), num_elements());
}
Status s = buffer_.front().status;
if (s.ok()) {
int64_t buffer_element_id = buffer_.front().uid;
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"PrefetchConsume", {{"element_id", buffer_element_id}});
},
profiler::kInfo);
if (dataset()->slack_period_ > 0 &&
(num_elements() + 1) % dataset()->slack_period_ == 0) {
int64_t slack_us = EnvTime::NowMicros() - buffer_.front().created_us;
slack_us_ = kSleepFactor * slack_us_ + slack_us;
VLOG(2) << "Setting slack_us_: " << slack_us_;
}
*out_tensors = std::move(buffer_.front().value);
ctx->MergeCheckpoint(&buffer_.front().checkpoint);
RecordBufferDequeue(ctx, *out_tensors);
if (legacy_autotune_ && !auto_tuner_->HasElementSize()) {
auto_tuner_->SetElementSize(GetAllocatedBytes(*out_tensors));
}
} else {
RecordBufferDequeue(ctx, buffer_.front().value);
}
if (legacy_autotune_) {
auto_tuner_->RecordConsumption(buffer_.size());
buffer_size_->value = auto_tuner_->buffer_limit();
}
buffer_.pop_front();
*end_of_sequence = false;
cond_var_->notify_all();
return s;
}
Status EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!prefetch_thread_) {
std::shared_ptr<IteratorContext> new_ctx =
std::make_shared<IteratorContext>(*ctx);
prefetch_thread_ = ctx->StartThread(
"tf_data_prefetch", [this, new_ctx]() { PrefetchThread(new_ctx); });
}
return absl::OkStatus();
}
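    // The background thread loops: wait while the buffer is at its limit,
    // optionally sleep for the slack period, pull one element from the input,
    // and enqueue it; on cancellation or end of sequence it marks itself
    // finished and notifies waiters.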
void PrefetchThread(const std::shared_ptr<IteratorContext>& ctx) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
int num_produced = 0;
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && buffer_.size() >= buffer_limit()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
prefetch_thread_finished_ = true;
cond_var_->notify_all();
return;
}
}
if (dataset()->slack_period_ > 0 &&
num_produced % dataset()->slack_period_ == 0) {
VLOG(2) << "Sleeping for: " << slack_us_ * kSleepFactor;
ctx->env()->SleepForMicroseconds(slack_us_ * kSleepFactor);
}
mutex_lock input_l(input_mu_);
bool end_of_sequence = false;
BufferElement buffer_element(ctx.get());
{
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"PrefetchProduce", {{"element_id", buffer_element.uid}});
},
profiler::kInfo);
buffer_element.status = input_impl_->GetNext(
ctx.get(), &buffer_element.value, &end_of_sequence);
buffer_element.checkpoint.Merge(ctx->checkpoint());
}
if (buffer_element.status.ok() && end_of_sequence) {
mutex_lock l(*mu_);
prefetch_thread_finished_ = true;
cond_var_->notify_all();
return;
}
{
mutex_lock l(*mu_);
RecordBufferEnqueue(ctx.get(), buffer_element.value);
buffer_element.created_us = EnvTime::NowMicros();
buffer_.push_back(std::move(buffer_element));
cond_var_->notify_all();
}
++num_produced;
}
}
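    // Buffered statuses are checkpointed as an error code plus an optional
    // message so that failed elements are replayed faithfully after restore.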
Status WriteStatus(IteratorStateWriter* writer, size_t index,
const Status& status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(absl::StrCat(prefix(), "::", index), CodeKey(),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
absl::StrCat(prefix(), "::", index), ErrorMessageKey(),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatus(IteratorStateReader* reader, size_t index, Status* status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(absl::StrCat(prefix(), "::", index),
CodeKey(), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", index),
ErrorMessageKey(), &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
string CodeKey() { return absl::StrCat(kStatus, kCodeSuffix); }
string ErrorMessageKey() {
return absl::StrCat(kStatus, kErrorMessageSuffix);
}
const std::shared_ptr<mutex> mu_;
mutex input_mu_ TF_ACQUIRED_BEFORE(*mu_);
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(input_mu_);
const std::shared_ptr<condition_variable> cond_var_;
const int64_t buffer_size_min_;
std::unique_ptr<PrefetchAutotuner> auto_tuner_ TF_GUARDED_BY(*mu_);
std::deque<BufferElement> buffer_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
bool prefetch_thread_finished_ TF_GUARDED_BY(*mu_) = false;
const bool legacy_autotune_;
std::atomic<int64_t> slack_us_;
const std::shared_ptr<model::SharedState> buffer_size_;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> prefetch_thread_ TF_GUARDED_BY(*mu_);
};
const DatasetBase* const input_;
const int64_t buffer_size_;
const int64_t slack_period_;
const bool legacy_autotune_ = true;
const int64_t buffer_size_min_ = 0;
absl::Status random_indexing_compatible_;
TraceMeMetadata traceme_metadata_;
};
PrefetchDatasetOp::PrefetchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kSlackPeriod)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSlackPeriod, &slack_period_));
}
if (ctx->HasAttr(kLegacyAutotune)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kLegacyAutotune, &legacy_autotune_));
}
if (ctx->HasAttr(kBufferSizeMin)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kBufferSizeMin, &buffer_size_min_));
}
if (GetExperiments().contains("autotune_buffer_optimization")) {
legacy_autotune_ = false;
buffer_size_min_ = std::max(static_cast<int64_t>(1), buffer_size_min_);
}
}
void PrefetchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx, buffer_size >= 0 || buffer_size == model::kAutotune,
errors::InvalidArgument("buffer_size must be >= 0 or set "
"buffer_size to be ",
model::kAutotune, " for auto-tuning"));
if (buffer_size == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, buffer_size, slack_period_,
legacy_autotune_, buffer_size_min_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset").Device(DEVICE_CPU).Priority(2),
PrefetchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset")
.Device(DEVICE_GPU)
.HostMemory("buffer_size")
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
PrefetchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/prefetch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "prefetch_dataset";
class PrefetchDatasetOpTest : public DatasetOpsTestBase {};
class PrefetchDatasetParams : public DatasetParams {
public:
template <typename T>
PrefetchDatasetParams(T input_dataset_params, int64_t buffer_size,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int64_t slack_period, bool legacy_autotune,
int64_t buffer_size_min, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
buffer_size_(buffer_size),
slack_period_(slack_period),
legacy_autotune_(legacy_autotune),
buffer_size_min_(buffer_size_min) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {buffer_size_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(PrefetchDatasetOp::kInputDataset);
input_names->emplace_back(PrefetchDatasetOp::kBufferSize);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("slack_period", slack_period_);
attr_vector->emplace_back("legacy_autotune", legacy_autotune_);
attr_vector->emplace_back("buffer_size_min", buffer_size_min_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return PrefetchDatasetOp::kDatasetType;
}
private:
int64_t buffer_size_;
int64_t slack_period_;
bool legacy_autotune_;
int64_t buffer_size_min_;
};
PrefetchDatasetParams PrefetchDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
5,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
0,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
5,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
5,
false,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
3,
kNodeName);
}
PrefetchDatasetParams InvalidBufferSizePrefetchDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-2,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
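// Every valid configuration should yield the full range 0..9 regardless of
// buffer size, slack period, or autotune mode; only the -2 buffer size is
// rejected, since buffer_size must be >= 0 or kAutotune (-1).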
std::vector<GetNextTestCase<PrefetchDatasetParams>> GetNextTestCases() {
return {
{PrefetchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{PrefetchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams3(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams5(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams6(),
CreateTensors<int64_t>(
TensorShape{1},
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})}};
}
ITERATOR_GET_NEXT_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
GetNextTestCases())
TEST_F(PrefetchDatasetOpTest, DatasetNodeName) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(PrefetchDatasetOpTest, DatasetTypeString) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(PrefetchDatasetOp::kDatasetType)));
}
TEST_F(PrefetchDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(PrefetchDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<PrefetchDatasetParams>> CardinalityTestCases() {
return {{PrefetchDatasetParams1(),
10},
{PrefetchDatasetParams2(),
10},
{PrefetchDatasetParams3(),
10},
{PrefetchDatasetParams4(),
10},
{PrefetchDatasetParams5(),
10}};
}
DATASET_CARDINALITY_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
CardinalityTestCases())
TEST_F(PrefetchDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(PrefetchDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(PrefetchDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
PrefetchDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<PrefetchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PrefetchDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{PrefetchDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams5(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1},
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(PrefetchDatasetOpTest, InvalidBufferSize) {
auto dataset_params = InvalidBufferSizePrefetchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(), error::INVALID_ARGUMENT);
}
}
}
} |
1,567 | cpp | tensorflow/tensorflow | assert_next_dataset_op | tensorflow/core/kernels/data/experimental/assert_next_dataset_op.cc | tensorflow/core/kernels/data/experimental/assert_next_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_ASSERT_NEXT_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_ASSERT_NEXT_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class AssertNextDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "AssertNext";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kTransformations = "transformations";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit AssertNextDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h"
#include <map>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const AssertNextDatasetOp::kInputDataset;
constexpr const char* const AssertNextDatasetOp::kDatasetType;
constexpr const char* const AssertNextDatasetOp::kTransformations;
constexpr const char* const AssertNextDatasetOp::kOutputTypes;
constexpr const char* const AssertNextDatasetOp::kOutputShapes;
class AssertNextDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const std::vector<tstring>& transformations,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
transformations_(transformations),
output_types_(output_types),
output_shapes_(output_shapes) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* transformations_node = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, transformations_node}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
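    // Initialize splits the iterator prefix on ':' and checks, from the
    // innermost transformation outward, that each asserted name matches the
    // dataset that actually follows in the input pipeline.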
Status Initialize(IteratorContext* ctx) override {
std::vector<string> tokens =
absl::StrSplit(prefix(), ':', absl::SkipEmpty());
if (dataset()->transformations_.size() > tokens.size() - 2) {
return errors::InvalidArgument(
"Asserted next ", dataset()->transformations_.size(),
" transformations but encountered only ", tokens.size() - 2, ".");
}
int n = tokens.size();
for (size_t i = 0; i < dataset()->transformations_.size(); ++i) {
if (!MatchesAnyVersion(dataset()->transformations_[i],
tokens[n - 2 - i])) {
return errors::InvalidArgument("Asserted transformation matching ",
dataset()->transformations_[i],
" at offset ", i, " but encountered ",
tokens[n - 2 - i],
" transformation instead.");
}
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
return absl::OkStatus();
}
private:
std::unique_ptr<IteratorBase> input_impl_;
};
const DatasetBase* input_;
const std::vector<tstring> transformations_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
};
AssertNextDatasetOp::AssertNextDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void AssertNextDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::vector<tstring> transformations;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations,
&transformations));
*output =
new Dataset(ctx, input, transformations, output_types_, output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AssertNextDataset").Device(DEVICE_CPU),
AssertNextDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalAssertNextDataset").Device(DEVICE_CPU),
AssertNextDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "assert_next_dataset";
class AssertNextDatasetParams : public DatasetParams {
public:
template <typename T>
AssertNextDatasetParams(T input_dataset_params,
const std::vector<tstring>& transformations,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
transformations_(transformations) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
int num_transformations = transformations_.size();
return {CreateTensor<tstring>(TensorShape({num_transformations}),
transformations_)};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(input_dataset_params_.size() + 1);
input_names->emplace_back(AssertNextDatasetOp::kInputDataset);
input_names->emplace_back(AssertNextDatasetOp::kTransformations);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{AssertNextDatasetOp::kOutputShapes, output_shapes_},
{AssertNextDatasetOp::kOutputTypes, output_dtypes_}};
return absl::OkStatus();
}
string dataset_type() const override {
return AssertNextDatasetOp::kDatasetType;
}
private:
std::vector<tstring> transformations_;
};
class AssertNextDatasetOpTest : public DatasetOpsTestBase {};
AssertNextDatasetParams AssertNextDatasetParams1() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(
std::move(take_dataset_params),
{TakeDatasetOp::kDatasetType},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertNextDatasetParams AssertNextDatasetParams2() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(
std::move(take_dataset_params),
{TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertNextDatasetParams InvalidAssertNextDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(std::move(take_dataset_params),
{"Whoops"},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertNextDatasetParams ShortAssertNextDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertNextDatasetParams(
std::move(take_dataset_params),
{TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType, "Whoops"},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AssertNextDatasetParams>> GetNextTestCases() {
return {{AssertNextDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertNextDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_GET_NEXT_TEST_P(AssertNextDatasetOpTest, AssertNextDatasetParams,
GetNextTestCases())
TEST_F(AssertNextDatasetOpTest, DatasetNodeName) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertNextDatasetOpTest, DatasetTypeString) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(AssertNextDatasetOp::kDatasetType)));
}
TEST_F(AssertNextDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(AssertNextDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertNextDatasetOpTest, Cardinality) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(3));
}
TEST_F(AssertNextDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(AssertNextDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertNextDatasetOpTest, IteratorPrefix) {
auto dataset_params = AssertNextDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
AssertNextDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<AssertNextDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{AssertNextDatasetParams1(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertNextDatasetParams2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertNextDatasetOpTest,
AssertNextDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(AssertNextDatasetOpTest, InvalidArguments) {
auto dataset_params = InvalidAssertNextDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(AssertNextDatasetOpTest, ShortAssertNext) {
auto dataset_params = ShortAssertNextDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,568 | cpp | tensorflow/tensorflow | random_dataset_op | tensorflow/core/kernels/data/experimental/random_dataset_op.cc | tensorflow/core/kernels/data/experimental/random_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_RANDOM_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_RANDOM_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class RandomDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Random";
static constexpr const char* const kSeed = "seed";
static constexpr const char* const kSeed2 = "seed2";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kRerandomizeEachIteration =
"rerandomize_each_iteration";
explicit RandomDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
int32_t op_version_;
bool rerandomize_each_iteration_ = false;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/random_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const RandomDatasetOp::kDatasetType;
constexpr const char* const RandomDatasetOp::kSeed;
constexpr const char* const RandomDatasetOp::kSeed2;
constexpr const char* const RandomDatasetOp::kOutputTypes;
constexpr const char* const RandomDatasetOp::kOutputShapes;
constexpr const char* const
RandomDatasetOp::kRerandomizeEachIteration;
namespace {
constexpr char kRandomDatasetV1[] = "RandomDataset";
constexpr char kRandomDatasetV2[] = "RandomDatasetV2";
constexpr char kSeedGenerator[] = "SeedGenerator";
constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples";
constexpr char kNumRandomSamples[] = "num_random_samples";
}
class RandomDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, RandomSeeds&& seeds,
SeedGeneratorManager* manager, ResourceHandle&& resource_handle,
bool owns_resource, int op_version)
: DatasetBase(DatasetContext(ctx)),
seeds_(std::move(seeds)),
op_version_(op_version),
manager_(manager),
resource_handle_(resource_handle),
resource_mgr_(ctx->resource_manager()),
owns_resource_(owns_resource) {}
~Dataset() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s;
}
}
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(std::make_unique<IndexSplitProvider>(kint64max));
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, strings::StrCat(prefix, "::Random")},
manager_->get().get());
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_INT64});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(seeds_.input_seed(), seeds_.input_seed2());
return name_utils::DatasetDebugString(RandomDatasetOp::kDatasetType,
params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return kInfiniteCardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
if (op_version_ == 1) {
return b->AddDataset(this, {seed_node, seed2_node}, output);
}
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue rerandomize_each_iteration;
b->BuildAttrValue(manager_->get()->reshuffle_each_iteration(),
&rerandomize_each_iteration);
return b->AddDataset(
this, {seed_node, seed2_node, resource_handle_node},
{std::make_pair(kRerandomizeEachIteration, rerandomize_each_iteration)},
output);
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
Iterator(const Params& params, SeedGenerator* seed_generator)
: DatasetIterator<Dataset>(params),
seed_generator_(seed_generator),
parent_generator_(seed_generator_->seed(), seed_generator_->seed2()),
generator_(&parent_generator_) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
out_tensors->reserve(1);
mutex_lock l(mu_);
out_tensors->emplace_back(ctx->allocator({}), DT_INT64, TensorShape({}));
out_tensors->back().scalar<int64_t>()() = Random();
*end_of_sequence = false;
return absl::OkStatus();
}
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kEpochNumRandomSamples),
seed_generator_->num_random_samples()));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kNumRandomSamples),
num_random_samples_));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kSeed), seed_));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kSeed2), seed2_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t num_random_samples;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kEpochNumRandomSamples),
&num_random_samples));
seed_generator_->set_num_random_samples(num_random_samples);
seed_generator_->Reset();
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kNumRandomSamples),
&num_random_samples_));
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kSeed), &seed_));
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kSeed2), &seed2_));
ResetRngs();
return absl::OkStatus();
}
protected:
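    // Re-seeds the Philox generator and skips past the samples already
    // produced, so a restored iterator continues the same random sequence.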
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seed_, seed2_);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
private:
random::SingleSampleAdapter<random::PhiloxRandom>::ResultType Random()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
num_random_samples_++;
auto out = generator_();
return out;
}
mutex mu_;
SeedGenerator* const seed_generator_ TF_GUARDED_BY(mu_);
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
int64_t seed_ TF_GUARDED_BY(mu_) = 0;
int64_t seed2_ TF_GUARDED_BY(mu_) = 0;
};
private:
const RandomSeeds seeds_;
const int op_version_;
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const bool owns_resource_;
};
RandomDatasetOp::RandomDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kRandomDatasetV2) {
op_version_ = 2;
} else if (op_name == kRandomDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kRerandomizeEachIteration)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kRerandomizeEachIteration,
&rerandomize_each_iteration_));
}
}
void RandomDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, "seed", &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, "seed2", &seed2));
RandomSeeds seeds(seed, seed2);
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
SeedGeneratorManager* manager = nullptr;
ResourceHandle handle;
bool owns_resource = true;
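  // For RandomDatasetV2, reuse the seed generator resource passed as the
  // third input when it exists; otherwise this op creates (and owns) one
  // below.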
if (op_version_ == 2) {
OP_REQUIRES_OK(ctx, HandleFromInput(ctx, 2, &handle));
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
} else {
OP_REQUIRES_OK(ctx, s);
}
}
if (owns_resource) {
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[rerandomize = rerandomize_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (rerandomize) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
}
*output = new RandomDatasetOp::Dataset(ctx, std::move(seeds), manager,
std::move(handle), owns_resource,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name(kRandomDatasetV1).Device(DEVICE_CPU),
RandomDatasetOp);
REGISTER_KERNEL_BUILDER(Name(kRandomDatasetV2).Device(DEVICE_CPU),
RandomDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalRandomDataset").Device(DEVICE_CPU),
RandomDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/random_dataset_op.h"
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "random_dataset";
constexpr char kIteratorPrefix[] = "Iterator";
constexpr int kCount = 10;
void GenerateExpectedEpochData(int64_t seed, int64_t seed2, int count,
std::vector<Tensor>* epoch_data) {
auto parent_generator = random::PhiloxRandom(seed, seed2);
auto generator =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator);
for (int i = 0; i < count; ++i) {
epoch_data->push_back(
CreateTensor<int64_t>(TensorShape({}), {generator()}));
}
}
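// Generates the expected values for `iterations` epochs, optionally
// re-deriving the seeds before each epoch to mirror
// rerandomize_each_iteration.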
std::vector<Tensor> GenerateExpectedData(int64_t seed, int64_t seed2, int count,
bool rerandomize_each_iteration,
int iterations) {
RandomSeedGenerator parent_seed_generator(RandomSeeds(seed, seed2));
std::vector<Tensor> ret;
for (int j = 0; j < iterations; ++j) {
if (rerandomize_each_iteration) {
parent_seed_generator.GenerateSeeds(&seed, &seed2);
}
GenerateExpectedEpochData(seed, seed2, count, &ret);
}
return ret;
}
std::vector<Tensor> GenerateExpectedSaveAndRestoreData(
int64_t seed, int64_t seed2, int count, bool rerandomize_each_iteration) {
RandomSeedGenerator parent_seed_generator(RandomSeeds(seed, seed2));
if (rerandomize_each_iteration) {
parent_seed_generator.GenerateSeeds(&seed, &seed2);
parent_seed_generator.GenerateSeeds(&seed, &seed2);
}
std::vector<Tensor> ret;
GenerateExpectedEpochData(seed, seed2, count, &ret);
return ret;
}
class RandomDatasetParams : public DatasetParams {
public:
RandomDatasetParams(int64_t seed, int64_t seed2, int32_t op_version,
bool rerandomize_each_iteration,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
seed_(CreateTensor<int64_t>(TensorShape({}), {seed})),
seed2_(CreateTensor<int64_t>(TensorShape({}), {seed2})),
dummy_resource_handle_(CreateDummyResourceHandle()),
seed_generator_resource_(CreateTensor<ResourceHandle>(
TensorShape({}), {dummy_resource_handle_})),
rerandomize_each_iteration_(rerandomize_each_iteration) {
op_version_ = op_version;
}
ResourceHandle CreateDummyResourceHandle() { return ResourceHandle(); }
virtual std::vector<Tensor> GetInputTensors() const override {
return {seed_, seed2_, seed_generator_resource_};
}
virtual Status GetInputNames(
std::vector<string>* input_names) const override {
*input_names = {RandomDatasetOp::kSeed, RandomDatasetOp::kSeed2};
if (op_version_ == 2) {
input_names->emplace_back("seed_generator");
}
return absl::OkStatus();
}
virtual Status GetAttributes(AttributeVector* attributes) const override {
*attributes = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
if (op_version_ == 2) {
attributes->emplace_back("rerandomize_each_iteration",
rerandomize_each_iteration_);
}
return absl::OkStatus();
}
virtual string dataset_type() const override {
return RandomDatasetOp::kDatasetType;
}
private:
Tensor seed_;
Tensor seed2_;
ResourceHandle dummy_resource_handle_;
Tensor seed_generator_resource_;
bool rerandomize_each_iteration_;
};
class RandomDatasetOpTest : public DatasetOpsTestBase {};
RandomDatasetParams FortyTwo() {
return {42,
42,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed() {
return {1000,
42,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed2() {
return {42,
1000,
1,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams FortyTwoV2RerandomizeEachIterationFalse() {
return {42,
42,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeedV2RerandomizeEachIterationFalse() {
return {1000,
42,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed2V2RerandomizeEachIterationFalse() {
return {42,
1000,
2,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams FortyTwoV2RerandomizeEachIterationTrue() {
return {42,
42,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeedV2RerandomizeEachIterationTrue() {
return {1000,
42,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
RandomDatasetParams ChangeSeed2V2RerandomizeEachIterationTrue() {
return {42,
1000,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName};
}
class ParameterizedGetNextTest : public RandomDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<RandomDatasetParams>> {};
std::vector<GetNextTestCase<RandomDatasetParams>> GetNextTestCases() {
return {{FortyTwo(),
GenerateExpectedData(
42, 42, kCount,
false,
2)},
{ChangeSeed(),
GenerateExpectedData(
1000, 42, kCount,
false,
2)},
{ChangeSeed2(),
GenerateExpectedData(
42, 1000, kCount,
false,
2)},
{FortyTwoV2RerandomizeEachIterationFalse(),
GenerateExpectedData(
42, 42, kCount,
false,
2)},
{ChangeSeedV2RerandomizeEachIterationFalse(),
GenerateExpectedData(
1000, 42, kCount,
false,
2)},
{ChangeSeed2V2RerandomizeEachIterationFalse(),
GenerateExpectedData(
42, 1000, kCount,
false,
2)},
{FortyTwoV2RerandomizeEachIterationTrue(),
GenerateExpectedData(
42, 42, kCount,
true, 2)},
{ChangeSeedV2RerandomizeEachIterationTrue(),
GenerateExpectedData(
1000, 42, kCount,
true, 2)},
{ChangeSeed2V2RerandomizeEachIterationTrue(),
GenerateExpectedData(
42, 1000, kCount,
true, 2)}};
}
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (out_tensors.size() < kCount) {
std::vector<Tensor> next;
TF_ASSERT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
while (out_tensors.size() < 2 * kCount) {
std::vector<Tensor> next;
TF_ASSERT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_ASSERT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_SUITE_P(
RandomDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(
std::vector<GetNextTestCase<RandomDatasetParams>>(GetNextTestCases())));
std::vector<DatasetNodeNameTestCase<RandomDatasetParams>>
DatasetNodeNameTestCases() {
return {{FortyTwo(), kNodeName}};
}
DATASET_NODE_NAME_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetNodeNameTestCases());
std::vector<DatasetTypeStringTestCase<RandomDatasetParams>>
DatasetTypeStringTestCases() {
return {{FortyTwo(),
name_utils::OpName(
RandomDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetTypeStringTestCases());
std::vector<DatasetOutputDtypesTestCase<RandomDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{FortyTwo(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetOutputDtypesTestCases());
std::vector<DatasetOutputShapesTestCase<RandomDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FortyTwo(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetOutputShapesTestCases());
std::vector<CardinalityTestCase<RandomDatasetParams>> CardinalityTestCases() {
return {{FortyTwo(),
kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
CardinalityTestCases());
std::vector<IteratorOutputDtypesTestCase<RandomDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{FortyTwo(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputDtypesTestCases());
std::vector<IteratorOutputShapesTestCase<RandomDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FortyTwo(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputShapesTestCases());
std::vector<IteratorPrefixTestCase<RandomDatasetParams>>
IteratorOutputPrefixTestCases() {
return {{FortyTwo(),
name_utils::IteratorPrefix(
RandomDatasetOp::kDatasetType, kIteratorPrefix)}};
}
ITERATOR_PREFIX_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputPrefixTestCases());
std::vector<IteratorSaveAndRestoreTestCase<RandomDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FortyTwo(), {2, 5, 8},
GenerateExpectedSaveAndRestoreData(
               42, 42, 9,
false)},
{FortyTwoV2RerandomizeEachIterationFalse(),
{2, 5, 8},
GenerateExpectedSaveAndRestoreData(
               42, 42, 9,
false)},
{FortyTwoV2RerandomizeEachIterationTrue(),
{2, 5, 8},
GenerateExpectedSaveAndRestoreData(
               42, 42, 9,
true)}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorSaveAndRestoreTestCases());
}
}
}
} |
1,569 | cpp | tensorflow/tensorflow | save_dataset_op | tensorflow/core/kernels/data/experimental/save_dataset_op.cc | tensorflow/core/kernels/data/experimental/save_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_SAVE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_SAVE_DATASET_OP_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/data/iterator_ops.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
namespace experimental {
class SaveDatasetOp : public HybridAsyncOpKernel {
public:
static constexpr const char* const kCompression = "compression";
static constexpr const char* const kPath = "path";
static constexpr const char* const kShardFunc = "shard_func";
static constexpr const char* const kShardFuncOtherArgs =
"shard_func_other_args";
static constexpr const char* const kUseShardFunc = "use_shard_func";
explicit SaveDatasetOp(OpKernelConstruction* ctx);
Status DoCompute(OpKernelContext* ctx) override;
private:
static constexpr const int kFileFormatVersion = 2;
Status ConsumeElement();
Status GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
int64_t* shard_index);
Status WriteData(OpKernelContext* ctx, DatasetBase* dataset,
std::unique_ptr<CapturedFunction> captured_func,
const std::string& run_dir, uint64* num_elements);
Status WriteMetadataFile(Env* env, const std::string& path, uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized);
bool use_shard_func_;
std::string compression_;
std::shared_ptr<FunctionMetadata> func_metadata_;
};
class SaveDatasetV2Op : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kPath = "path";
static constexpr const char* const kCompression = "compression";
static constexpr const char* const kDatasetType = "SaveV2";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kShardFunc = "shard_func";
static constexpr const char* const kShardFuncOtherArgs =
"shard_func_other_args";
static constexpr const char* const kUseShardFunc = "use_shard_func";
static constexpr const char* const kShardFuncTarguments = "Tshard_func_args";
explicit SaveDatasetV2Op(OpKernelConstruction* ctx);
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
static constexpr const int kFileFormatVersion = 2;
tstring path_;
std::string compression_;
std::unique_ptr<CapturedFunction> shard_func_;
bool use_shard_func_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
std::shared_ptr<FunctionMetadata> func_metadata_;
std::string writer_prefix_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/hash_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const SaveDatasetOp::kCompression;
constexpr const char* const SaveDatasetOp::kPath;
constexpr const char* const SaveDatasetOp::kShardFunc;
constexpr const char* const SaveDatasetOp::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetOp::kUseShardFunc;
constexpr const int SaveDatasetOp::kFileFormatVersion;
constexpr const char* const SaveDatasetV2Op::kInputDataset;
constexpr const char* const SaveDatasetV2Op::kPath;
constexpr const char* const SaveDatasetV2Op::kCompression;
constexpr const char* const SaveDatasetV2Op::kDatasetType;
constexpr const char* const SaveDatasetV2Op::kOutputTypes;
constexpr const char* const SaveDatasetV2Op::kOutputShapes;
constexpr const char* const SaveDatasetV2Op::kShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetV2Op::kUseShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncTarguments;
constexpr const int SaveDatasetV2Op::kFileFormatVersion;
SaveDatasetOp::SaveDatasetOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_save_dataset") {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
}
Status SaveDatasetOp::DoCompute(OpKernelContext* ctx) {
metrics::RecordTFDataFetchOp("SaveDatasetOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
tstring path;
TF_RETURN_IF_ERROR(ParseScalarArgument(ctx, kPath, &path));
auto run_id = random::New64();
auto run_dir = snapshot_util::RunDirectory(path, run_id);
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir));
TF_RETURN_IF_ERROR(
WriteMetadataFile(ctx->env(), path, run_id, dataset->output_dtypes(),
0, false));
std::unique_ptr<CapturedFunction> captured_func;
TF_RETURN_IF_ERROR(CapturedFunction::Create(
ctx, func_metadata_, kShardFuncOtherArgs, &captured_func));
uint64 num_elements = 0;
TF_RETURN_IF_ERROR(WriteData(ctx, dataset, std::move(captured_func), run_dir,
&num_elements));
TF_RETURN_IF_ERROR(WriteMetadataFile(ctx->env(), path, run_id,
dataset->output_dtypes(), num_elements,
true));
return absl::OkStatus();
}
Status SaveDatasetOp::WriteData(OpKernelContext* ctx, DatasetBase* dataset,
std::unique_ptr<CapturedFunction> captured_func,
const std::string& run_dir,
uint64* num_elements) {
IteratorContext::Params params(ctx);
auto function_handle_cache =
std::make_unique<FunctionHandleCache>(params.flr);
params.function_handle_cache = function_handle_cache.get();
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
TF_RETURN_IF_ERROR(
captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
DatasetBase* finalized_dataset;
TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
&iter_ctx, nullptr, "Save", &iterator));
mutex mu;
Status status;
absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
writers;
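  // Drain the input, routing each element to a per-shard asynchronous writer
  // that is created on first use.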
while (true) {
if (ctx->cancellation_manager()->IsCancelled()) {
return errors::Cancelled("Operation was cancelled");
}
std::vector<Tensor> element;
bool end_of_input;
TF_RETURN_IF_ERROR(iterator->GetNext(&iter_ctx, &element, &end_of_input));
if (end_of_input) {
break;
}
(*num_elements)++;
int64_t shard_index = -1;
TF_RETURN_IF_ERROR(GetShardIndex(
&iter_ctx, instantiated_captured_func.get(), element, &shard_index));
if (writers.count(shard_index) == 0) {
const auto snapshot_shard_directory =
snapshot_util::ShardDirectory(run_dir, shard_index);
auto writer_thread = std::make_unique<snapshot_util::AsyncWriter>(
ctx->env(), shard_index, snapshot_shard_directory,
0, compression_, kFileFormatVersion,
finalized_dataset->output_dtypes(), [&mu, &status](Status s) {
mutex_lock l(mu);
status.Update(s);
});
writers.insert({shard_index, std::move(writer_thread)});
}
writers[shard_index]->Write(element);
}
for (auto& writer : writers) {
writer.second->SignalEOF();
}
writers.clear();
return status;
}
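// Without a shard function, elements are assigned to shards round-robin over
// the CPU budget; otherwise the user-supplied shard_func must return a
// scalar int64 shard index.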
Status SaveDatasetOp::GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
int64_t* shard_index) {
if (!use_shard_func_) {
*shard_index = (*shard_index + 1) % GetCpuBudget();
return absl::OkStatus();
}
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
ctx, element, &output_tensors, nullptr));
if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
output_tensors[0].NumElements() != 1) {
return errors::InvalidArgument("`shard_func` must return a scalar int64.");
}
*shard_index = output_tensors[0].flat<int64_t>()(0);
return absl::OkStatus();
}
Status SaveDatasetOp::WriteMetadataFile(Env* env, const std::string& path,
uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized) {
SnapshotMetadataRecord metadata;
metadata.set_creation_timestamp(EnvTime::NowMicros());
metadata.set_run_id(
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
metadata.set_version(kFileFormatVersion);
for (const auto& output_dtype : output_dtypes) {
metadata.add_dtype(output_dtype);
}
metadata.set_finalized(finalized);
metadata.set_num_elements(num_elements);
return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
class SaveDatasetV2Op::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, const tstring& path,
const std::string& compression,
std::unique_ptr<CapturedFunction> shard_func, bool use_shard_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
path_(path),
compression_(compression),
shard_func_(std::move(shard_func)),
use_shard_func_(use_shard_func) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* path_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(path_, &path_node));
std::vector<Node*> shard_func_other_args;
DataTypeVector shard_func_other_args_types;
TF_RETURN_IF_ERROR(shard_func_->AddToGraph(ctx, b, &shard_func_other_args,
&shard_func_other_args_types));
AttrValue compression_attr;
b->BuildAttrValue(compression_, &compression_attr);
AttrValue shard_func_attr;
b->BuildAttrValue(shard_func_->func(), &shard_func_attr);
AttrValue use_shard_func_attr;
b->BuildAttrValue(use_shard_func_, &use_shard_func_attr);
AttrValue shard_func_arguments_types_attr;
b->BuildAttrValue(shard_func_other_args_types,
&shard_func_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node), std::make_pair(1, path_node)},
{std::make_pair(2, shard_func_other_args)},
{std::make_pair(kCompression, compression_attr),
std::make_pair(kShardFunc, shard_func_attr),
std::make_pair(kUseShardFunc, use_shard_func_attr),
std::make_pair(kShardFuncTarguments, shard_func_arguments_types_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
static constexpr const char* const kIteratorName = "Writer";
static constexpr const char* const kRunId = "run_id";
static constexpr const char* const kCurrentCheckpointId =
"current_checkpoint_id";
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
writers_closed_(false),
run_id_(0),
current_checkpoint_id_(0) {}
~Iterator() override {
mutex_lock l(mu_);
SignalEOF(true);
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
dataset()->shard_func_->Instantiate(ctx, &instantiated_shard_func_));
if (!ctx->is_restoring()) {
run_id_ = random::New64();
run_dir_ = snapshot_util::RunDirectory(
io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
TF_RETURN_IF_ERROR(WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
0, false));
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = false;
snapshot_util::AsyncWriter* current_writer;
{
std::vector<Tensor> output_tensors;
mutex_lock l(mu_);
{
mutex_lock wsl(writer_status_mu_);
if (!writer_status_.ok() || writers_closed_) {
*end_of_sequence = true;
return writer_status_;
}
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
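          // Input exhausted: close all shard writers and rewrite the
          // metadata file as finalized.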
SignalEOF(true);
{
mutex_lock wsl(writer_status_mu_);
TF_RETURN_IF_ERROR(writer_status_);
}
return WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
dataset()->Cardinality(), true);
}
(num_elements_)++;
int64_t shard_index = 0;
TF_RETURN_IF_ERROR(
GetShardIndex(ctx, instantiated_shard_func_.get(), *out_tensors,
dataset()->use_shard_func_, &shard_index));
if (writers_.count(shard_index) == 0) {
auto snapshot_shard_directory =
snapshot_util::ShardDirectory(run_dir_, shard_index);
auto writer = std::make_unique<snapshot_util::AsyncWriter>(
ctx->env(), shard_index, snapshot_shard_directory,
current_checkpoint_id_, dataset()->compression_,
kFileFormatVersion, dataset()->output_dtypes(), [this](Status s) {
if (!s.ok()) {
mutex_lock l(writer_status_mu_);
writer_status_ = s;
}
});
writers_.insert({shard_index, std::move(writer)});
}
current_writer = writers_[shard_index].get();
}
current_writer->Write(*out_tensors);
return absl::OkStatus();
}
protected:
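    // Saving a checkpoint closes the current shard writers and advances the
    // checkpoint id so that a restored iterator writes to fresh files.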
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kRunId),
static_cast<int64_t>(run_id_)));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kCurrentCheckpointId),
static_cast<int64_t>(current_checkpoint_id_)));
SignalEOF(false);
writers_.clear();
current_checkpoint_id_++;
return SaveInput(ctx, writer, input_impl_);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t run_id_signed;
int64_t current_checkpoint_id;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kRunId), &run_id_signed));
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointId),
                                            &current_checkpoint_id));
run_id_ = static_cast<uint64>(run_id_signed);
run_dir_ = snapshot_util::RunDirectory(
io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
current_checkpoint_id_ = static_cast<uint64>(current_checkpoint_id);
if (ctx->is_restoring()) {
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
TF_RETURN_IF_ERROR(WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
0, false));
}
return RestoreInput(ctx, reader, input_impl_);
}
private:
Status GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
bool use_shard_func, int64_t* shard_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!use_shard_func) {
*shard_index = (*shard_index + 1) % GetCpuBudget();
return absl::OkStatus();
}
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
ctx, element, &output_tensors, nullptr));
if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
output_tensors[0].NumElements() != 1) {
return errors::InvalidArgument(
"`shard_func` must return a scalar int64.");
}
*shard_index = output_tensors[0].flat<int64_t>()(0);
return absl::OkStatus();
}
Status WriteMetadataFile(Env* env, const std::string& path, uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
SnapshotMetadataRecord metadata;
metadata.set_creation_timestamp(EnvTime::NowMicros());
metadata.set_run_id(
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
metadata.set_version(kFileFormatVersion);
for (const auto& output_dtype : output_dtypes) {
metadata.add_dtype(output_dtype);
}
metadata.set_finalized(finalized);
metadata.set_num_elements(num_elements);
return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
void SignalEOF(bool mark_closed) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!writers_closed_) {
for (auto& writer : writers_) {
writer.second->SignalEOF();
}
writers_.clear();
writers_closed_ = mark_closed;
}
}
mutex mu_;
mutex writer_status_mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    int64_t num_elements_ = 0;
absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
writers_ TF_GUARDED_BY(mu_);
Status writer_status_ TF_GUARDED_BY(writer_status_mu_);
bool writers_closed_ TF_GUARDED_BY(mu_);
uint64 run_id_ TF_GUARDED_BY(mu_);
tstring run_dir_ TF_GUARDED_BY(mu_);
uint64 current_checkpoint_id_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_shard_func_
TF_GUARDED_BY(mu_);
};
const DatasetBase* input_;
const tstring path_;
const std::string compression_;
const std::unique_ptr<CapturedFunction> shard_func_;
const bool use_shard_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const std::shared_ptr<FunctionMetadata> func_metadata_;
const std::string writer_prefix_;
};
SaveDatasetV2Op::SaveDatasetV2Op(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
&func_metadata_));
}
void SaveDatasetV2Op::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
DatasetBase* dataset;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset));
tstring path;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPath, &path));
std::unique_ptr<CapturedFunction> shard_func;
OP_REQUIRES_OK(
ctx, CapturedFunction::Create(ctx, func_metadata_, kShardFuncOtherArgs,
&shard_func));
*output = new Dataset(ctx, dataset, path, compression_, std::move(shard_func),
use_shard_func_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SaveDataset").Device(DEVICE_CPU), SaveDatasetOp);
REGISTER_KERNEL_BUILDER(Name("SaveDatasetV2").Device(DEVICE_CPU),
SaveDatasetV2Op);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr char kSaveDatasetV2NodeName[] = "save_dataset_v2";
class SaveDatasetV2Params : public DatasetParams {
public:
template <typename T>
SaveDatasetV2Params(T input_dataset_params, const tstring& path,
const std::string& compression,
FunctionDefHelper::AttrValueWrapper shard_func,
std::vector<FunctionDef> func_lib, bool use_shard_func,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name, DataTypeVector type_arguments)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
path_(path),
compression_(compression),
shard_func_(shard_func),
func_lib_(std::move(func_lib)),
use_shard_func_(use_shard_func),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors;
input_tensors.emplace_back(CreateTensor<tstring>(TensorShape({}), {path_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(SaveDatasetV2Op::kInputDataset);
input_names->emplace_back(SaveDatasetV2Op::kPath);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(SaveDatasetV2Op::kCompression, compression_);
attr_vector->emplace_back(SaveDatasetV2Op::kShardFunc, shard_func_);
attr_vector->emplace_back(SaveDatasetV2Op::kUseShardFunc, use_shard_func_);
attr_vector->emplace_back(SaveDatasetV2Op::kShardFuncTarguments,
type_arguments_);
attr_vector->emplace_back(SaveDatasetV2Op::kOutputTypes, output_dtypes_);
attr_vector->emplace_back(SaveDatasetV2Op::kOutputShapes, output_shapes_);
return absl::OkStatus();
}
string path() const { return path_; }
string dataset_type() const override { return SaveDatasetV2Op::kDatasetType; }
string op_name() const override { return "SaveDatasetV2"; }
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::string path_;
std::string compression_;
FunctionDefHelper::AttrValueWrapper shard_func_;
std::vector<FunctionDef> func_lib_;
bool use_shard_func_;
DataTypeVector type_arguments_;
};
class SaveDatasetV2OpTest : public DatasetOpsTestBase {
public:
Status Initialize(const DatasetParams& dataset_params) {
TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
auto params = static_cast<const SaveDatasetV2Params&>(dataset_params);
save_filename_ = params.path();
return absl::OkStatus();
}
protected:
std::string save_filename_;
};
SaveDatasetV2Params SaveDatasetV2Params1() {
return SaveDatasetV2Params(
RangeDatasetParams(0, 10, 2),
io::JoinPath(testing::TmpDir(), "save_data"),
"",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
false,
{DT_INT64},
{PartialTensorShape({})},
kSaveDatasetV2NodeName,
{});
}
SaveDatasetV2Params SaveDatasetV2Params2() {
return SaveDatasetV2Params(
RangeDatasetParams(0, 5, 1),
io::JoinPath(testing::TmpDir(), "save_data"),
"GZIP",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
true,
{DT_INT64},
{PartialTensorShape({})},
kSaveDatasetV2NodeName,
{});
}
std::vector<GetNextTestCase<SaveDatasetV2Params>> GetNextTestCases() {
return {{
SaveDatasetV2Params1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
{SaveDatasetV2Params2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
class ParameterizedGetNextTest : public SaveDatasetV2OpTest,
public ::testing::WithParamInterface<
GetNextTestCase<SaveDatasetV2Params>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_SUITE_P(SaveDatasetV2OpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(SaveDatasetV2OpTest, DatasetNodeName) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SaveDatasetV2OpTest, DatasetTypeString) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString("SaveDatasetV2"));
}
TEST_F(SaveDatasetV2OpTest, DatasetOutputDtypes) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
std::vector<DatasetOutputDtypesTestCase<SaveDatasetV2Params>>
DatasetOutputDtypesTestCases() {
return {{SaveDatasetV2Params1(),
{DT_INT64}},
{SaveDatasetV2Params2(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SaveDatasetV2Params>>
DatasetOutputShapesTestCases() {
return {{SaveDatasetV2Params1(),
{PartialTensorShape({})}},
{SaveDatasetV2Params2(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SaveDatasetV2Params>> CardinalityTestCases() {
return {{SaveDatasetV2Params1(),
5},
{SaveDatasetV2Params2(),
5}};
}
DATASET_CARDINALITY_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
CardinalityTestCases())
TEST_F(SaveDatasetV2OpTest, IteratorPrefix) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
SaveDatasetV2Op::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>>
IteratorSaveAndRestoreTestCases() {
return {{SaveDatasetV2Params1(),
{0, 2, 4, 6, 8},
CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
{SaveDatasetV2Params2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public SaveDatasetV2OpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
true));
}
INSTANTIATE_TEST_CASE_P(SaveDatasetV2OpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} |
1,570 | cpp | tensorflow/tensorflow | map_and_batch_dataset_op | tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc | tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_MAP_AND_BATCH_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_MAP_AND_BATCH_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class MapAndBatchDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "MapAndBatch";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kBatchSize = "batch_size";
static constexpr const char* const kNumParallelCalls = "num_parallel_calls";
static constexpr const char* const kDropRemainder = "drop_remainder";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kPreserveCardinality =
"preserve_cardinality";
explicit MapAndBatchDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
bool preserve_cardinality_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.h"
#include <atomic>
#include <functional>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/inplace_ops_functor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const MapAndBatchDatasetOp::kDatasetType;
constexpr const char* const MapAndBatchDatasetOp::kInputDataset;
constexpr const char* const MapAndBatchDatasetOp::kOtherArguments;
constexpr const char* const MapAndBatchDatasetOp::kBatchSize;
constexpr const char* const
MapAndBatchDatasetOp::kNumParallelCalls;
constexpr const char* const MapAndBatchDatasetOp::kDropRemainder;
constexpr const char* const MapAndBatchDatasetOp::kFunc;
constexpr const char* const MapAndBatchDatasetOp::kTarguments;
constexpr const char* const MapAndBatchDatasetOp::kOutputTypes;
constexpr const char* const MapAndBatchDatasetOp::kOutputShapes;
constexpr const char* const
MapAndBatchDatasetOp::kPreserveCardinality;
namespace {
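// Upper bound on the number of batch results buffered between the runner
// thread and the consumer.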
constexpr int64_t kMaxBatchResults = 16;
constexpr char kParallelism[] = "parallelism";
constexpr char kCallCounter[] = "call_counter";
constexpr char kBatchResultsSize[] = "batch_results_size";
constexpr char kTFDataMapAndBatch[] = "tf_data_map_and_batch";
constexpr char kBatchResults[] = "batch_results";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumCalls[] = "num_calls";
constexpr char kNumElements[] = "num_elements";
constexpr char kOutputAllocated[] = "output_allocated";
constexpr char kStatus[] = "status";
inline int64_t CeilDiv(int64_t x, int64_t y) { return (x + y - 1) / y; }
}
class MapAndBatchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t batch_size,
int64_t num_parallel_calls, bool drop_remainder,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality)
: DatasetBase(DatasetContext(ctx)),
input_(input),
batch_size_(batch_size),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
output_types_(output_types),
output_shapes_(output_shapes),
captured_func_(std::move(captured_func)),
preserve_cardinality_(preserve_cardinality),
traceme_metadata_(
{{"autotune",
num_parallel_calls == model::kAutotune ? "true" : "false"},
{"batch_size",
strings::Printf("%lld", static_cast<long long>(batch_size))},
{"drop_remainder", drop_remainder ? "true" : "false"}}) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (!preserve_cardinality_) {
return kUnknownCardinality;
}
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* batch_size_node;
TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size_node));
Node* num_parallel_calls_node;
TF_RETURN_IF_ERROR(
b->AddScalar(num_parallel_calls_, &num_parallel_calls_node));
Node* drop_remainder_node;
TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node),
std::make_pair(2, batch_size_node),
std::make_pair(3, num_parallel_calls_node),
std::make_pair(4, drop_remainder_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f),
std::make_pair(kTarguments, other_arguments_types_attr),
std::make_pair(kPreserveCardinality,
preserve_cardinality_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)) {
max_batch_results_ = std::min(
kMaxBatchResults,
CeilDiv(params.dataset->num_parallel_calls_ == model::kAutotune
? GetCpuBudget()
: params.dataset->num_parallel_calls_,
params.dataset->batch_size_));
}
~Iterator() override {
CancelThreads(true);
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx.checkpoint());
TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_));
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
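    // Blocks until the oldest buffered batch has no outstanding map calls,
    // then hands it to the caller; ProcessBatch deals with partial batches
    // and drop_remainder.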
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<BatchResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (!cancelled_ && (batch_results_.empty() ||
batch_results_.front()->num_calls > 0)) {
++waiting_;
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
--waiting_;
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
std::swap(result, batch_results_.front());
batch_results_.pop_front();
cond_var_->notify_all();
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("MapAndBatchConsume",
{{"element_id", result->uid}});
});
auto cleanup = gtl::MakeCleanup([result] { result->output.clear(); });
mutex_lock l(result->mu);
if (result->output_allocated) {
RecordBufferDequeue(ctx, result->output);
}
ctx->MergeCheckpoint(&result->checkpoint);
TF_RETURN_IF_ERROR(
ProcessBatch(dataset()->batch_size_, result->num_elements,
dataset()->drop_remainder_, result->status, ctx,
out_tensors, end_of_sequence, &result->output));
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncKnownRatioNode(
std::move(args), dataset()->batch_size_,
{model::MakeParameter(kParallelism, num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
if (ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCallCounter, 0));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kBatchResultsSize, 0));
return absl::OkStatus();
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
DCHECK_EQ(num_calls_, 0);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kCallCounter, call_counter_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kBatchResultsSize,
batch_results_.size()));
for (size_t i = 0; i < batch_results_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteBatchResult(writer, i));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
DCHECK(!runner_thread_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCallCounter, &call_counter_));
int64_t batch_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kBatchResultsSize, &batch_results_size));
DCHECK(batch_results_.empty());
for (int i = 0; i < batch_results_size; ++i) {
TF_RETURN_IF_ERROR(ReadBatchResult(ctx, reader, i));
}
if (ctx->warm_start()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
int64_t max_batch_results = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
max_batch_results = max_batch_results_;
mu_->unlock();
}
auto result = dataset()->traceme_metadata_;
result.push_back(std::make_pair(
"max_batch_results",
strings::Printf("%lld", static_cast<long long>(max_batch_results))));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
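    // State for one batch under construction: pre-allocated output tensors,
    // the first error encountered (ordered by element offset), and the number
    // of map calls still pending for this batch.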
struct BatchResult {
explicit BatchResult(int64_t batch_size, IteratorContext* ctx)
: end_of_input(false),
num_elements(0),
output_allocated(false),
status(absl::OkStatus()),
status_offset(-1),
num_calls(batch_size),
checkpoint(MemoryCheckpoint{ctx->id_registry()}),
uid(tensorflow::EnvTime::NowNanos()) {}
void UpdateStatus(const Status& s, int64_t offset) {
if (TF_PREDICT_FALSE(!s.ok())) {
mutex_lock l(mu);
if (status.ok() || offset < status_offset) {
status = s;
status_offset = offset;
}
}
}
mutex mu;
bool end_of_input TF_GUARDED_BY(mu);
int64_t num_elements TF_GUARDED_BY(mu);
std::vector<Tensor> output;
bool output_allocated TF_GUARDED_BY(mu);
Status status TF_GUARDED_BY(mu);
int64_t status_offset TF_GUARDED_BY(mu);
int64_t num_calls TF_GUARDED_BY(&Iterator::mu_);
MemoryCheckpoint checkpoint TF_GUARDED_BY(mu);
const uint64 uid = -1;
};
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<BatchResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->num_calls--;
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
stats_aggregator->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls_) /
static_cast<float>(num_parallel_calls_->value),
num_elements());
}
cond_var_->notify_all();
}
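    // Pulls one element from the input and invokes the map function
    // asynchronously; the done callback copies the result into slot `offset`
    // of the batch output.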
void CallFunction(std::shared_ptr<IteratorContext> ctx,
const std::shared_ptr<BatchResult>& result,
int64_t offset) TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("MapAndBatchProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
bool end_of_input = false;
Status status =
input_impl_->GetNext(ctx.get(), &input_element, &end_of_input);
bool return_early;
{
mutex_lock l(result->mu);
result->checkpoint.Merge(ctx->checkpoint());
result->end_of_input = result->end_of_input || end_of_input;
result->status.Update(status);
return_early = result->end_of_input || !result->status.ok();
}
if (return_early) {
CallCompleted(ctx, result);
return;
}
std::shared_ptr<std::vector<Tensor>> return_values =
std::make_shared<std::vector<Tensor>>();
auto done = [this, ctx, result, return_values, offset](Status status) {
if (dataset()->preserve_cardinality_ && errors::IsOutOfRange(status)) {
status = errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
status.message());
}
result->UpdateStatus(status, offset);
if (status.ok()) {
Status allocate_status =
EnsureOutputAllocated(ctx, result, return_values);
if (!allocate_status.ok()) {
result->UpdateStatus(allocate_status, offset);
} else {
for (size_t i = 0; i < return_values->size(); ++i) {
Tensor& tensor = return_values->at(i);
Tensor* batch = &(result->output)[i];
if (tensor.NumElements() !=
(batch->NumElements() / batch->dim_size(0))) {
TensorShape batch_shape = batch->shape();
batch_shape.RemoveDim(0);
result->UpdateStatus(
errors::InvalidArgument(
"Cannot add tensor to the batch: number of elements "
"does not match. Shapes are: [tensor]: ",
tensor.shape().DebugString(),
", [batch]: ", batch_shape.DebugString()),
offset);
break;
}
Status copy_status = batch_util::CopyElementToSlice(
std::move(tensor), batch, offset);
if (!copy_status.ok()) {
result->UpdateStatus(copy_status, offset);
break;
}
}
}
{
mutex_lock l(result->mu);
result->num_elements++;
}
}
CallCompleted(ctx, result);
};
instantiated_captured_func_->RunAsync(ctx.get(), std::move(input_element),
return_values.get(),
std::move(done), model_node());
}
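// Requests cancellation and, if `wait` is true, blocks until all in-flight
// map invocations have completed.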
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto new_ctx = std::make_shared<IteratorContext>(*ctx);
runner_thread_ =
ctx->StartThread(kTFDataMapAndBatch,
std::bind(&Iterator::RunnerThread, this, new_ctx));
}
}
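// Lazily allocates the batch output tensors the first time mapped values are
// available, using shape {batch_size_} plus the shape of each component.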
Status EnsureOutputAllocated(
const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<BatchResult>& result,
const std::shared_ptr<std::vector<Tensor>>& return_values) {
mutex_lock l(result->mu);
if (result->output_allocated) {
return absl::OkStatus();
}
const size_t num_components = return_values->size();
result->output.reserve(num_components);
for (size_t i = 0; i < num_components; ++i) {
TensorShape component_shape({dataset()->batch_size_});
component_shape.AppendShape(return_values->at(i).shape());
AllocatorAttributes attr;
attr.set_gpu_compatible(true);
result->output.emplace_back(ctx->allocator(attr),
return_values->at(i).dtype(),
component_shape);
if (!result->output.back().IsInitialized()) {
return errors::ResourceExhausted(
"Failed to allocate memory for the batch of component ", i);
}
}
RecordBufferEnqueue(ctx.get(), result->output);
result->output_allocated = true;
return absl::OkStatus();
}
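// Background thread that schedules map invocations: it starts a new
// BatchResult every batch_size_ calls and waits on cond_var_ whenever busy()
// reports that the parallelism or buffered-batch limits have been reached.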
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
std::vector<std::pair<std::shared_ptr<BatchResult>, int64_t>> new_calls;
RecordStart(ctx.get());
auto stop_cleanup =
gtl::MakeCleanup([this, &ctx]() { RecordStop(ctx.get()); });
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
(batch_results_.size() > max_batch_results_ ||
(batch_results_.size() == max_batch_results_ &&
call_counter_ % dataset()->batch_size_ == 0));
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
if (waiting_ > 0 && num_calls_ < num_parallel_calls_->value &&
max_batch_results_ < kMaxBatchResults) {
max_batch_results_++;
continue;
}
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
if (call_counter_ % dataset()->batch_size_ == 0) {
batch_results_.push_back(std::make_shared<BatchResult>(
dataset()->batch_size_, ctx.get()));
}
int64_t offset = call_counter_++ % dataset()->batch_size_;
new_calls.emplace_back(batch_results_.back(), offset);
num_calls_++;
}
}
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
mutex_lock l(*mu_);
stats_aggregator->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls_) /
static_cast<float>(num_parallel_calls_->value),
num_elements());
}
for (const auto& call : new_calls) {
CallFunction(ctx, call.first, call.second);
}
new_calls.clear();
}
}
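// Restores the state of a partially or fully computed batch from a
// checkpoint.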
Status ReadBatchResult(IteratorContext* ctx, IteratorStateReader* reader,
size_t index) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
batch_results_.push_back(
std::make_shared<BatchResult>(dataset()->batch_size_, ctx));
std::shared_ptr<BatchResult> result = batch_results_.back();
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
result->end_of_input = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumCalls),
&result->num_calls));
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
&result->num_elements));
result->output_allocated = reader->Contains(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated));
TF_RETURN_IF_ERROR(ReadBatch(ctx, reader, dataset()->batch_size_,
prefix(), batch_prefix, &result->output));
TF_RETURN_IF_ERROR(ReadStatus(prefix(),
strings::StrCat(batch_prefix, "_", kStatus),
reader, &result->status));
if (result->output_allocated) {
RecordBufferEnqueue(ctx, result->output);
}
return absl::OkStatus();
}
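// Serializes the state of a batch, including its output tensors and status,
// to a checkpoint.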
Status WriteBatchResult(IteratorStateWriter* writer, size_t index)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
std::shared_ptr<BatchResult> result = batch_results_[index];
string batch_prefix = strings::StrCat(kBatchResults, "_", index);
mutex_lock l(result->mu);
if (result->end_of_input) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kEndOfInput), ""));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumCalls),
result->num_calls));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kNumElements),
result->num_elements));
if (result->output_allocated) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(batch_prefix, "_", kOutputAllocated),
""));
}
TF_RETURN_IF_ERROR(WriteBatch(dataset()->batch_size_,
result->num_elements, prefix(),
batch_prefix, writer, &result->output));
TF_RETURN_IF_ERROR(
WriteStatus(prefix(), strings::StrCat(batch_prefix, "_", kStatus),
result->status, writer));
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
std::unique_ptr<CancellationManager> cancellation_manager_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
int64_t call_counter_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<BatchResult>> batch_results_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
int64_t waiting_ TF_GUARDED_BY(*mu_) = 0;
int64_t max_batch_results_ TF_GUARDED_BY(*mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::function<void()> deregister_fn_; | #include "tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "map_and_batch_dataset";
class MapAndBatchDatasetParams : public DatasetParams {
public:
template <typename T>
MapAndBatchDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int64_t batch_size, int64_t num_parallel_calls, bool drop_remainder,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
bool preserve_cardinality, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
batch_size_(batch_size),
num_parallel_calls_(num_parallel_calls),
drop_remainder_(drop_remainder),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)),
preserve_cardinality_(preserve_cardinality) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> inputs = other_arguments_;
inputs.emplace_back(CreateTensor<int64_t>(TensorShape({}), {batch_size_}));
inputs.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
inputs.emplace_back(CreateTensor<bool>(TensorShape({}), {drop_remainder_}));
return inputs;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size() + 3);
input_names->emplace_back(MapAndBatchDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(MapAndBatchDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(MapAndBatchDatasetOp::kBatchSize);
input_names->emplace_back(MapAndBatchDatasetOp::kNumParallelCalls);
input_names->emplace_back(MapAndBatchDatasetOp::kDropRemainder);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"preserve_cardinality", preserve_cardinality_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override {
return MapAndBatchDatasetOp::kDatasetType;
}
private:
std::vector<Tensor> other_arguments_;
int64_t batch_size_;
int64_t num_parallel_calls_;
bool drop_remainder_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
bool preserve_cardinality_;
};
class MapAndBatchDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name,
const DataType& dtype) {
return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}});
}
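// The factories below pass their arguments positionally, in the order of the
// MapAndBatchDatasetParams constructor: input dataset params, other
// arguments, batch_size, num_parallel_calls, drop_remainder, map function,
// function library, type arguments, preserve_cardinality, output dtypes,
// output shapes, node name.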
MapAndBatchDatasetParams MapAndBatchDatasetParams1() {
return MapAndBatchDatasetParams(RangeDatasetParams(0, 10, 2),
{},
2,
1,
true,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams2() {
return MapAndBatchDatasetParams(RangeDatasetParams(0, 10, 2),
{},
2,
2,
true,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
true,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams3() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
3,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
true,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams4() {
return MapAndBatchDatasetParams(RangeDatasetParams(0, 10, 2),
{},
2,
4,
true,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams5() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
model::kAutotune,
true,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
true,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams MapAndBatchDatasetParams6() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
4,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams InvalidNumParallelCallsMapAndBatchDatasetParams() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
2,
-4,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
MapAndBatchDatasetParams InvalidBatchSizeMapAndBatchDatasetParams() {
return MapAndBatchDatasetParams(
RangeDatasetParams(0, 10, 2),
{},
-2,
2,
false,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
false,
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
std::vector<GetNextTestCase<MapAndBatchDatasetParams>> GetNextTestCases() {
return {{MapAndBatchDatasetParams1(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams2(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams3(),
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}},
{MapAndBatchDatasetParams4(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams5(),
CreateTensors<int64_t>(TensorShape({2}), {{0, 8}, {16, 24}})},
{MapAndBatchDatasetParams6(),
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}}};
}
ITERATOR_GET_NEXT_TEST_P(MapAndBatchDatasetOpTest, MapAndBatchDatasetParams,
GetNextTestCases())
TEST_F(MapAndBatchDatasetOpTest, DatasetNodeName) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(MapAndBatchDatasetOpTest, DatasetTypeString) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(MapAndBatchDatasetOp::kDatasetType)));
}
TEST_F(MapAndBatchDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<MapAndBatchDatasetParams>>
DatasetOutputShapesTestCases() {
return {{MapAndBatchDatasetParams1(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams2(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams3(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams4(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams5(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams6(),
{PartialTensorShape({2})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(MapAndBatchDatasetOpTest, MapAndBatchDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<MapAndBatchDatasetParams>>
CardinalityTestCases() {
return {{MapAndBatchDatasetParams1(),
kUnknownCardinality},
{MapAndBatchDatasetParams2(),
2},
{MapAndBatchDatasetParams3(),
3},
{MapAndBatchDatasetParams4(),
kUnknownCardinality},
{MapAndBatchDatasetParams5(),
2},
{MapAndBatchDatasetParams6(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(MapAndBatchDatasetOpTest, MapAndBatchDatasetParams,
CardinalityTestCases())
TEST_F(MapAndBatchDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<MapAndBatchDatasetParams>>
IteratorOutputShapesTestCases() {
return {{MapAndBatchDatasetParams1(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams2(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams3(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams4(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams5(),
{PartialTensorShape({2})}},
{MapAndBatchDatasetParams6(),
{PartialTensorShape({2})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(MapAndBatchDatasetOpTest,
MapAndBatchDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(MapAndBatchDatasetOpTest, IteratorPrefix) {
auto dataset_params = MapAndBatchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
MapAndBatchDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<MapAndBatchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{MapAndBatchDatasetParams1(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams2(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams3(),
{0, 1, 4},
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}},
{MapAndBatchDatasetParams4(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 4}, {8, 12}})},
{MapAndBatchDatasetParams5(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape({2}), {{0, 8}, {16, 24}})},
{MapAndBatchDatasetParams6(),
{0, 1, 4},
{CreateTensor<int64_t>(TensorShape({2}), {0, 8}),
CreateTensor<int64_t>(TensorShape({2}), {16, 24}),
CreateTensor<int64_t>(TensorShape({1}), {32})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(MapAndBatchDatasetOpTest,
MapAndBatchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(MapAndBatchDatasetOpTest, InvalidBatchSize) {
auto dataset_params = InvalidBatchSizeMapAndBatchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(MapAndBatchDatasetOpTest, InvalidNumParallel) {
auto dataset_params = InvalidNumParallelCallsMapAndBatchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,571 | cpp | tensorflow/tensorflow | sampling_dataset_op | tensorflow/core/kernels/data/experimental/sampling_dataset_op.cc | tensorflow/core/kernels/data/experimental/sampling_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_SAMPLING_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_SAMPLING_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class SamplingDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Sampling";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kRate = "rate";
static constexpr const char* const kSeed = "seed";
static constexpr const char* const kSeed2 = "seed2";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit SamplingDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/sampling_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const SamplingDatasetOp::kDatasetType;
constexpr const char* const SamplingDatasetOp::kInputDataset;
constexpr const char* const SamplingDatasetOp::kRate;
constexpr const char* const SamplingDatasetOp::kSeed;
constexpr const char* const SamplingDatasetOp::kSeed2;
constexpr const char* const SamplingDatasetOp::kOutputTypes;
constexpr const char* const SamplingDatasetOp::kOutputShapes;
class SamplingDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, float rate, int64_t seed, int64_t seed2,
const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)),
rate_(rate),
seeds_(seed, seed2),
input_(input) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::unique_ptr<IteratorBase>(
new Iterator({this, name_utils::IteratorPrefix(kDatasetType, prefix)},
seeds_.first, seeds_.second));
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* rate = nullptr;
Node* seed = nullptr;
Node* seed2 = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(rate_, &rate));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.first, &seed));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.second, &seed2));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, rate, seed, seed2}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params, int64_t seed, int64_t seed2)
: DatasetIterator<Dataset>(params),
seeds_(MaybeOverrideSeeds({seed, seed2})),
parent_generator_(seeds_.first, seeds_.second),
generator_(&parent_generator_) {}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
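// Pulls input elements and keeps each one independently with probability
// rate_; rejected elements are cleared and the loop continues until an
// element is kept or the input is exhausted.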
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
bool rand_val_hit;
do {
{
tf_shared_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
}
if (*end_of_sequence) {
mutex_lock l(mu_);
input_impl_.reset();
return absl::OkStatus();
}
float rand_val = Random();
rand_val_hit = rand_val < dataset()->rate_;
if (!rand_val_hit) {
out_tensors->clear();
}
} while (!rand_val_hit);
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
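// Recreates the Philox generator from seeds_ and skips the samples already
// consumed so that a restored iterator resumes the same random sequence.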
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seeds_.first, seeds_.second);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
this->full_name("num_random_samples"), num_random_samples_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->full_name("seed"), seeds_.first));
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->full_name("seed2"), seeds_.second));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name("input_impl_empty"), ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->full_name("num_random_samples"), &num_random_samples_));
int64_t seed;
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name("seed"), &seed));
int64_t seed2;
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name("seed2"), &seed2));
seeds_ = {seed, seed2};
ResetRngs();
if (!reader->Contains(full_name("input_impl_empty"))) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
mutex mu_;
std::pair<int64_t, int64_t> seeds_ TF_GUARDED_BY(mu_);
private:
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
float Random() {
mutex_lock l(mu_);
num_random_samples_++;
uint32 random_uint = generator_();
return random::Uint32ToFloat(random_uint);
}
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
};
const float rate_;
const std::pair<int64_t, int64_t> seeds_;
const DatasetBase* const input_;
};
SamplingDatasetOp::SamplingDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void SamplingDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
float rate;
int64_t seed;
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<float>(ctx, kRate, &rate));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
*output = new Dataset(ctx, rate, seed, seed2, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SamplingDataset").Device(DEVICE_CPU),
SamplingDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/sampling_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "sampling_dataset";
constexpr int64_t kRandomSeed = 42;
constexpr int64_t kRandomSeed2 = 7;
class SamplingDatasetParams : public DatasetParams {
public:
template <typename T>
SamplingDatasetParams(T input_dataset_params, float rate,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
rate_(rate) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
Tensor rate = CreateTensor<float>(TensorShape({}), {rate_});
Tensor seed_tensor = CreateTensor<int64_t>(TensorShape({}), {seed_tensor_});
Tensor seed2_tensor =
CreateTensor<int64_t>(TensorShape({}), {seed2_tensor_});
return {rate, seed_tensor, seed2_tensor};
}
Status GetInputNames(std::vector<string>* input_names) const override {
*input_names = {SamplingDatasetOp::kInputDataset, SamplingDatasetOp::kRate,
SamplingDatasetOp::kSeed, SamplingDatasetOp::kSeed2};
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{SamplingDatasetOp::kOutputTypes, output_dtypes_},
{SamplingDatasetOp::kOutputShapes, output_shapes_}};
return absl::OkStatus();
}
string dataset_type() const override {
return SamplingDatasetOp::kDatasetType;
}
private:
float rate_;
int64_t seed_tensor_ = kRandomSeed;
int64_t seed2_tensor_ = kRandomSeed2;
};
class SamplingDatasetOpTest : public DatasetOpsTestBase {};
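// The second argument to each factory below is the sampling rate in [0, 1].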
SamplingDatasetParams OneHundredPercentSampleParams() {
return SamplingDatasetParams(RangeDatasetParams(0, 3, 1),
1.0,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
SamplingDatasetParams TenPercentSampleParams() {
return SamplingDatasetParams(RangeDatasetParams(0, 20, 1),
0.1,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
SamplingDatasetParams ZeroPercentSampleParams() {
return SamplingDatasetParams(RangeDatasetParams(0, 20, 1),
0.0,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<SamplingDatasetParams>> GetNextTestCases() {
return {
{OneHundredPercentSampleParams(),
CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}})},
{TenPercentSampleParams(),
CreateTensors<int64_t>(TensorShape({}),
{{9}, {11}, {19}})},
{ZeroPercentSampleParams(), {}}};
}
ITERATOR_GET_NEXT_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
GetNextTestCases())
std::vector<DatasetNodeNameTestCase<SamplingDatasetParams>>
DatasetNodeNameTestCases() {
return {{TenPercentSampleParams(),
kNodeName}};
}
DATASET_NODE_NAME_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetNodeNameTestCases())
std::vector<DatasetTypeStringTestCase<SamplingDatasetParams>>
DatasetTypeStringTestCases() {
return {{TenPercentSampleParams(),
name_utils::OpName(
SamplingDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetTypeStringTestCases())
std::vector<DatasetOutputDtypesTestCase<SamplingDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{TenPercentSampleParams(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SamplingDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TenPercentSampleParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SamplingDatasetParams>> CardinalityTestCases() {
return {{OneHundredPercentSampleParams(),
kUnknownCardinality},
{TenPercentSampleParams(),
kUnknownCardinality},
{ZeroPercentSampleParams(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<SamplingDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{TenPercentSampleParams(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<SamplingDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TenPercentSampleParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<SamplingDatasetParams>>
IteratorOutputPrefixTestCases() {
return {{TenPercentSampleParams(),
name_utils::IteratorPrefix(
SamplingDatasetOp::kDatasetType,
TenPercentSampleParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorOutputPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<SamplingDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{OneHundredPercentSampleParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{TenPercentSampleParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{9}, {11}, {19}})},
{ZeroPercentSampleParams(),
{0, 2, 5},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(SamplingDatasetOpTest, SamplingDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
}
} |
1,572 | cpp | tensorflow/tensorflow | assert_prev_dataset_op | tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.cc | tensorflow/core/kernels/data/experimental/assert_prev_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_ASSERT_PREV_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_ASSERT_PREV_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class AssertPrevDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr char kDatasetType[] = "AssertPrev";
static constexpr char kInputDataset[] = "input_dataset";
static constexpr char kTransformations[] = "transformations";
static constexpr char kOutputTypes[] = "output_types";
static constexpr char kOutputShapes[] = "output_shapes";
explicit AssertPrevDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.h"
#include <map>
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr char AssertPrevDatasetOp::kInputDataset[];
constexpr char AssertPrevDatasetOp::kDatasetType[];
constexpr char AssertPrevDatasetOp::kTransformations[];
constexpr char AssertPrevDatasetOp::kOutputTypes[];
constexpr char AssertPrevDatasetOp::kOutputShapes[];
namespace {
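// Parses the text-format NameAttrList stored in `transformation`, naming the
// expected op and any attribute values to verify.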
absl::StatusOr<NameAttrList> GetAssertions(const tstring& transformation) {
NameAttrList assertions;
if (!std::is_base_of<protobuf::Message, NameAttrList>()) {
return errors::InvalidArgument(
"Portable proto implementations are not supported.");
}
if (!protobuf::TextFormat::ParseFromString(
transformation, reinterpret_cast<protobuf::Message*>(&assertions))) {
return errors::InvalidArgument("Couldn't parse transformation '",
transformation, "'.");
}
return assertions;
}
absl::StatusOr<const DatasetBase*> GetPreviousDataset(
const DatasetBase& dataset) {
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset.InputDatasets(&inputs));
if (inputs.empty()) {
return errors::InvalidArgument("No previous transformation found.");
}
return inputs.back();
}
Status CheckOpName(const DatasetBase& dataset, const NameAttrList& assertions) {
if (!MatchesAnyVersion(assertions.name(), dataset.type_string())) {
return errors::InvalidArgument("Asserted transformation matching '",
assertions.name(), "', but found '",
dataset.type_string(), "'.");
}
return absl::OkStatus();
}
absl::StatusOr<NodeDef> GetDatasetNode(const DatasetBase& dataset,
absl::string_view op_name) {
SerializationContext serialization_ctx((SerializationContext::Params()));
GraphDefBuilder b;
GraphDef graph_def;
TF_RETURN_IF_ERROR(
AsGraphDef(&dataset, std::move(serialization_ctx), &graph_def));
TF_ASSIGN_OR_RETURN(NodeDef node, GetDatasetNodeDef(graph_def));
return node;
}
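// Serializes the dataset to a GraphDef, locates its node, and checks that
// every asserted attribute is present with an equivalent value.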
Status CheckAttributes(const DatasetBase& dataset,
const NameAttrList& assertions) {
if (assertions.attr().empty()) return absl::OkStatus();
TF_ASSIGN_OR_RETURN(NodeDef node, GetDatasetNode(dataset, assertions.name()));
std::vector<std::string> attrs_not_found;
for (const auto& attr : assertions.attr()) {
auto it = node.attr().find(attr.first);
if (it != node.attr().end()) {
if (!std::is_base_of<protobuf::Message, AttrValue>()) {
return errors::InvalidArgument(
"Portable proto implementations are not supported.");
}
if (!protobuf::util::MessageDifferencer::Equivalent(
*reinterpret_cast<const protobuf::Message*>(&it->second),
*reinterpret_cast<const protobuf::Message*>(&attr.second))) {
return errors::InvalidArgument(
"Asserted attribute '", attr.first, "' having a value of '",
attr.second.DebugString(), "', but found value of '",
it->second.DebugString(), "'.");
}
} else {
return errors::InvalidArgument(
"Asserted attribute '", attr.first, "' having a value of '",
attr.second.DebugString(), "', but found no such attribute defined.");
}
}
return absl::OkStatus();
}
Status CheckTransformation(const DatasetBase& dataset,
const tstring& transformation) {
TF_ASSIGN_OR_RETURN(NameAttrList assertions, GetAssertions(transformation));
TF_RETURN_IF_ERROR(CheckOpName(dataset, assertions));
TF_RETURN_IF_ERROR(CheckAttributes(dataset, assertions));
return absl::OkStatus();
}
}
class AssertPrevDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const std::vector<tstring>& transformations,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
transformations_(transformations),
output_types_(output_types),
output_shapes_(output_shapes) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* transformations_node = nullptr;
TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, transformations_node}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
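// Walks backwards through the chain of input datasets, checking each
// asserted transformation in turn before creating the input iterator.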
Status Initialize(IteratorContext* ctx) override {
const DatasetBase* current_dataset = dataset();
for (int i = 0; i < dataset()->transformations_.size(); ++i) {
absl::StatusOr<const DatasetBase*> previous_dataset =
GetPreviousDataset(*current_dataset);
if (!previous_dataset.ok()) {
return errors::InvalidArgument(
"Asserted previous ", dataset()->transformations_.size(),
" transformations but encountered only ", i, ".");
}
Status s = CheckTransformation(**previous_dataset,
dataset()->transformations_[i]);
if (!s.ok()) {
return errors::InvalidArgument(
"Failure checking transformations at offset ", i, ": ",
s.message());
}
current_dataset = *previous_dataset;
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
return absl::OkStatus();
}
private:
std::unique_ptr<IteratorBase> input_impl_;
};
const DatasetBase* input_;
const std::vector<tstring> transformations_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
};
AssertPrevDatasetOp::AssertPrevDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void AssertPrevDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::vector<tstring> transformations;
OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations,
&transformations));
*output =
new Dataset(ctx, input, transformations, output_types_, output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AssertPrevDataset").Device(DEVICE_CPU),
AssertPrevDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "assert_prev_dataset";
std::string GetTransformation(
absl::string_view name,
std::initializer_list<std::pair<std::string, bool>> attrs = {}) {
NameAttrList message;
message.set_name(absl::StrCat(name, "Dataset"));
for (const auto& attr : attrs) {
AttrValue value;
value.set_b(attr.second);
message.mutable_attr()->insert({attr.first, value});
}
std::string output;
protobuf::TextFormat::PrintToString(message, &output);
return output;
}
class AssertPrevDatasetParams : public DatasetParams {
public:
template <typename T>
AssertPrevDatasetParams(T input_dataset_params,
const std::vector<tstring>& transformations,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
transformations_(transformations) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
int num_transformations = transformations_.size();
return {CreateTensor<tstring>(TensorShape({num_transformations}),
transformations_)};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(input_dataset_params_.size() + 1);
input_names->emplace_back(AssertPrevDatasetOp::kInputDataset);
input_names->emplace_back(AssertPrevDatasetOp::kTransformations);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{AssertPrevDatasetOp::kOutputShapes, output_shapes_},
{AssertPrevDatasetOp::kOutputTypes, output_dtypes_}};
return absl::OkStatus();
}
string dataset_type() const override {
return AssertPrevDatasetOp::kDatasetType;
}
private:
std::vector<tstring> transformations_;
};
class AssertPrevDatasetOpTest : public DatasetOpsTestBase {};
AssertPrevDatasetParams AssertPrevDatasetParams1() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType)},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams AssertPrevDatasetParams2() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType),
GetTransformation(RangeDatasetOp::kDatasetType)},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams AssertPrevDatasetParams2WithAttrs() {
TakeDatasetParams take_dataset_params = TakeDatasetParams(
TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset"),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType),
GetTransformation(TensorSliceDatasetOp::kDatasetType,
{{"is_files", false}})},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams InvalidAssertPrevDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation("Whoops")},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AssertPrevDatasetParams ShortAssertPrevDatasetParams() {
TakeDatasetParams take_dataset_params =
TakeDatasetParams(RangeDatasetParams(0, 10, 1),
3,
{DT_INT64},
{PartialTensorShape({})},
"take_dataset");
return AssertPrevDatasetParams(
std::move(take_dataset_params),
{GetTransformation(TakeDatasetOp::kDatasetType),
GetTransformation(RangeDatasetOp::kDatasetType),
GetTransformation("Whoops")},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AssertPrevDatasetParams>> GetNextTestCases() {
return {{AssertPrevDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertPrevDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_GET_NEXT_TEST_P(AssertPrevDatasetOpTest, AssertPrevDatasetParams,
GetNextTestCases())
TEST_F(AssertPrevDatasetOpTest, DatasetNodeName) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertPrevDatasetOpTest, DatasetAttrs) {
auto dataset_params = AssertPrevDatasetParams2WithAttrs();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertPrevDatasetOpTest, DatasetTypeString) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(AssertPrevDatasetOp::kDatasetType)));
}
TEST_F(AssertPrevDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(AssertPrevDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertPrevDatasetOpTest, Cardinality) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(3));
}
TEST_F(AssertPrevDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(AssertPrevDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertPrevDatasetOpTest, IteratorPrefix) {
auto dataset_params = AssertPrevDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
AssertPrevDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<AssertPrevDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{AssertPrevDatasetParams1(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
{AssertPrevDatasetParams2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertPrevDatasetOpTest,
AssertPrevDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(AssertPrevDatasetOpTest, InvalidArguments) {
auto dataset_params = InvalidAssertPrevDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(AssertPrevDatasetOpTest, ShortAssertPrev) {
auto dataset_params = ShortAssertPrevDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,573 | cpp | tensorflow/tensorflow | auto_shard_dataset_op | tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.cc | tensorflow/core/kernels/data/experimental/auto_shard_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_AUTO_SHARD_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_AUTO_SHARD_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class AutoShardDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "AutoShard";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kNumWorkers = "num_workers";
static constexpr const char* const kIndex = "index";
static constexpr const char* const kAutoShardPolicy = "auto_shard_policy";
static constexpr const char* const kNumReplicas = "num_replicas";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit AutoShardDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
static RewriterConfig CreateConfig(int64_t num_workers, int64_t index,
int64_t auto_shard_policy,
int64_t num_replicas);
int64_t auto_shard_policy_;
int64_t num_replicas_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const AutoShardDatasetOp::kAutoShardPolicy;
constexpr const char* const AutoShardDatasetOp::kDatasetType;
constexpr const char* const AutoShardDatasetOp::kInputDataset;
constexpr const char* const AutoShardDatasetOp::kNumWorkers;
constexpr const char* const AutoShardDatasetOp::kNumReplicas;
constexpr const char* const AutoShardDatasetOp::kIndex;
constexpr const char* const AutoShardDatasetOp::kOutputTypes;
constexpr const char* const AutoShardDatasetOp::kOutputShapes;
constexpr char kOptimizerName[] = "tf_auto_shard";
AutoShardDatasetOp::AutoShardDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), auto_shard_policy_(0) {
if (ctx->HasAttr(kAutoShardPolicy)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kAutoShardPolicy, &auto_shard_policy_));
}
if (ctx->HasAttr(kNumReplicas)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kNumReplicas, &num_replicas_));
}
}
void AutoShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t index, num_workers, auto_shard_policy, num_replicas;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers));
OP_REQUIRES(
ctx, num_workers > 0,
errors::InvalidArgument("num_workers must be greater than zero."));
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kIndex, &index));
OP_REQUIRES(
ctx, index >= 0 && index < num_workers,
errors::InvalidArgument("index must be between 0 and ", num_workers - 1));
auto_shard_policy = auto_shard_policy_;
if (input->options().distribute_options().auto_shard_policy() !=
AutoShardPolicy::AUTO) {
auto_shard_policy =
input->options().distribute_options().auto_shard_policy();
}
num_replicas = num_replicas_;
auto config_factory = [num_workers, index, auto_shard_policy,
num_replicas]() {
return CreateConfig(num_workers, index, auto_shard_policy, num_replicas);
};
core::RefCountPtr<DatasetBase> rewritten;
OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
false, &rewritten));
*output = rewritten.release();
}
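// Builds a RewriterConfig that runs the "tf_auto_shard" Grappler optimizer,
// forwarding the sharding parameters as custom optimizer attributes.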
RewriterConfig AutoShardDatasetOp::CreateConfig(int64_t num_workers,
int64_t index,
int64_t auto_shard_policy,
int64_t num_replicas) {
RewriterConfig rewriter_config;
rewriter_config.set_fail_on_optimizer_errors(true);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.add_optimizers(kOptimizerName);
auto custom_optimizer = rewriter_config.add_custom_optimizers();
custom_optimizer->set_name(kOptimizerName);
const std::array<std::pair<const char* const, int64_t>, 4> attr_pairs = {
{{kNumWorkers, num_workers},
{kIndex, index},
{kAutoShardPolicy, auto_shard_policy},
{kNumReplicas, num_replicas}}};
for (const auto& pair : attr_pairs) {
AttrValue attr;
attr.set_i(pair.second);
(*custom_optimizer->mutable_parameter_map())[pair.first] = attr;
}
return rewriter_config;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AutoShardDataset").Device(DEVICE_CPU),
AutoShardDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalAutoShardDataset").Device(DEVICE_CPU),
AutoShardDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include <string>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "auto_shard_dataset";
class AutoShardDatasetParams : public DatasetParams {
public:
template <typename T>
AutoShardDatasetParams(T input_dataset_params, int64_t num_workers,
int64_t index, int auto_shard_policy,
int64_t num_replicas, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_workers_(num_workers),
num_replicas_(num_replicas),
index_(index),
auto_shard_policy_(auto_shard_policy) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return CreateTensors<int64_t>(TensorShape({}), {{num_workers_}, {index_}});
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(AutoShardDatasetOp::kInputDataset);
input_names->emplace_back(AutoShardDatasetOp::kNumWorkers);
input_names->emplace_back(AutoShardDatasetOp::kIndex);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(AutoShardDatasetOp::kAutoShardPolicy,
auto_shard_policy_);
attr_vector->emplace_back(AutoShardDatasetOp::kNumReplicas, num_replicas_);
attr_vector->emplace_back(AutoShardDatasetOp::kOutputTypes, output_dtypes_);
attr_vector->emplace_back(AutoShardDatasetOp::kOutputShapes,
output_shapes_);
return absl::OkStatus();
}
string dataset_type() const override {
return AutoShardDatasetOp::kDatasetType;
}
private:
int64_t num_workers_;
int64_t num_replicas_;
int64_t index_;
int auto_shard_policy_;
};
class AutoShardDatasetOpTest : public DatasetOpsTestBase {};
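// The factories below pass their arguments positionally, in the order of the
// AutoShardDatasetParams constructor: input dataset params, num_workers,
// index, auto_shard_policy, num_replicas, output dtypes, output shapes,
// node name.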
AutoShardDatasetParams AutoShardDatasetParams1() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams2() {
return AutoShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams3() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
0,
4,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams4() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams5() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams6() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
AutoShardDatasetParams AutoShardDatasetParams7() {
return AutoShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
0,
5,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<AutoShardDatasetParams>> GetNextTestCases() {
return {
{AutoShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{AutoShardDatasetParams2(),
{}},
{AutoShardDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})}};
}
ITERATOR_GET_NEXT_TEST_P(AutoShardDatasetOpTest, AutoShardDatasetParams,
GetNextTestCases())
TEST_F(AutoShardDatasetOpTest, InvalidArguments) {
std::vector<AutoShardDatasetParams> invalid_dataset_params = {
AutoShardDatasetParams4(), AutoShardDatasetParams5(),
AutoShardDatasetParams6(), AutoShardDatasetParams7()};
for (const auto& dataset_params : invalid_dataset_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
REGISTER_OP("AutoShardDatasetOpTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"));
static void add_identity_nodes(Node* node, Graph& graph,
std::vector<Node*>& identity_nodes) {
for (int i = 0; i < node->num_outputs(); i++) {
Node* new_node;
std::string name = absl::StrCat("Identity", i);
TF_EXPECT_OK(NodeBuilder(name, "Identity")
.Attr("T", node->output_type(i))
.Input(node, i)
.Finalize(&graph, &new_node));
identity_nodes.push_back(new_node);
}
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST_F(AutoShardDatasetOpTest, AutoShardDatasetTypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* num_workers;
Node* index;
Node* auto_shard_dataset;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_workers", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &num_workers));
TF_EXPECT_OK(NodeBuilder("index", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &index));
TF_EXPECT_OK(NodeBuilder("AutoShardDataset", "AutoShardDataset")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(num_workers)
.Input(index)
.Finalize(&graph, &auto_shard_dataset));
std::vector<Node*> identity_nodes;
add_identity_nodes(auto_shard_dataset, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
TEST_F(AutoShardDatasetOpTest, RebatchDatasetTypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* num_replicas;
Node* rebatch_dataset;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_replicas", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &num_replicas));
TF_EXPECT_OK(NodeBuilder("RebatchDataset", "RebatchDataset")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(num_replicas)
.Finalize(&graph, &rebatch_dataset));
std::vector<Node*> identity_nodes;
add_identity_nodes(rebatch_dataset, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
TEST_F(AutoShardDatasetOpTest, RebatchDatasetV2TypeInference) {
Graph graph(OpRegistry::Global());
Node* input_dataset;
Node* batch_sizes;
Node* drop_remainder;
Node* rebatch_dataset_v2;
FullTypeDef input_dataset_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_RAGGED
args { type_id: TFT_STRING }
}
}
})pb",
&input_dataset_t));
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_dataset", "Const")
.Attr("value", tensor_proto)
.Attr("dtype", DT_VARIANT)
.Finalize(&graph, &input_dataset));
(*input_dataset->mutable_def()->mutable_experimental_type()) =
input_dataset_t;
TF_EXPECT_OK(
NodeBuilder("num_replicas", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_INT64)
.Finalize(&graph, &batch_sizes));
TF_EXPECT_OK(
NodeBuilder("drop_remainder", "AutoShardDatasetOpTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_BOOL)
.Finalize(&graph, &drop_remainder));
TF_EXPECT_OK(NodeBuilder("RebatchDatasetV2", "RebatchDatasetV2")
.Attr("output_types", {DT_VARIANT})
.Attr("output_shapes", {TensorShape({1})})
.Input(input_dataset)
.Input(batch_sizes)
.Input(drop_remainder)
.Finalize(&graph, &rebatch_dataset_v2));
std::vector<Node*> identity_nodes;
add_identity_nodes(rebatch_dataset_v2, graph, identity_nodes);
TF_EXPECT_OK(type_inference(graph));
EXPECT_TRUE(full_type::IsEqual(identity_nodes[0]->def().experimental_type(),
input_dataset_t))
<< "fulltype is\n"
<< identity_nodes[0]->def().experimental_type().DebugString()
<< "\nexpected\n"
<< input_dataset_t.DebugString();
}
}
}
}
} |
1,574 | cpp | tensorflow/tensorflow | directed_interleave_dataset_op | tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.cc | tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_DIRECTED_INTERLEAVE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_DIRECTED_INTERLEAVE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class DirectedInterleaveDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "DirectedInterleave";
static constexpr const char* const kSelectorInputDataset =
"selector_input_dataset";
static constexpr const char* const kDataInputDatasets = "data_input_datasets";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kNumInputDatasets = "N";
static constexpr const char* const kStopOnEmptyDataset =
"stop_on_empty_dataset";
explicit DirectedInterleaveDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
bool stop_on_empty_dataset_ = false;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const
DirectedInterleaveDatasetOp::kDatasetType;
constexpr const char* const
DirectedInterleaveDatasetOp::kSelectorInputDataset;
constexpr const char* const
DirectedInterleaveDatasetOp::kDataInputDatasets;
constexpr const char* const
DirectedInterleaveDatasetOp::kStopOnEmptyDataset;
constexpr const char* const
DirectedInterleaveDatasetOp::kOutputTypes;
constexpr const char* const
DirectedInterleaveDatasetOp::kOutputShapes;
constexpr const char* const
DirectedInterleaveDatasetOp::kNumInputDatasets;
constexpr char kCycleLength[] = "cycle_length";
constexpr char kDataInputImplEmpty[] = "data_input_impl_empty";
constexpr char kSelectorInputImplEmpty[] = "selector_input_impl_empty";
class DirectedInterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* selector_input,
std::vector<DatasetBase*> data_inputs, bool stop_on_empty_dataset)
: DatasetBase(DatasetContext(ctx)),
selector_input_(selector_input),
data_inputs_(std::move(data_inputs)),
stop_on_empty_dataset_(stop_on_empty_dataset) {
selector_input_->Ref();
output_shapes_ = data_inputs_[0]->output_shapes();
data_inputs_[0]->Ref();
for (size_t i = 1; i < data_inputs_.size(); ++i) {
const DatasetBase* data_input = data_inputs_[i];
data_input->Ref();
for (size_t j = 0; j < output_shapes_.size(); ++j) {
output_shapes_[j] = MostSpecificCompatibleShape(
output_shapes_[j], data_input->output_shapes()[j]);
}
}
}
~Dataset() override {
selector_input_->Unref();
for (DatasetBase* data_input : data_inputs_) {
data_input->Unref();
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return data_inputs_[0]->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
for (const auto& input : data_inputs_) {
int64_t n = input->Cardinality(options);
if (n == kInfiniteCardinality) {
return n;
}
}
return kUnknownCardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(selector_input_);
for (const auto& data_input : data_inputs_) {
inputs->push_back(data_input);
}
return absl::OkStatus();
}
Status CheckExternalState() const override {
for (const auto& input : data_inputs_) {
TF_RETURN_IF_ERROR(input->CheckExternalState());
}
return selector_input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* selector_input_node;
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, selector_input_, &selector_input_node));
std::vector<Node*> data_input_nodes(data_inputs_.size());
for (size_t i = 0; i < data_inputs_.size(); ++i) {
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, data_inputs_[i], &data_input_nodes[i]));
}
AttrValue stop_on_empty_dataset_attr;
b->BuildAttrValue(stop_on_empty_dataset_, &stop_on_empty_dataset_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{{0, selector_input_node}},
{{1, data_input_nodes}},
{std::make_pair(kStopOnEmptyDataset, stop_on_empty_dataset_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
num_active_inputs_(params.dataset->data_inputs_.size()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
TF_RETURN_IF_ERROR(dataset()->selector_input_->MakeIterator(
&input_contexts_[0], this, prefix(), &selector_input_impl_));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
data_input_impls_.resize(dataset()->data_inputs_.size());
for (size_t i = 0; i < data_input_impls_.size(); ++i) {
const DatasetBase* data_input = dataset()->data_inputs_[i];
TF_RETURN_IF_ERROR(data_input->MakeIterator(
&input_contexts_[i + 1], this,
strings::StrCat(prefix(), "[", i, "]"), &data_input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i + 1].checkpoint());
}
return absl::OkStatus();
}
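// Added explanatory sketch (not in the original source): each call below reads
// one scalar from the selector iterator and forwards GetNext() to the data
// input at that index. For example, a selector stream {0, 1, 0} over inputs
// A = {a0, a1} and B = {b0} yields a0, b0, a1. An exhausted input either ends
// the sequence (when stop_on_empty_dataset_ is set) or is skipped whenever the
// selector picks it again.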
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!selector_input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (true) {
std::vector<Tensor> selector_result;
*end_of_sequence = false;
TF_RETURN_IF_ERROR(selector_input_impl_->GetNext(
&input_contexts_[0], &selector_result, end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
if (*end_of_sequence) {
ResetInputs();
return absl::OkStatus();
}
int64_t selected_input = selector_result[0].scalar<int64_t>()();
if (selected_input < 0 || selected_input >= data_input_impls_.size()) {
return errors::InvalidArgument(
"Selector index out of range: ", selected_input,
" >= ", data_input_impls_.size());
}
if (data_input_impls_[selected_input]) {
bool end_of_selected_input = false;
TF_RETURN_IF_ERROR(data_input_impls_[selected_input]->GetNext(
&input_contexts_[selected_input + 1], out_tensors,
&end_of_selected_input));
ctx->MergeCheckpoint(
input_contexts_[selected_input + 1].checkpoint());
if (!end_of_selected_input) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(data_input_impls_[selected_input]->prefix());
if (dataset()->stop_on_empty_dataset_) {
*end_of_sequence = true;
ResetInputs();
return absl::OkStatus();
}
data_input_impls_[selected_input].reset();
--num_active_inputs_;
if (num_active_inputs_ == 0) {
selector_input_impl_.reset();
*end_of_sequence = true;
return absl::OkStatus();
}
}
VLOG(2) << "DirectedInterleave selected an exhausted input: "
<< selected_input;
}
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args),
{model::MakeNonTunableParameter(kCycleLength, 1)});
}
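// Added commentary (not in the original source): a checkpoint records one
// "is empty" flag for the selector iterator and one per data input, plus the
// nested state of every iterator that is still live, so RestoreInternal can
// recreate exactly which inputs were already exhausted when the checkpoint
// was taken.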
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kSelectorInputImplEmpty),
static_cast<int64_t>(!selector_input_impl_)));
if (selector_input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, selector_input_impl_));
}
for (size_t i = 0; i < data_input_impls_.size(); ++i) {
const auto& data_input_impl = data_input_impls_[i];
TF_RETURN_IF_ERROR(writer->WriteScalar(
full_name(strings::StrCat(kDataInputImplEmpty, "[", i, "]")),
static_cast<int64_t>(!data_input_impl)));
if (data_input_impl) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, data_input_impl));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kSelectorInputImplEmpty), &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, selector_input_impl_));
} else {
selector_input_impl_.reset();
}
for (size_t i = 0; i < data_input_impls_.size(); ++i) {
TF_RETURN_IF_ERROR(reader->ReadScalar(
full_name(strings::StrCat(kDataInputImplEmpty, "[", i, "]")),
&input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, data_input_impls_[i]));
} else {
data_input_impls_[i].reset();
}
}
return absl::OkStatus();
}
private:
void ResetInputs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
selector_input_impl_.reset();
for (auto& data_input_impl : data_input_impls_) {
data_input_impl.reset();
}
num_active_inputs_ = 0;
}
mutex mu_;
std::vector<IteratorContext> input_contexts_;
std::unique_ptr<IteratorBase> selector_input_impl_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<IteratorBase>> data_input_impls_
TF_GUARDED_BY(mu_);
int64_t num_active_inputs_ TF_GUARDED_BY(mu_);
};
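// Added note (illustrative, not part of the original source): the merged
// output shape keeps a dimension only when every input agrees on it, e.g.
// merging [2, 3] with [2, 4] gives [2, -1], while mismatched ranks or an
// unknown rank collapse to a fully unknown shape.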
static PartialTensorShape MostSpecificCompatibleShape(
const PartialTensorShape& ts1, const PartialTensorShape& ts2) {
PartialTensorShape output_tensorshape;
if (ts1.dims() != ts2.dims() || ts1.unknown_rank() || ts2.unknown_rank())
return output_tensorshape;
auto dims1 = ts1.dim_sizes();
auto dims2 = ts2.dim_sizes();
for (int d = 0; d < ts1.dims(); ++d) {
if (dims1[d] == dims2[d])
output_tensorshape.Concatenate(dims1[d]);
else
output_tensorshape.Concatenate(-1);
}
return output_tensorshape;
}
const DatasetBase* const selector_input_;
const std::vector<DatasetBase*> data_inputs_;
std::vector<PartialTensorShape> output_shapes_;
const bool stop_on_empty_dataset_;
};
DirectedInterleaveDatasetOp::DirectedInterleaveDatasetOp(
OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
if (ctx->HasAttr(kStopOnEmptyDataset)) {
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kStopOnEmptyDataset, &stop_on_empty_dataset_));
}
}
void DirectedInterleaveDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
DatasetBase* selector_input;
OP_REQUIRES_OK(ctx,
GetDatasetFromVariantTensor(ctx->input(0), &selector_input));
OP_REQUIRES(
ctx,
selector_input->output_dtypes().size() == 1 &&
selector_input->output_dtypes()[0] == DT_INT64 &&
selector_input->output_shapes().size() == 1 &&
selector_input->output_shapes()[0].IsCompatibleWith(
PartialTensorShape({})),
errors::InvalidArgument(
"The selector input must be a dataset of scalar int64 elements."));
std::vector<DatasetBase*> data_inputs;
for (size_t i = 1; i < ctx->num_inputs(); ++i) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
data_inputs.push_back(input);
OP_REQUIRES(ctx, data_inputs[0]->output_dtypes() == input->output_dtypes(),
errors::InvalidArgument(
"All inputs must have the same output_dtypes. First input "
"has types ",
DataTypeVectorString(data_inputs[0]->output_dtypes()),
", and input ", i - 1, " has types ",
DataTypeVectorString(input->output_dtypes())));
}
*output = new Dataset(ctx, selector_input, std::move(data_inputs),
stop_on_empty_dataset_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("DirectedInterleaveDataset").Device(DEVICE_CPU),
DirectedInterleaveDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalDirectedInterleaveDataset").Device(DEVICE_CPU),
DirectedInterleaveDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/directed_interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "directed_interleave_dataset";
class DirectedInterleaveDatasetParams : public DatasetParams {
public:
template <typename S, typename T>
DirectedInterleaveDatasetParams(S selector_input_dataset_params,
std::vector<T> input_dataset_params_vec,
bool stop_on_empty_dataset,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_input_datasets, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
stop_on_empty_dataset_(stop_on_empty_dataset),
num_input_datasets_(input_dataset_params_vec.size()) {
input_dataset_params_.push_back(
std::make_unique<S>(selector_input_dataset_params));
for (auto input_dataset_params : input_dataset_params_vec) {
input_dataset_params_.push_back(
std::make_unique<T>(input_dataset_params));
}
if (!input_dataset_params_vec.empty()) {
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params_vec[0].dataset_type(),
input_dataset_params_vec[0].iterator_prefix());
}
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(
DirectedInterleaveDatasetOp::kSelectorInputDataset);
for (int i = 0; i < num_input_datasets_; ++i) {
input_names->emplace_back(absl::StrCat(
DirectedInterleaveDatasetOp::kDataInputDatasets, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kOutputTypes,
output_dtypes_);
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kOutputShapes,
output_shapes_);
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kNumInputDatasets,
num_input_datasets_);
attr_vector->emplace_back(DirectedInterleaveDatasetOp::kStopOnEmptyDataset,
stop_on_empty_dataset_);
return absl::OkStatus();
}
string dataset_type() const override {
return DirectedInterleaveDatasetOp::kDatasetType;
}
private:
bool stop_on_empty_dataset_;
int32 num_input_datasets_;
};
class DirectedInterleaveDatasetOpTest : public DatasetOpsTestBase {};
DirectedInterleaveDatasetParams AlternateInputsParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams SelectExhaustedInputParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 2, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams OneInputDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 0})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 6, 1)},
false,
{DT_INT64},
{PartialTensorShape({})},
1,
kNodeName);
}
DirectedInterleaveDatasetParams ZeroInputDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 0})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{},
false,
{DT_INT64},
{PartialTensorShape({})},
0,
kNodeName);
}
DirectedInterleaveDatasetParams StopOnEmptyDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 50, 1)},
true,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams SkipEmptyDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 50, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams EmptyInputDatasetParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 0, 0, 0, 0, 0})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 0, 1),
RangeDatasetParams(10, 50, 1)},
true,
{DT_INT64},
{PartialTensorShape({})},
0,
kNodeName);
}
DirectedInterleaveDatasetParams LargeNumInputDatasetsParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
5,
kNodeName);
}
DirectedInterleaveDatasetParams SmallNumInputDatasetsParams() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
1,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidSelectorOutputDataType() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int32>(TensorShape{6}, {0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidSelectorOutputShape() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6, 1},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidSelectorValues() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{2, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{RangeDatasetParams(0, 3, 1),
RangeDatasetParams(10, 13, 1)},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
DirectedInterleaveDatasetParams InvalidInputDatasetsDataType() {
auto selector_input_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{6},
{0, 1, 0, 1, 0, 1})},
"tensor_slice");
return DirectedInterleaveDatasetParams(
selector_input_dataset_params,
std::vector<RangeDatasetParams>{
RangeDatasetParams(0, 3, 1, {DT_INT32}),
RangeDatasetParams(10, 13, 1, {DT_INT64})},
false,
{DT_INT64, DT_INT64},
{PartialTensorShape({}), PartialTensorShape({})},
2,
kNodeName);
}
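// Added commentary (not in the original test file): the expected outputs below
// follow directly from the selector tensors above. AlternateInputsParams uses
// selector {0,1,0,1,0,1} over range(0,3) and range(10,13), giving
// 0,10,1,11,2,12; SelectExhaustedInputParams exhausts range(0,2) early and
// ends with 0,10,1,11,12; SkipEmptyDatasetParams keeps drawing from the second
// input once the first is empty, giving 0,1,2,10.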
std::vector<GetNextTestCase<DirectedInterleaveDatasetParams>>
GetNextTestCases() {
return {{AlternateInputsParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {2}, {12}})}},
{SelectExhaustedInputParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {12}})}},
{OneInputDatasetParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {3}, {4}, {5}})}},
{StopOnEmptyDatasetParams(),
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}})}},
{SkipEmptyDatasetParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {10}})}},
{EmptyInputDatasetParams(),
{CreateTensors<int64_t>(TensorShape({}), {})}},
{LargeNumInputDatasetsParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {2}, {12}})}},
{SmallNumInputDatasetsParams(),
{CreateTensors<int64_t>(
TensorShape({}), {{0}, {10}, {1}, {11}, {2}, {12}})}}};
}
ITERATOR_GET_NEXT_TEST_P(DirectedInterleaveDatasetOpTest,
DirectedInterleaveDatasetParams, GetNextTestCases())
TEST_F(DirectedInterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(DirectedInterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(DirectedInterleaveDatasetOp::kDatasetType)));
}
TEST_F(DirectedInterleaveDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(DirectedInterleaveDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(DirectedInterleaveDatasetOpTest, Cardinality) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(DirectedInterleaveDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(DirectedInterleaveDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(DirectedInterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = AlternateInputsParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(DirectedInterleaveDatasetOp::kDatasetType,
dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<DirectedInterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{AlternateInputsParams(),
{0, 5, 8},
CreateTensors<int64_t>(TensorShape{}, {{0}, {10}, {1}, {11}, {2}, {12}}),
true},
{SelectExhaustedInputParams(),
{0, 4, 8},
CreateTensors<int64_t>(TensorShape{}, {{0}, {10}, {1}, {11}, {12}}),
true},
{OneInputDatasetParams(),
{0, 5, 8},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}})}},
{StopOnEmptyDatasetParams(),
{0, 2, 4},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}},
{SkipEmptyDatasetParams(),
{0, 2, 4},
{CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {10}})}},
{EmptyInputDatasetParams(),
{0, 2, 4},
{CreateTensors<int64_t>(TensorShape({}), {})}},
{LargeNumInputDatasetsParams(),
{0, 5, 8},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {10}, {1}, {11}, {2}, {12}})}},
{SmallNumInputDatasetsParams(),
{0, 5, 8},
{CreateTensors<int64_t>(TensorShape({}),
{{0}, {10}, {1}, {11}, {2}, {12}})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(DirectedInterleaveDatasetOpTest,
DirectedInterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(DirectedInterleaveDatasetOpTest, InvalidArguments) {
std::vector<DirectedInterleaveDatasetParams> invalid_params_vec = {
InvalidSelectorOutputDataType(), InvalidSelectorOutputShape(),
InvalidInputDatasetsDataType(), ZeroInputDatasetParams()};
for (auto& dataset_params : invalid_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
TEST_F(DirectedInterleaveDatasetOpTest, InvalidSelectorValues) {
auto dataset_params = InvalidSelectorValues();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> next;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
1,575 | cpp | tensorflow/tensorflow | list_dataset_op | tensorflow/core/kernels/data/experimental/list_dataset_op.cc | tensorflow/core/kernels/data/experimental/list_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_LIST_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_LIST_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class ListDatasetOp : public DatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "List";
static constexpr const char* const kTensors = "tensors";
static constexpr const char* const kTinputTypes = "Tinput_types";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit ListDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
class Dataset;
DataTypeVector input_types_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/list_dataset_op.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const ListDatasetOp::kDatasetType;
constexpr const char* const ListDatasetOp::kTensors;
constexpr const char* const ListDatasetOp::kTinputTypes;
constexpr const char* const ListDatasetOp::kOutputTypes;
constexpr const char* const ListDatasetOp::kOutputShapes;
class ListDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors,
const DataTypeVector& input_types, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
int num_components)
: DatasetBase(DatasetContext(ctx)),
tensors_(std::move(tensors)),
num_elements_(tensors_.size() / num_components),
num_components_(num_components),
input_types_(input_types),
output_types_(output_types),
output_shapes_(output_shapes) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<IndexSplitProvider>(num_elements_));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return num_elements_;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
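// Added note (not part of the original source): `tensors_` stores all
// elements flattened in row-major order, so component `i` of element `index`
// lives at tensors_[i + num_components_ * index]. Random access below simply
// copies that contiguous slice into `out_tensors`.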
absl::Status Get(OpKernelContext* ctx, int64_t index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
absl::Status Get(AnyContext ctx, int64_t index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->clear();
out_tensors->reserve(num_components_);
for (int i = 0; i < num_components_; ++i) {
out_tensors->push_back(tensors_[i + num_components_ * index]);
}
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> tensors;
tensors.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
tensors.emplace_back(node);
}
AttrValue input_types;
b->BuildAttrValue(input_types_, &input_types);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, tensors}},
{{kTinputTypes, input_types}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty()) {
split_provider_ =
std::make_shared<IndexSplitProvider>(dataset()->num_elements_);
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
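// Added sketch (commentary only): each GetNext call asks the split provider
// for the next element index and then copies that element's tensors out of
// the flattened `tensors_` buffer; when a global-shuffle index mapper is
// installed on the context, the lookup is delegated to GlobalShuffleIterator
// instead.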
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
int64_t index = split.scalar<int64_t>()();
out_tensors->reserve(dataset()->num_components_);
for (size_t i = 0; i < dataset()->num_components_; ++i) {
out_tensors->push_back(
dataset()->tensors_[i + dataset()->num_components_ * index]);
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return split_provider_->Save(
[this](const std::string& key) { return full_name(key); }, writer);
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(ctx);
}
return split_provider_->Restore(
[this](const std::string& key) { return full_name(key); }, reader);
}
private:
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
int64_t num_elements_;
size_t num_components_;
DataTypeVector input_types_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
ListDatasetOp::ListDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kTinputTypes, &input_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void ListDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kTensors, &inputs));
std::vector<Tensor> tensors(inputs.begin(), inputs.end());
*output = new Dataset(ctx, std::move(tensors), input_types_, output_types_,
output_shapes_, output_shapes_.size());
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ListDataset").Device(DEVICE_CPU), ListDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/experimental/list_dataset_op.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_list_dataset";
class ListDatasetOpTest : public DatasetOpsTestBase {};
class ListDatasetParams : public DatasetParams {
public:
ListDatasetParams(std::vector<std::vector<Tensor>> elements, string node_name)
: DatasetParams(ListOutputTypes(elements), ListOutputShapes(elements),
std::move(node_name)) {
input_types_.reserve(elements.size() * elements.front().size());
tensors_.reserve(elements.size() * elements.front().size());
for (const auto& element : elements) {
for (const auto& tensor : element) {
input_types_.push_back(tensor.dtype());
tensors_.emplace_back(std::move(tensor));
}
}
}
std::vector<Tensor> GetInputTensors() const override { return tensors_; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->reserve(tensors_.size());
for (int i = 0; i < tensors_.size(); ++i) {
input_names->emplace_back(absl::StrCat("tensors_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"Tinput_types", input_types_},
{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return "List"; }
int64_t num_elements() const {
return tensors_.size() / num_tensors_per_element();
}
size_t num_tensors_per_element() const { return output_shapes_.size(); }
private:
DataTypeVector ListInputTypes(
const std::vector<std::vector<Tensor>>& input_elements) {
DataTypeVector input_types;
for (const auto& element : input_elements) {
for (const auto& tensor : element) {
input_types.emplace_back(tensor.dtype());
}
}
return input_types;
}
DataTypeVector ListOutputTypes(
const std::vector<std::vector<Tensor>>& input_elements) {
DataTypeVector output_types;
for (const auto& tensor : input_elements.front()) {
output_types.emplace_back(tensor.dtype());
}
return output_types;
}
std::vector<PartialTensorShape> ListOutputShapes(
const std::vector<std::vector<Tensor>>& input_elements) {
std::vector<PartialTensorShape> output_shapes;
for (const auto& tensor : input_elements.front()) {
gtl::InlinedVector<int64_t, 4> partial_dim_sizes;
partial_dim_sizes.reserve(tensor.dims());
for (int i = 0; i < tensor.dims(); ++i) {
partial_dim_sizes.push_back(tensor.dim_size(i));
}
output_shapes.emplace_back(std::move(partial_dim_sizes));
}
return output_shapes;
}
public:
std::vector<Tensor> tensors_;
DataTypeVector input_types_;
};
ListDatasetParams PlainListDatasetParams() {
std::vector<std::vector<Tensor>> elements = {
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"})},
{CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}};
return {std::move(elements), kNodeName};
}
ListDatasetParams NestedListDatasetParams() {
std::vector<std::vector<Tensor>> elements = {
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3})},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}};
return {std::move(elements), kNodeName};
}
std::vector<GetNextTestCase<ListDatasetParams>> GetNextTestCases() {
return {
{PlainListDatasetParams(),
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedListDatasetParams(),
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedGetNextTest
: public ListDatasetOpTest,
public ::testing::WithParamInterface<GetNextTestCase<ListDatasetParams>> {
};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
size_t num_tensors_per_element =
test_case.dataset_params.num_tensors_per_element();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_element = 0;
while (true) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (end_of_sequence) {
EXPECT_TRUE(out_tensors.empty());
break;
}
for (int i = 0; i < out_tensors.size(); ++i) {
EXPECT_LT(i + num_tensors_per_element * cur_element,
test_case.expected_outputs.size());
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i + num_tensors_per_element * cur_element]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case
.expected_outputs[i + num_tensors_per_element * cur_element]));
}
}
cur_element++;
}
}
INSTANTIATE_TEST_SUITE_P(ListDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(ListDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ListDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ListDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ListDatasetParams>>
DatasetOutputTypesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_dtypes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetOutputTypesTestCases())
std::vector<DatasetOutputShapesTestCase<ListDatasetParams>>
DatasetOutputShapesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_shapes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ListDatasetParams>>
DatasetCardinalityTestCases() {
return {{PlainListDatasetParams(), 2},
{NestedListDatasetParams(), 2}};
}
DATASET_CARDINALITY_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ListDatasetParams>>
IteratorOutputTypesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_dtypes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
IteratorOutputTypesTestCases())
std::vector<IteratorOutputShapesTestCase<ListDatasetParams>>
IteratorOutputShapesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_shapes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ListDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ListDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ListDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PlainListDatasetParams(),
{0, 1, 2},
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedListDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public ListDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<ListDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_context;
TF_ASSERT_OK(CreateSerializationContext(&serialization_context));
int cur_iteration = 0;
bool end_of_sequence = false;
auto params = static_cast<ListDatasetParams&>(test_case.dataset_params);
int64_t num_elements = params.num_elements();
size_t num_tensors_per_element = params.num_tensors_per_element();
std::vector<Tensor> out_tensors;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_elements) {
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output =
out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i + num_tensors_per_element *
(cur_iteration - 1)]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_element *
(cur_iteration - 1)]));
}
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator",
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_SUITE_P(
ListDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
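// Added note (commentary only): CheckSplitProviderShardedIteration with
// num_shards = 3 and shard index 1 is expected to visit positions 1 and 4 of
// the seven-element list, i.e. the values {2} and {7} asserted below.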
TEST_F(ListDatasetOpTest, SplitProvider) {
auto params =
ListDatasetParams({{CreateTensor<int64_t>(TensorShape({}), {6})},
{CreateTensor<int64_t>(TensorShape({}), {2})},
{CreateTensor<int64_t>(TensorShape({}), {3})},
{CreateTensor<int64_t>(TensorShape({}), {8})},
{CreateTensor<int64_t>(TensorShape({}), {7})},
{CreateTensor<int64_t>(TensorShape({}), {0})},
{CreateTensor<int64_t>(TensorShape({}), {10})}},
kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}),
{{6}, {2}, {3}, {8}, {7}, {0}, {10}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {{2}, {7}})));
}
}
}
} |
1,576 | cpp | tensorflow/tensorflow | unique_dataset_op | tensorflow/core/kernels/data/experimental/unique_dataset_op.cc | tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_UNIQUE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_UNIQUE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
class UniqueDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Unique";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
explicit UniqueDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/unique_dataset_op.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const UniqueDatasetOp::kDatasetType;
constexpr const char* const UniqueDatasetOp::kInputDataset;
constexpr const char* const UniqueDatasetOp::kOutputTypes;
constexpr const char* const UniqueDatasetOp::kOutputShapes;
class UniqueDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), input_(input) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, strings::StrCat(prefix, "::Unique")});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return strings::StrCat("UniqueDatasetOp::Dataset");
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const typename Iterator::Params& params)
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
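// Added explanatory comment (not in the original source): the loop below keeps
// pulling elements from the input until it sees a value that is not already in
// `unique_elements_`, so an input stream {1, 1, 2, 3, 2} yields 1, 2, 3.
// Duplicate detection relies on the TensorHash/TensorKeyEqual functors defined
// further down.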
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
bool saw_new_value;
do {
saw_new_value = false;
out_tensors->clear();
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
break;
}
DCHECK_EQ(1, out_tensors->size());
saw_new_value = unique_elements_.insert((*out_tensors)[0]).second;
} while (!saw_new_value);
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeUnknownRatioNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name("input_impl_empty"), ""));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("unique_elements_size"),
unique_elements_.size()));
size_t i = 0;
for (const Tensor& t : unique_elements_) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
full_name(strings::StrCat("unique_elements[", i++, "]")), t));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
if (!reader->Contains(full_name("input_impl_empty"))) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t num_unique_elements;
unique_elements_.clear();
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("unique_elements_size"),
&num_unique_elements));
for (int64_t i = 0; i < num_unique_elements; ++i) {
Tensor unique_element;
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), full_name(strings::StrCat("unique_elements[", i, "]")),
&unique_element));
auto insert_result = unique_elements_.insert(unique_element);
if (!insert_result.second) {
return errors::InvalidArgument(
"Checkpoint contained two unique elements with the same "
"value.");
}
}
return absl::OkStatus();
}
private:
struct TensorHash {
size_t operator()(const Tensor& t) const {
if (t.dtype() == DT_INT32 || t.dtype() == DT_INT64) {
return Hash64(t.tensor_data().data(), t.tensor_data().size());
} else {
DCHECK_EQ(DT_STRING, t.dtype());
auto flat_t = t.flat<tstring>();
uint64 hash = 0;
for (int64_t i = 0; i < t.NumElements(); ++i) {
hash = Hash64Combine(hash, Hash64(flat_t(i)));
}
return static_cast<size_t>(hash);
}
}
};
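    // Equality functor for unique_elements_: two tensors compare equal only
    // if shape, dtype, and every element match.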
struct TensorKeyEqual {
bool operator()(const Tensor& lhs, const Tensor& rhs) const {
if (lhs.shape() != rhs.shape() || lhs.dtype() != rhs.dtype()) {
return false;
}
switch (lhs.dtype()) {
#define HANDLE_TYPE(T) \
case T: \
do { \
auto lhs_flat = lhs.flat<EnumToDataType<T>::Type>(); \
auto rhs_flat = rhs.flat<EnumToDataType<T>::Type>(); \
for (int64_t i = 0; i < lhs.NumElements(); ++i) { \
if (lhs_flat(i) != rhs_flat(i)) { \
return false; \
} \
} \
return true; \
} while (0)
HANDLE_TYPE(DT_INT32);
HANDLE_TYPE(DT_INT64);
HANDLE_TYPE(DT_STRING);
default:
DCHECK(false) << "UniqueDataset unhandled data type: "
<< DataTypeString(lhs.dtype());
return false;
}
}
};
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unordered_set<Tensor, TensorHash, TensorKeyEqual> unique_elements_
TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
};
void UniqueDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
OP_REQUIRES(ctx, input->output_dtypes().size() == 1,
errors::InvalidArgument("UniqueDataset only supports "
"inputs with a single component."));
DataType input_dtype = input->output_dtypes()[0];
OP_REQUIRES(ctx,
input_dtype == DT_INT32 || input_dtype == DT_INT64 ||
input_dtype == DT_STRING,
errors::InvalidArgument(
"UniqueDataset only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component."));
*output = new Dataset(ctx, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("UniqueDataset").Device(DEVICE_CPU),
UniqueDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalUniqueDataset").Device(DEVICE_CPU),
UniqueDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/unique_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "unique_dataset";
class UniqueDatasetParams : public DatasetParams {
public:
template <typename T>
UniqueDatasetParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
kNodeName) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(UniqueDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attributes) const override {
*attributes = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override { return UniqueDatasetOp::kDatasetType; }
};
class UniqueDatasetOpTest : public DatasetOpsTestBase {};
UniqueDatasetParams NormalCaseParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{12, 1},
{1, 1, 2, 3, 5, 8, 13, 3, 21, 8, 8, 34})},
"tensor_slice_dataset");
return UniqueDatasetParams(tensor_slice_dataset_params,
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams LastRecordIsDuplicateParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{11, 1},
{1, 1, 2, 3, 5, 8, 13, 3, 21, 8, 8})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams AllRecordsTheSameParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{5, 1}, {1, 1, 1, 1, 1})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams EmptyInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0, 1}, {})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({1})});
}
UniqueDatasetParams StringParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(
TensorShape{11, 1},
{"one", "One", "two", "three", "five", "eight", "thirteen",
"twenty-one", "eight", "eight", "thirty-four"})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_STRING},
{PartialTensorShape({1})});
}
UniqueDatasetParams TwoComponentsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{
CreateTensor<int64_t>(TensorShape{1, 1}, {1}),
CreateTensor<int64_t>(TensorShape{1, 1}, {42}),
},
"tensor_slice_dataset");
return UniqueDatasetParams(
std::move(tensor_slice_dataset_params),
{DT_INT64, DT_INT64},
{PartialTensorShape({1}), PartialTensorShape({1})});
}
UniqueDatasetParams NoInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_INT64},
{PartialTensorShape({})});
}
UniqueDatasetParams FP32Params() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<float>(TensorShape{1, 1}, {3.14})},
"tensor_slice_dataset");
return UniqueDatasetParams(std::move(tensor_slice_dataset_params),
{DT_FLOAT},
{PartialTensorShape({1})});
}
std::vector<GetNextTestCase<UniqueDatasetParams>> GetNextTestCases() {
return {{NormalCaseParams(),
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}, {34}})},
{LastRecordIsDuplicateParams(),
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}})},
{AllRecordsTheSameParams(),
CreateTensors<int64_t>(TensorShape({1}), {{1}})},
{EmptyInputParams(),
CreateTensors<int64_t>(TensorShape({1}), {})},
{StringParams(),
CreateTensors<tstring>(TensorShape({1}), {{"one"},
{"One"},
{"two"},
{"three"},
{"five"},
{"eight"},
{"thirteen"},
{"twenty-one"},
{"thirty-four"}})}};
}
ITERATOR_GET_NEXT_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
GetNextTestCases())
TEST_F(UniqueDatasetOpTest, DatasetNodeName) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(UniqueDatasetOpTest, DatasetTypeString) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(UniqueDatasetOp::kDatasetType)));
}
TEST_F(UniqueDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(UniqueDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})}));
}
std::vector<CardinalityTestCase<UniqueDatasetParams>> CardinalityTestCases() {
return {{NormalCaseParams(),
kUnknownCardinality},
{EmptyInputParams(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<UniqueDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{NormalCaseParams(),
{DT_INT64}},
{StringParams(),
{DT_STRING}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<UniqueDatasetParams>>
IteratorOutputShapesTestCases() {
return {{NormalCaseParams(),
{PartialTensorShape({1})}},
{StringParams(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(UniqueDatasetOpTest, IteratorPrefix) {
auto dataset_params = NormalCaseParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
UniqueDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<UniqueDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{NormalCaseParams(),
{0, 2, 6, 8},
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}, {34}})},
{LastRecordIsDuplicateParams(),
{0, 2, 6, 8},
CreateTensors<int64_t>(TensorShape({1}),
{{1}, {2}, {3}, {5}, {8}, {13}, {21}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
IteratorSaveAndRestoreTestCases())
class ParameterizedInvalidInputTest
: public UniqueDatasetOpTest,
public ::testing::WithParamInterface<UniqueDatasetParams> {};
TEST_P(ParameterizedInvalidInputTest, InvalidInput) {
auto dataset_params = GetParam();
auto result = Initialize(dataset_params);
EXPECT_FALSE(result.ok());
}
INSTANTIATE_TEST_SUITE_P(FilterDatasetOpTest, ParameterizedInvalidInputTest,
::testing::ValuesIn({TwoComponentsParams(),
NoInputParams(), FP32Params()}));
}
}
}
} |
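// Illustrative sketch (not part of the upstream files above): the same
// insert-into-a-hash-set pattern that the UniqueDataset iterator relies on,
// reduced to scalar int64 tensors. All names in this sketch are made up for
// the example.
#include <cstdint>
#include <unordered_set>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/hash.h"
namespace unique_sketch {
struct ScalarTensorHash {
  size_t operator()(const tensorflow::Tensor& t) const {
    // Hash the raw tensor bytes, as UniqueDataset does for integer dtypes.
    return tensorflow::Hash64(t.tensor_data().data(), t.tensor_data().size());
  }
};
struct ScalarTensorEq {
  bool operator()(const tensorflow::Tensor& a,
                  const tensorflow::Tensor& b) const {
    return a.scalar<int64_t>()() == b.scalar<int64_t>()();
  }
};
// Returns true the first time a value is seen, false for duplicates;
// insert().second is what drives the "keep pulling until new value" loop.
inline bool IsNew(
    std::unordered_set<tensorflow::Tensor, ScalarTensorHash, ScalarTensorEq>*
        seen,
    const tensorflow::Tensor& t) {
  return seen->insert(t).second;
}
}  // namespace unique_sketch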
1,577 | cpp | tensorflow/tensorflow | schema | tensorflow/core/summary/schema.cc | tensorflow/core/summary/schema_test.cc | #ifndef TENSORFLOW_CORE_SUMMARY_SCHEMA_H_
#define TENSORFLOW_CORE_SUMMARY_SCHEMA_H_
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/db/sqlite.h"
namespace tensorflow {
constexpr uint32 kTensorboardSqliteApplicationId = 0xfeedabee;
Status SetupTensorboardSqliteDb(Sqlite* db);
}
#endif
#include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
Status Run(Sqlite* db, const char* sql) {
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
return stmt.StepAndReset();
}
}
Status SetupTensorboardSqliteDb(Sqlite* db) {
TF_RETURN_IF_ERROR(
db->PrepareOrDie(strings::StrCat("PRAGMA application_id=",
kTensorboardSqliteApplicationId))
.StepAndReset());
db->PrepareOrDie("PRAGMA user_version=0").StepAndResetOrDie();
Status s;
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Ids (
id INTEGER PRIMARY KEY
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Descriptions (
id INTEGER PRIMARY KEY,
description TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tensors (
rowid INTEGER PRIMARY KEY,
series INTEGER,
step INTEGER,
dtype INTEGER,
computed_time REAL,
shape TEXT,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TensorSeriesStepIndex
ON
Tensors (series, step)
WHERE
series IS NOT NULL
AND step IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS TensorStrings (
rowid INTEGER PRIMARY KEY,
tensor_rowid INTEGER NOT NULL,
idx INTEGER NOT NULL,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TensorStringIndex
ON TensorStrings (tensor_rowid, idx)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tags (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
tag_id INTEGER NOT NULL,
inserted_time DOUBLE,
tag_name TEXT,
display_name TEXT,
plugin_name TEXT,
plugin_data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TagIdIndex
ON Tags (tag_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TagRunNameIndex
ON
Tags (run_id, tag_name)
WHERE
run_id IS NOT NULL
AND tag_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Runs (
rowid INTEGER PRIMARY KEY,
experiment_id INTEGER,
run_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
finished_time REAL,
run_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunIdIndex
ON Runs (run_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunNameIndex
ON Runs (experiment_id, run_name)
WHERE run_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Experiments (
rowid INTEGER PRIMARY KEY,
user_id INTEGER,
experiment_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
is_watching INTEGER,
experiment_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentIdIndex
ON Experiments (experiment_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentNameIndex
ON Experiments (user_id, experiment_name)
WHERE experiment_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Users (
rowid INTEGER PRIMARY KEY,
user_id INTEGER NOT NULL,
inserted_time REAL,
user_name TEXT,
email TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserIdIndex
ON Users (user_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserNameIndex
ON Users (user_name)
WHERE user_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserEmailIndex
ON Users (email)
WHERE email IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Graphs (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
graph_id INTEGER NOT NULL,
inserted_time REAL,
graph_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphIdIndex
ON Graphs (graph_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphRunIndex
ON Graphs (run_id)
WHERE run_id IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Nodes (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
node_name TEXT,
op TEXT,
device TEXT,
node_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeIdIndex
ON Nodes (graph_id, node_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeNameIndex
ON Nodes (graph_id, node_name)
WHERE node_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS NodeInputs (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
idx INTEGER NOT NULL,
input_node_id INTEGER NOT NULL,
input_node_idx INTEGER,
is_control INTEGER
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeInputsIndex
ON NodeInputs (graph_id, node_id, idx)
)sql"));
return s;
}
} | #include "tensorflow/core/summary/schema.h"
#include <memory>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(SchemaTest, SmokeTestTensorboardSchema) {
Sqlite* db;
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db));
core::ScopedUnref unref_db(db);
TF_ASSERT_OK(SetupTensorboardSqliteDb(db));
}
}
} |
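// Illustrative sketch (not part of the upstream files above): installing the
// TensorBoard schema in a file-backed database. The path and helper name are
// made up for the example.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/summary/schema.h"
namespace schema_sketch {
inline tensorflow::Status CreateTensorboardDb(const tensorflow::string& path) {
  tensorflow::Sqlite* db = nullptr;
  TF_RETURN_IF_ERROR(tensorflow::Sqlite::Open(
      path, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, &db));
  tensorflow::core::ScopedUnref unref_db(db);
  // Every statement in SetupTensorboardSqliteDb uses IF NOT EXISTS, so this
  // is safe to run against an already-initialized database.
  return tensorflow::SetupTensorboardSqliteDb(db);
}
}  // namespace schema_sketch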
1,578 | cpp | tensorflow/tensorflow | summary_file_writer | tensorflow/core/summary/summary_file_writer.cc | tensorflow/core/summary/summary_file_writer_test.cc | #ifndef TENSORFLOW_CORE_SUMMARY_SUMMARY_FILE_WRITER_H_
#define TENSORFLOW_CORE_SUMMARY_SUMMARY_FILE_WRITER_H_
#include "tensorflow/core/kernels/summary_interface.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
Status CreateSummaryFileWriter(int max_queue, int flush_millis,
const string& logdir,
const string& filename_suffix, Env* env,
SummaryWriterInterface** result);
}
#endif
#include "tensorflow/core/summary/summary_file_writer.h"
#include <memory>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/summary/summary_converter.h"
#include "tensorflow/core/util/events_writer.h"
namespace tensorflow {
namespace {
class SummaryFileWriter : public SummaryWriterInterface {
public:
SummaryFileWriter(int max_queue, int flush_millis, Env* env)
: SummaryWriterInterface(),
is_initialized_(false),
max_queue_(max_queue),
flush_millis_(flush_millis),
env_(env) {}
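  // Creates the events file under `logdir` (creating the directory if
  // needed), uniquifying the filename suffix with the process id and a
  // per-process counter so concurrent writers do not collide.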
Status Initialize(const string& logdir, const string& filename_suffix) {
const Status is_dir = env_->IsDirectory(logdir);
if (!is_dir.ok()) {
if (is_dir.code() != tensorflow::error::NOT_FOUND) {
return is_dir;
}
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(logdir));
}
int32_t pid = env_->GetProcessId();
static std::atomic<int64_t> file_id_counter(0);
string sep = absl::StartsWith(filename_suffix, ".") ? "" : ".";
const string uniquified_filename_suffix = absl::StrCat(
".", pid, ".", file_id_counter.fetch_add(1), sep, filename_suffix);
mutex_lock ml(mu_);
events_writer_ =
std::make_unique<EventsWriter>(io::JoinPath(logdir, "events"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
events_writer_->InitWithSuffix(uniquified_filename_suffix),
"Could not initialize events writer.");
last_flush_ = env_->NowMicros();
is_initialized_ = true;
return absl::OkStatus();
}
Status Flush() override {
mutex_lock ml(mu_);
if (!is_initialized_) {
return errors::FailedPrecondition("Class was not properly initialized.");
}
return InternalFlush();
}
~SummaryFileWriter() override {
(void)Flush();
}
Status WriteTensor(int64_t global_step, Tensor t, const string& tag,
const string& serialized_metadata) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
Summary::Value* v = e->mutable_summary()->add_value();
if (t.dtype() == DT_STRING) {
t.AsProtoField(v->mutable_tensor());
} else {
t.AsProtoTensorContent(v->mutable_tensor());
}
v->set_tag(tag);
if (!serialized_metadata.empty()) {
v->mutable_metadata()->ParseFromString(serialized_metadata);
}
return WriteEvent(std::move(e));
}
Status WriteScalar(int64_t global_step, Tensor t,
const string& tag) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(
AddTensorAsScalarToSummary(t, tag, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteHistogram(int64_t global_step, Tensor t,
const string& tag) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(
AddTensorAsHistogramToSummary(t, tag, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteImage(int64_t global_step, Tensor t, const string& tag,
int max_images, Tensor bad_color) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(AddTensorAsImageToSummary(t, tag, max_images, bad_color,
e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteAudio(int64_t global_step, Tensor t, const string& tag,
int max_outputs, float sample_rate) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(AddTensorAsAudioToSummary(
t, tag, max_outputs, sample_rate, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteGraph(int64_t global_step,
std::unique_ptr<GraphDef> graph) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
graph->SerializeToString(e->mutable_graph_def());
return WriteEvent(std::move(e));
}
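  // Queues the event; the queue is flushed once it grows past max_queue_ or
  // flush_millis_ milliseconds have elapsed since the last flush.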
Status WriteEvent(std::unique_ptr<Event> event) override {
mutex_lock ml(mu_);
queue_.emplace_back(std::move(event));
if (queue_.size() > max_queue_ ||
env_->NowMicros() - last_flush_ > 1000 * flush_millis_) {
return InternalFlush();
}
return absl::OkStatus();
}
string DebugString() const override { return "SummaryFileWriter"; }
private:
double GetWallTime() {
return static_cast<double>(env_->NowMicros()) / 1.0e6;
}
Status InternalFlush() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (const std::unique_ptr<Event>& e : queue_) {
events_writer_->WriteEvent(*e);
}
queue_.clear();
TF_RETURN_WITH_CONTEXT_IF_ERROR(events_writer_->Flush(),
"Could not flush events file.");
last_flush_ = env_->NowMicros();
return absl::OkStatus();
}
bool is_initialized_;
const int max_queue_;
const int flush_millis_;
uint64 last_flush_;
Env* env_;
mutex mu_;
std::vector<std::unique_ptr<Event>> queue_ TF_GUARDED_BY(mu_);
std::unique_ptr<EventsWriter> events_writer_ TF_GUARDED_BY(mu_);
std::vector<std::pair<string, SummaryMetadata>> registered_summaries_
TF_GUARDED_BY(mu_);
};
}
Status CreateSummaryFileWriter(int max_queue, int flush_millis,
const string& logdir,
const string& filename_suffix, Env* env,
SummaryWriterInterface** result) {
SummaryFileWriter* w = new SummaryFileWriter(max_queue, flush_millis, env);
const Status s = w->Initialize(logdir, filename_suffix);
if (!s.ok()) {
w->Unref();
*result = nullptr;
return s;
}
*result = w;
return absl::OkStatus();
}
} | #include "tensorflow/core/summary/summary_file_writer.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
uint64 NowSeconds() const override { return current_millis_ * 1000; }
private:
uint64 current_millis_;
};
class SummaryFileWriterTest : public ::testing::Test {
protected:
Status SummaryTestHelper(
const string& test_name,
const std::function<Status(SummaryWriterInterface*)>& writer_fn,
const std::function<void(const Event&)>& test_fn) {
static std::set<string>* tests = new std::set<string>();
CHECK(tests->insert(test_name).second) << ": " << test_name;
SummaryWriterInterface* writer;
TF_CHECK_OK(CreateSummaryFileWriter(1, 1, testing::TmpDir(), test_name,
&env_, &writer));
core::ScopedUnref deleter(writer);
TF_CHECK_OK(writer_fn(writer));
TF_CHECK_OK(writer->Flush());
std::vector<string> files;
TF_CHECK_OK(env_.GetChildren(testing::TmpDir(), &files));
bool found = false;
for (const string& f : files) {
if (absl::StrContains(f, test_name)) {
if (found) {
return errors::Unknown("Found more than one file for ", test_name);
}
found = true;
std::unique_ptr<RandomAccessFile> read_file;
TF_CHECK_OK(env_.NewRandomAccessFile(io::JoinPath(testing::TmpDir(), f),
&read_file));
io::RecordReader reader(read_file.get(), io::RecordReaderOptions());
tstring record;
uint64 offset = 0;
TF_CHECK_OK(
reader.ReadRecord(&offset,
&record));
TF_CHECK_OK(reader.ReadRecord(&offset, &record));
Event e;
e.ParseFromString(record);
test_fn(e);
}
}
if (!found) {
return errors::Unknown("Found no file for ", test_name);
}
return absl::OkStatus();
}
FakeClockEnv env_;
};
TEST_F(SummaryFileWriterTest, WriteTensor) {
TF_CHECK_OK(SummaryTestHelper("tensor_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteTensor(
2, one, "name",
SummaryMetadata().SerializeAsString()));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
}));
TF_CHECK_OK(SummaryTestHelper(
"string_tensor_test",
[](SummaryWriterInterface* writer) {
Tensor hello(DT_STRING, TensorShape({}));
hello.scalar<tstring>()() = "hello";
TF_RETURN_IF_ERROR(writer->WriteTensor(
2, hello, "name", SummaryMetadata().SerializeAsString()));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_EQ(e.summary().value(0).tensor().dtype(), DT_STRING);
EXPECT_EQ(e.summary().value(0).tensor().string_val()[0], "hello");
}));
}
TEST_F(SummaryFileWriterTest, WriteScalar) {
TF_CHECK_OK(SummaryTestHelper(
"scalar_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteScalar(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_EQ(e.summary().value(0).simple_value(), 1.0);
}));
}
TEST_F(SummaryFileWriterTest, WriteHistogram) {
TF_CHECK_OK(SummaryTestHelper("hist_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(
writer->WriteHistogram(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_TRUE(e.summary().value(0).has_histo());
}));
}
namespace {
template <typename T>
static Status CreateImage(SummaryWriterInterface* writer) {
Tensor bad_color(DT_UINT8, TensorShape({1}));
bad_color.scalar<uint8>()() = 0;
Tensor one(DataTypeToEnum<T>::v(), TensorShape({1, 1, 1, 1}));
one.scalar<T>()() = T(1);
TF_RETURN_IF_ERROR(writer->WriteImage(2, one, "name", 1, bad_color));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
}
static void CheckImage(const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name/image");
CHECK(e.summary().value(0).has_image());
EXPECT_EQ(e.summary().value(0).image().height(), 1);
EXPECT_EQ(e.summary().value(0).image().width(), 1);
EXPECT_EQ(e.summary().value(0).image().colorspace(), 1);
}
}
TEST_F(SummaryFileWriterTest, WriteImageUInt8) {
TF_CHECK_OK(
SummaryTestHelper("image_test_uint8", CreateImage<uint8>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageFloat) {
TF_CHECK_OK(
SummaryTestHelper("image_test_float", CreateImage<float>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageHalf) {
TF_CHECK_OK(SummaryTestHelper("image_test_half", CreateImage<Eigen::half>,
CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageDouble) {
TF_CHECK_OK(
SummaryTestHelper("image_test_double", CreateImage<double>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteAudio) {
TF_CHECK_OK(SummaryTestHelper(
"audio_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({1, 1}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteAudio(2, one, "name", 1, 1));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name/audio");
CHECK(e.summary().value(0).has_audio());
}));
}
TEST_F(SummaryFileWriterTest, WriteEvent) {
TF_CHECK_OK(
SummaryTestHelper("event_test",
[](SummaryWriterInterface* writer) {
std::unique_ptr<Event> e{new Event};
e->set_step(7);
e->mutable_summary()->add_value()->set_tag("hi");
TF_RETURN_IF_ERROR(writer->WriteEvent(std::move(e)));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 7);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "hi");
}));
}
TEST_F(SummaryFileWriterTest, WallTime) {
env_.AdvanceByMillis(7023);
TF_CHECK_OK(SummaryTestHelper(
"wall_time_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteScalar(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) { EXPECT_EQ(e.wall_time(), 7.023); }));
}
TEST_F(SummaryFileWriterTest, AvoidFilenameCollision) {
string test_name = "avoid_filename_collision_test";
int num_files = 10;
for (int i = 0; i < num_files; i++) {
SummaryWriterInterface* writer;
TF_CHECK_OK(CreateSummaryFileWriter(1, 1, testing::TmpDir(), test_name,
&env_, &writer));
core::ScopedUnref deleter(writer);
}
std::vector<string> files;
TF_CHECK_OK(env_.GetChildren(testing::TmpDir(), &files));
files.erase(std::remove_if(files.begin(), files.end(),
[test_name](string f) {
return !absl::StrContains(f, test_name);
}),
files.end());
EXPECT_EQ(num_files, files.size())
<< "files = [" << absl::StrJoin(files, ", ") << "]";
}
}
} |
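// Illustrative sketch (not part of the upstream files above): minimal use of
// CreateSummaryFileWriter outside the test harness. The log directory,
// filename suffix, and queue/flush values are made up for the example.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/summary/summary_file_writer.h"
namespace file_writer_sketch {
inline tensorflow::Status WriteOneScalar(const tensorflow::string& logdir) {
  tensorflow::SummaryWriterInterface* writer = nullptr;
  TF_RETURN_IF_ERROR(tensorflow::CreateSummaryFileWriter(
      /*max_queue=*/1, /*flush_millis=*/1000, logdir,
      /*filename_suffix=*/".v2", tensorflow::Env::Default(), &writer));
  tensorflow::core::ScopedUnref unref_writer(writer);
  tensorflow::Tensor loss(tensorflow::DT_FLOAT, tensorflow::TensorShape({}));
  loss.scalar<float>()() = 0.5f;
  TF_RETURN_IF_ERROR(writer->WriteScalar(/*global_step=*/1, loss, "loss"));
  return writer->Flush();
}
}  // namespace file_writer_sketch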
1,579 | cpp | tensorflow/tensorflow | summary_db_writer | tensorflow/core/summary/summary_db_writer.cc | tensorflow/core/summary/summary_db_writer_test.cc | #ifndef TENSORFLOW_CORE_SUMMARY_SUMMARY_DB_WRITER_H_
#define TENSORFLOW_CORE_SUMMARY_SUMMARY_DB_WRITER_H_
#include "tensorflow/core/kernels/summary_interface.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
Status CreateSummaryDbWriter(Sqlite* db, const string& experiment_name,
const string& run_name, const string& user_name,
Env* env, SummaryWriterInterface** result);
}
#endif
#include "tensorflow/core/summary/summary_db_writer.h"
#include <deque>
#include "tensorflow/core/summary/summary_converter.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/util/event.pb.h"
#define CALL_SUPPORTED_TYPES(m) \
TF_CALL_tstring(m) \
TF_CALL_half(m) \
TF_CALL_float(m) \
TF_CALL_double(m) \
TF_CALL_complex64(m) \
TF_CALL_complex128(m) \
TF_CALL_int8(m) \
TF_CALL_int16(m) \
TF_CALL_int32(m) \
TF_CALL_int64(m) \
TF_CALL_uint8(m) \
TF_CALL_uint16(m) \
TF_CALL_uint32(m) \
TF_CALL_uint64(m)
namespace tensorflow {
namespace {
const uint64 kIdTiers[] = {
0x7fffffULL,
0x7fffffffULL,
0x7fffffffffffULL,
};
const int kMaxIdTier = sizeof(kIdTiers) / sizeof(uint64) - 1;
const int kIdCollisionDelayMicros = 10;
const int kMaxIdCollisions = 21;
const int64_t kAbsent = 0LL;
const char* kScalarPluginName = "scalars";
const char* kImagePluginName = "images";
const char* kAudioPluginName = "audio";
const char* kHistogramPluginName = "histograms";
const int64_t kReserveMinBytes = 32;
const double kReserveMultiplier = 1.5;
const int64_t kPreallocateRows = 1000;
const uint64 kFlushBytes = 1024 * 1024;
double DoubleTime(uint64 micros) {
return static_cast<double>(micros) / 1.0e6;
}
string StringifyShape(const TensorShape& shape) {
string result;
bool first = true;
for (const auto& dim : shape) {
if (first) {
first = false;
} else {
strings::StrAppend(&result, ",");
}
strings::StrAppend(&result, dim.size);
}
return result;
}
Status CheckSupportedType(const Tensor& t) {
#define CASE(T) \
case DataTypeToEnum<T>::value: \
break;
switch (t.dtype()) {
CALL_SUPPORTED_TYPES(CASE)
default:
return errors::Unimplemented(DataTypeString(t.dtype()),
" tensors unsupported on platform");
}
return absl::OkStatus();
#undef CASE
}
Tensor AsScalar(const Tensor& t) {
Tensor t2{t.dtype(), {}};
#define CASE(T) \
case DataTypeToEnum<T>::value: \
t2.scalar<T>()() = t.flat<T>()(0); \
break;
switch (t.dtype()) {
CALL_SUPPORTED_TYPES(CASE)
default:
t2 = {DT_FLOAT, {}};
t2.scalar<float>()() = NAN;
break;
}
return t2;
#undef CASE
}
void PatchPluginName(SummaryMetadata* metadata, const char* name) {
if (metadata->plugin_data().plugin_name().empty()) {
metadata->mutable_plugin_data()->set_plugin_name(name);
}
}
Status SetDescription(Sqlite* db, int64_t id, const StringPiece& markdown) {
const char* sql = R"sql(
INSERT OR REPLACE INTO Descriptions (id, description) VALUES (?, ?)
)sql";
SqliteStatement insert_desc;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert_desc));
insert_desc.BindInt(1, id);
insert_desc.BindText(2, markdown);
return insert_desc.StepAndReset();
}
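// Allocates random row ids, retrying with exponential backoff and widening
// the candidate id range (kIdTiers) after a collision.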
class IdAllocator {
public:
IdAllocator(Env* env, Sqlite* db) : env_{env}, db_{db} {
DCHECK(env_ != nullptr);
DCHECK(db_ != nullptr);
}
Status CreateNewId(int64_t* id) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
Status s;
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db_->Prepare("INSERT INTO Ids (id) VALUES (?)", &stmt));
for (int i = 0; i < kMaxIdCollisions; ++i) {
int64_t tid = MakeRandomId();
stmt.BindInt(1, tid);
s = stmt.StepAndReset();
if (s.ok()) {
*id = tid;
break;
}
if (s.code() != error::INVALID_ARGUMENT) break;
if (tier_ < kMaxIdTier) {
LOG(INFO) << "IdAllocator collision at tier " << tier_ << " (of "
<< kMaxIdTier << ") so auto-adjusting to a higher tier";
++tier_;
} else {
LOG(WARNING) << "IdAllocator (attempt #" << i << ") "
<< "resulted in a collision at the highest tier; this "
"is problematic if it happens often; you can try "
"pruning the Ids table; you can also file a bug "
"asking for the ID space to be increased; otherwise "
"writes will gradually slow down over time until they "
"become impossible";
}
env_->SleepForMicroseconds((1 << i) * kIdCollisionDelayMicros);
}
return s;
}
private:
int64_t MakeRandomId() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t id = static_cast<int64_t>(random::New64() & kIdTiers[tier_]);
if (id == kAbsent) ++id;
return id;
}
mutex mu_;
Env* const env_;
Sqlite* const db_;
int tier_ TF_GUARDED_BY(mu_) = 0;
IdAllocator(const IdAllocator&) = delete;
void operator=(const IdAllocator&) = delete;
};
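// Serializes a GraphDef into the Graphs, Nodes, and NodeInputs tables,
// committing the surrounding transaction whenever kFlushBytes of unflushed
// data has accumulated.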
class GraphWriter {
public:
static Status Save(Sqlite* db, SqliteTransaction* txn, IdAllocator* ids,
GraphDef* graph, uint64 now, int64_t run_id,
int64_t* graph_id)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db) {
TF_RETURN_IF_ERROR(ids->CreateNewId(graph_id));
GraphWriter saver{db, txn, graph, now, *graph_id};
saver.MapNameToNodeId();
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveNodeInputs(), "SaveNodeInputs");
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveNodes(), "SaveNodes");
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveGraph(run_id), "SaveGraph");
return absl::OkStatus();
}
private:
GraphWriter(Sqlite* db, SqliteTransaction* txn, GraphDef* graph, uint64 now,
int64_t graph_id)
: db_(db), txn_(txn), graph_(graph), now_(now), graph_id_(graph_id) {}
void MapNameToNodeId() {
    size_t num_nodes = static_cast<size_t>(graph_->node_size());
    name_copies_.reserve(num_nodes);
    name_to_node_id_.reserve(num_nodes);
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
name_copies_.emplace_back(graph_->node(node_id).name());
name_to_node_id_.emplace(name_copies_.back(), node_id);
}
}
Status SaveNodeInputs() {
const char* sql = R"sql(
INSERT INTO NodeInputs (
graph_id,
node_id,
idx,
input_node_id,
input_node_idx,
is_control
) VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
const NodeDef& node = graph_->node(node_id);
for (int idx = 0; idx < node.input_size(); ++idx) {
StringPiece name = node.input(idx);
int64_t input_node_id;
int64_t input_node_idx = 0;
int64_t is_control = 0;
size_t i = name.rfind(':');
if (i != StringPiece::npos) {
if (!strings::safe_strto64(name.substr(i + 1, name.size() - i - 1),
&input_node_idx)) {
return errors::DataLoss("Bad NodeDef.input: ", name);
}
name.remove_suffix(name.size() - i);
}
if (!name.empty() && name[0] == '^') {
name.remove_prefix(1);
is_control = 1;
}
auto e = name_to_node_id_.find(name);
if (e == name_to_node_id_.end()) {
return errors::DataLoss("Could not find node: ", name);
}
input_node_id = e->second;
insert.BindInt(1, graph_id_);
insert.BindInt(2, node_id);
insert.BindInt(3, idx);
insert.BindInt(4, input_node_id);
insert.BindInt(5, input_node_idx);
insert.BindInt(6, is_control);
unflushed_bytes_ += insert.size();
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node.name(),
" -> ", name);
TF_RETURN_IF_ERROR(MaybeFlush());
}
}
return absl::OkStatus();
}
Status SaveNodes() {
const char* sql = R"sql(
INSERT INTO Nodes (
graph_id,
node_id,
node_name,
op,
device,
node_def)
VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
NodeDef* node = graph_->mutable_node(node_id);
insert.BindInt(1, graph_id_);
insert.BindInt(2, node_id);
insert.BindText(3, node->name());
insert.BindText(4, node->op());
insert.BindText(5, node->device());
node->clear_name();
node->clear_op();
node->clear_device();
node->clear_input();
string node_def;
if (node->SerializeToString(&node_def)) {
insert.BindBlobUnsafe(6, node_def);
}
unflushed_bytes_ += insert.size();
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node->name());
TF_RETURN_IF_ERROR(MaybeFlush());
}
return absl::OkStatus();
}
Status SaveGraph(int64_t run_id) {
const char* sql = R"sql(
INSERT OR REPLACE INTO Graphs (
run_id,
graph_id,
inserted_time,
graph_def
) VALUES (?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
if (run_id != kAbsent) insert.BindInt(1, run_id);
insert.BindInt(2, graph_id_);
insert.BindDouble(3, DoubleTime(now_));
graph_->clear_node();
string graph_def;
if (graph_->SerializeToString(&graph_def)) {
insert.BindBlobUnsafe(4, graph_def);
}
return insert.StepAndReset();
}
Status MaybeFlush() {
if (unflushed_bytes_ >= kFlushBytes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(txn_->Commit(), "flushing ",
unflushed_bytes_, " bytes");
unflushed_bytes_ = 0;
}
return absl::OkStatus();
}
Sqlite* const db_;
SqliteTransaction* const txn_;
uint64 unflushed_bytes_ = 0;
GraphDef* const graph_;
const uint64 now_;
const int64_t graph_id_;
std::vector<string> name_copies_;
std::unordered_map<StringPiece, int64_t, StringPieceHasher> name_to_node_id_;
GraphWriter(const GraphWriter&) = delete;
void operator=(const GraphWriter&) = delete;
};
class RunMetadata {
public:
RunMetadata(IdAllocator* ids, const string& experiment_name,
const string& run_name, const string& user_name)
: ids_{ids},
experiment_name_{experiment_name},
run_name_{run_name},
user_name_{user_name} {
DCHECK(ids_ != nullptr);
}
const string& experiment_name() { return experiment_name_; }
const string& run_name() { return run_name_; }
const string& user_name() { return user_name_; }
int64_t run_id() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
return run_id_;
}
Status SetGraph(Sqlite* db, uint64 now, double computed_time,
std::unique_ptr<GraphDef> g) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
int64_t run_id;
{
mutex_lock lock(mu_);
TF_RETURN_IF_ERROR(InitializeRun(db, now, computed_time));
run_id = run_id_;
}
int64_t graph_id;
SqliteTransaction txn(*db);
TF_RETURN_IF_ERROR(
GraphWriter::Save(db, &txn, ids_, g.get(), now, run_id, &graph_id));
return txn.Commit();
}
Status GetTagId(Sqlite* db, uint64 now, double computed_time,
const string& tag_name, int64_t* tag_id,
const SummaryMetadata& metadata) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
TF_RETURN_IF_ERROR(InitializeRun(db, now, computed_time));
auto e = tag_ids_.find(tag_name);
if (e != tag_ids_.end()) {
*tag_id = e->second;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ids_->CreateNewId(tag_id));
tag_ids_[tag_name] = *tag_id;
TF_RETURN_IF_ERROR(
SetDescription(db, *tag_id, metadata.summary_description()));
const char* sql = R"sql(
INSERT INTO Tags (
run_id,
tag_id,
tag_name,
inserted_time,
display_name,
plugin_name,
plugin_data
) VALUES (
:run_id,
:tag_id,
:tag_name,
:inserted_time,
:display_name,
:plugin_name,
:plugin_data
)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert));
if (run_id_ != kAbsent) insert.BindInt(":run_id", run_id_);
insert.BindInt(":tag_id", *tag_id);
insert.BindTextUnsafe(":tag_name", tag_name);
insert.BindDouble(":inserted_time", DoubleTime(now));
insert.BindTextUnsafe(":display_name", metadata.display_name());
insert.BindTextUnsafe(":plugin_name", metadata.plugin_data().plugin_name());
insert.BindBlobUnsafe(":plugin_data", metadata.plugin_data().content());
return insert.StepAndReset();
}
private:
Status InitializeUser(Sqlite* db, uint64 now)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (user_id_ != kAbsent || user_name_.empty()) return absl::OkStatus();
const char* get_sql = R"sql(
SELECT user_id FROM Users WHERE user_name = ?
)sql";
SqliteStatement get;
TF_RETURN_IF_ERROR(db->Prepare(get_sql, &get));
get.BindText(1, user_name_);
bool is_done;
TF_RETURN_IF_ERROR(get.Step(&is_done));
if (!is_done) {
user_id_ = get.ColumnInt(0);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ids_->CreateNewId(&user_id_));
const char* insert_sql = R"sql(
INSERT INTO Users (
user_id,
user_name,
inserted_time
) VALUES (?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
insert.BindInt(1, user_id_);
insert.BindText(2, user_name_);
insert.BindDouble(3, DoubleTime(now));
TF_RETURN_IF_ERROR(insert.StepAndReset());
return absl::OkStatus();
}
Status InitializeExperiment(Sqlite* db, uint64 now, double computed_time)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (experiment_name_.empty()) return absl::OkStatus();
if (experiment_id_ == kAbsent) {
TF_RETURN_IF_ERROR(InitializeUser(db, now));
const char* get_sql = R"sql(
SELECT
experiment_id,
started_time
FROM
Experiments
WHERE
user_id IS ?
AND experiment_name = ?
)sql";
SqliteStatement get;
TF_RETURN_IF_ERROR(db->Prepare(get_sql, &get));
if (user_id_ != kAbsent) get.BindInt(1, user_id_);
get.BindText(2, experiment_name_);
bool is_done;
TF_RETURN_IF_ERROR(get.Step(&is_done));
if (!is_done) {
experiment_id_ = get.ColumnInt(0);
experiment_started_time_ = get.ColumnInt(1);
} else {
TF_RETURN_IF_ERROR(ids_->CreateNewId(&experiment_id_));
experiment_started_time_ = computed_time;
const char* insert_sql = R"sql(
INSERT INTO Experiments (
user_id,
experiment_id,
experiment_name,
inserted_time,
started_time,
is_watching
) VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
if (user_id_ != kAbsent) insert.BindInt(1, user_id_);
insert.BindInt(2, experiment_id_);
insert.BindText(3, experiment_name_);
insert.BindDouble(4, DoubleTime(now));
insert.BindDouble(5, computed_time);
insert.BindInt(6, 0);
TF_RETURN_IF_ERROR(insert.StepAndReset());
}
}
if (computed_time < experiment_started_time_) {
experiment_started_time_ = computed_time;
const char* update_sql = R"sql(
UPDATE
Experiments
SET
started_time = ?
WHERE
experiment_id = ?
)sql";
SqliteStatement update;
TF_RETURN_IF_ERROR(db->Prepare(update_sql, &update));
update.BindDouble(1, computed_time);
update.BindInt(2, experiment_id_);
TF_RETURN_IF_ERROR(update.StepAndReset());
}
return absl::OkStatus();
}
Status InitializeRun(Sqlite* db, uint64 now, double computed_time)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (run_name_.empty()) return absl::OkStatus();
TF_RETURN_IF_ERROR(InitializeExperiment(db, now, computed_time));
if (run_id_ == kAbsent) {
TF_RETURN_IF_ERROR(ids_->CreateNewId(&run_id_));
run_started_time_ = computed_time;
const char* insert_sql = R"sql(
INSERT OR REPLACE INTO Runs (
experiment_id,
run_id,
run_name,
inserted_time,
started_time
) VALUES (?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
if (experiment_id_ != kAbsent) insert.BindInt(1, experiment_id_);
insert.BindInt(2, run_id_);
insert.BindText(3, run_name_);
insert.BindDouble(4, DoubleTime(now));
insert.BindDouble(5, computed_time);
TF_RETURN_IF_ERROR(insert.StepAndReset());
}
if (computed_time < run_started_time_) {
run_started_time_ = computed_time;
const char* update_sql = R"sql(
UPDATE
Runs
SET
started_time = ?
WHERE
run_id = ?
)sql";
SqliteStatement update;
TF_RETURN_IF_ERROR(db->Prepare(update_sql, &update));
update.BindDouble(1, computed_time);
update.BindInt(2, run_id_);
TF_RETURN_IF_ERROR(update.StepAndReset());
}
return absl::OkStatus();
}
mutex mu_;
IdAllocator* const ids_;
const string experiment_name_;
const string run_name_;
const string user_name_;
int64_t experiment_id_ TF_GUARDED_BY(mu_) = kAbsent;
int64_t run_id_ TF_GUARDED_BY(mu_) = kAbsent;
int64_t user_id_ TF_GUARDED_BY(mu_) = kAbsent;
double experiment_started_time_ TF_GUARDED_BY(mu_) = 0.0;
double run_started_time_ TF_GUARDED_BY(mu_) = 0.0;
std::unordered_map<string, int64_t> tag_ids_ TF_GUARDED_BY(mu_);
RunMetadata(const RunMetadata&) = delete;
void operator=(const RunMetadata&) = delete;
};
class SeriesWriter {
public:
SeriesWriter(int64_t series, RunMetadata* meta)
: series_{series}, meta_{meta} {
DCHECK(series_ > 0);
}
Status Append(Sqlite* db, int64_t step, uint64 now, double computed_time,
const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (rowids_.empty()) {
Status s = Reserve(db, t);
if (!s.ok()) {
rowids_.clear();
return s;
}
}
int64_t rowid = rowids_.front();
Status s = Write(db, rowid, step, computed_time, t);
if (s.ok()) {
++count_;
}
rowids_.pop_front();
return s;
}
Status Finish(Sqlite* db) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (!rowids_.empty()) {
SqliteTransaction txn(*db);
const char* sql = R"sql(
DELETE FROM Tensors WHERE rowid = ?
)sql";
SqliteStatement deleter;
TF_RETURN_IF_ERROR(db->Prepare(sql, &deleter));
for (size_t i = count_; i < rowids_.size(); ++i) {
deleter.BindInt(1, rowids_.front());
TF_RETURN_IF_ERROR(deleter.StepAndReset());
rowids_.pop_front();
}
TF_RETURN_IF_ERROR(txn.Commit());
rowids_.clear();
}
return absl::OkStatus();
}
private:
Status Write(Sqlite* db, int64_t rowid, int64_t step, double computed_time,
const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db) {
if (t.dtype() == DT_STRING) {
if (t.dims() == 0) {
return Update(db, step, computed_time, t, t.scalar<tstring>()(), rowid);
} else {
SqliteTransaction txn(*db);
TF_RETURN_IF_ERROR(
Update(db, step, computed_time, t, StringPiece(), rowid));
TF_RETURN_IF_ERROR(UpdateNdString(db, t, rowid));
return txn.Commit();
}
} else {
return Update(db, step, computed_time, t, t.tensor_data(), rowid);
}
}
Status Update(Sqlite* db, int64_t step, double computed_time, const Tensor& t,
const StringPiece& data, int64_t rowid) {
const char* sql = R"sql(
UPDATE OR REPLACE
Tensors
SET
step = ?,
computed_time = ?,
dtype = ?,
shape = ?,
data = ?
WHERE
rowid = ?
)sql";
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
stmt.BindInt(1, step);
stmt.BindDouble(2, computed_time);
stmt.BindInt(3, t.dtype());
stmt.BindText(4, StringifyShape(t.shape()));
stmt.BindBlobUnsafe(5, data);
stmt.BindInt(6, rowid);
TF_RETURN_IF_ERROR(stmt.StepAndReset());
return absl::OkStatus();
}
Status UpdateNdString(Sqlite* db, const Tensor& t, int64_t tensor_rowid)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db) {
DCHECK_EQ(t.dtype(), DT_STRING);
DCHECK_GT(t.dims(), 0);
const char* deleter_sql = R"sql(
DELETE FROM TensorStrings WHERE tensor_rowid = ?
)sql";
SqliteStatement deleter;
TF_RETURN_IF_ERROR(db->Prepare(deleter_sql, &deleter));
deleter.BindInt(1, tensor_rowid);
TF_RETURN_WITH_CONTEXT_IF_ERROR(deleter.StepAndReset(), tensor_rowid);
const char* inserter_sql = R"sql(
INSERT INTO TensorStrings (
tensor_rowid,
idx,
data
) VALUES (?, ?, ?)
)sql";
SqliteStatement inserter;
TF_RETURN_IF_ERROR(db->Prepare(inserter_sql, &inserter));
auto flat = t.flat<tstring>();
for (int64_t i = 0; i < flat.size(); ++i) {
inserter.BindInt(1, tensor_rowid);
inserter.BindInt(2, i);
inserter.BindBlobUnsafe(3, flat(i));
TF_RETURN_WITH_CONTEXT_IF_ERROR(inserter.StepAndReset(), "i=", i);
}
return absl::OkStatus();
}
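  // Preallocates kPreallocateRows zero-filled rows in the Tensors table so
  // that later appends become in-place updates rather than inserts.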
Status Reserve(Sqlite* db, const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
SqliteTransaction txn(*db);
unflushed_bytes_ = 0;
if (t.dtype() == DT_STRING) {
if (t.dims() == 0) {
TF_RETURN_IF_ERROR(ReserveData(db, &txn, t.scalar<tstring>()().size()));
} else {
TF_RETURN_IF_ERROR(ReserveTensors(db, &txn, kReserveMinBytes));
}
} else {
TF_RETURN_IF_ERROR(ReserveData(db, &txn, t.tensor_data().size()));
}
return txn.Commit();
}
Status ReserveData(Sqlite* db, SqliteTransaction* txn, size_t size)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t space =
static_cast<int64_t>(static_cast<double>(size) * kReserveMultiplier);
if (space < kReserveMinBytes) space = kReserveMinBytes;
return ReserveTensors(db, txn, space);
}
Status ReserveTensors(Sqlite* db, SqliteTransaction* txn,
int64_t reserved_bytes)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const char* sql = R"sql(
INSERT INTO Tensors (
series,
data
) VALUES (?, ZEROBLOB(?))
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert));
for (int64_t i = 0; i < kPreallocateRows; ++i) {
insert.BindInt(1, series_);
insert.BindInt(2, reserved_bytes);
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), "i=", i);
rowids_.push_back(db->last_insert_rowid());
unflushed_bytes_ += reserved_bytes;
TF_RETURN_IF_ERROR(MaybeFlush(db, txn));
}
return absl::OkStatus();
}
Status MaybeFlush(Sqlite* db, SqliteTransaction* txn)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (unflushed_bytes_ >= kFlushBytes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(txn->Commit(), "flushing ",
unflushed_bytes_, " bytes");
unflushed_bytes_ = 0;
}
return absl::OkStatus();
}
mutex mu_;
const int64_t series_;
RunMetadata* const meta_;
uint64 count_ TF_GUARDED_BY(mu_) = 0;
std::deque<int64_t> rowids_ TF_GUARDED_BY(mu_);
uint64 unflushed_bytes_ TF_GUARDED_BY(mu_) = 0;
SeriesWriter(const SeriesWriter&) = delete;
void operator=(const SeriesWriter&) = delete;
};
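// Fans writes out to one SeriesWriter per tag id for a single run.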
class RunWriter {
public:
explicit RunWriter(RunMetadata* meta) : meta_{meta} {}
Status Append(Sqlite* db, int64_t tag_id, int64_t step, uint64 now,
double computed_time, const Tensor& t)
SQLITE_TRANSACTIONS_EXCLUDED(*db) TF_LOCKS_EXCLUDED(mu_) {
SeriesWriter* writer = GetSeriesWriter(tag_id);
return writer->Append(db, step, now, computed_time, t);
}
Status Finish(Sqlite* db) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (series_writers_.empty()) return absl::OkStatus();
for (auto i = series_writers_.begin(); i != series_writers_.end(); ++i) {
if (!i->second) continue;
TF_RETURN_WITH_CONTEXT_IF_ERROR(i->second->Finish(db),
"finish tag_id=", i->first);
i->second.reset();
}
return absl::OkStatus();
}
private:
SeriesWriter* GetSeriesWriter(int64_t tag_id) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock sl(mu_);
auto spot = series_writers_.find(tag_id);
if (spot == series_writers_.end()) {
SeriesWriter* writer = new SeriesWriter(tag_id, meta_);
series_writers_[tag_id].reset(writer);
return writer;
} else {
return spot->second.get();
}
}
mutex mu_;
RunMetadata* const meta_;
std::unordered_map<int64_t, std::unique_ptr<SeriesWriter>> series_writers_
TF_GUARDED_BY(mu_);
RunWriter(const RunWriter&) = delete;
void operator=(const RunWriter&) = delete;
};
class SummaryDbWriter : public SummaryWriterInterface {
public:
SummaryDbWriter(Env* env, Sqlite* db, const string& experiment_name,
const string& run_name, const string& user_name)
: SummaryWriterInterface(),
env_{env},
db_{db},
ids_{env_, db_},
meta_{&ids_, experiment_name, run_name, user_name},
run_{&meta_} {
DCHECK(env_ != nullptr);
db_->Ref();
}
~SummaryDbWriter() override {
core::ScopedUnref unref(db_);
Status s = run_.Finish(db_);
if (!s.ok()) {
LOG(ERROR) << s;
}
int64_t run_id = meta_.run_id();
if (run_id == kAbsent) return;
const char* sql = R"sql(
UPDATE Runs SET finished_time = ? WHERE run_id = ?
)sql";
SqliteStatement update;
s = db_->Prepare(sql, &update);
if (s.ok()) {
update.BindDouble(1, DoubleTime(env_->NowMicros()));
update.BindInt(2, run_id);
s = update.StepAndReset();
}
if (!s.ok()) {
LOG(ERROR) << "Failed to set Runs[" << run_id << "].finish_time: " << s;
}
}
Status Flush() override { return absl::OkStatus(); }
Status WriteTensor(int64_t global_step, Tensor t, const string& tag,
const string& serialized_metadata) override {
TF_RETURN_IF_ERROR(CheckSupportedType(t));
SummaryMetadata metadata;
if (!metadata.ParseFromString(serialized_metadata)) {
return errors::InvalidArgument("Bad serialized_metadata");
}
return Write(global_step, t, tag, metadata);
}
Status WriteScalar(int64_t global_step, Tensor t,
const string& tag) override {
TF_RETURN_IF_ERROR(CheckSupportedType(t));
SummaryMetadata metadata;
PatchPluginName(&metadata | #include "tensorflow/core/summary/summary_db_writer.h"
#include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
Tensor MakeScalarInt64(int64_t x) {
Tensor t(DT_INT64, TensorShape({}));
t.scalar<int64_t>()() = x;
return t;
}
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
uint64 NowSeconds() const override { return current_millis_ * 1000; }
private:
uint64 current_millis_;
};
class SummaryDbWriterTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db_));
TF_ASSERT_OK(SetupTensorboardSqliteDb(db_));
}
void TearDown() override {
if (writer_ != nullptr) {
writer_->Unref();
writer_ = nullptr;
}
db_->Unref();
db_ = nullptr;
}
int64_t QueryInt(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return -1;
}
return stmt.ColumnInt(0);
}
double QueryDouble(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return -1;
}
return stmt.ColumnDouble(0);
}
string QueryString(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return "MISSINGNO";
}
return stmt.ColumnString(0);
}
FakeClockEnv env_;
Sqlite* db_ = nullptr;
SummaryWriterInterface* writer_ = nullptr;
};
TEST_F(SummaryDbWriterTest, WriteHistogram_VerifyTensorValues) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "histtest", "test1", "user1", &env_,
&writer_));
int step = 0;
std::unique_ptr<Event> e{new Event};
e->set_step(step);
e->set_wall_time(123);
Summary::Value* s = e->mutable_summary()->add_value();
s->set_tag("normal/myhisto");
double dummy_value = 10.123;
HistogramProto* proto = s->mutable_histo();
proto->Clear();
proto->set_min(dummy_value);
proto->set_max(dummy_value);
proto->set_num(dummy_value);
proto->set_sum(dummy_value);
proto->set_sum_squares(dummy_value);
int size = 3;
double bucket_limits[] = {-30.5, -10.5, -5.5};
double bucket[] = {-10, 10, 20};
for (int i = 0; i < size; i++) {
proto->add_bucket_limit(bucket_limits[i]);
proto->add_bucket(bucket[i]);
}
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
writer_->Unref();
writer_ = nullptr;
string result = QueryString("SELECT data FROM Tensors");
const double* val = reinterpret_cast<const double*>(result.data());
double histarray[] = {std::numeric_limits<double>::min(),
-30.5,
-10,
-30.5,
-10.5,
10,
-10.5,
-5.5,
20};
int histarray_size = 9;
for (int i = 0; i < histarray_size; i++) {
EXPECT_EQ(histarray[i], val[i]);
}
}
TEST_F(SummaryDbWriterTest, NothingWritten_NoRowsCreated) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
TF_ASSERT_OK(writer_->Flush());
writer_->Unref();
writer_ = nullptr;
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Ids"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Users"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Runs"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Tags"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
}
TEST_F(SummaryDbWriterTest, TensorsWritten_RowsGetInitialized) {
SummaryMetadata metadata;
metadata.set_display_name("display_name");
metadata.set_summary_description("description");
metadata.mutable_plugin_data()->set_plugin_name("plugin_name");
metadata.mutable_plugin_data()->set_content("plugin_data");
SummaryMetadata metadata_nope;
metadata_nope.set_display_name("nope");
metadata_nope.set_summary_description("nope");
metadata_nope.mutable_plugin_data()->set_plugin_name("nope");
metadata_nope.mutable_plugin_data()->set_content("nope");
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(2, MakeScalarInt64(314LL), "taggy",
metadata_nope.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Users"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(1000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
int64_t user_id = QueryInt("SELECT user_id FROM Users");
int64_t experiment_id = QueryInt("SELECT experiment_id FROM Experiments");
int64_t run_id = QueryInt("SELECT run_id FROM Runs");
int64_t tag_id = QueryInt("SELECT tag_id FROM Tags");
EXPECT_LT(0LL, user_id);
EXPECT_LT(0LL, experiment_id);
EXPECT_LT(0LL, run_id);
EXPECT_LT(0LL, tag_id);
EXPECT_EQ("jart", QueryString("SELECT user_name FROM Users"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Users"));
EXPECT_EQ(user_id, QueryInt("SELECT user_id FROM Experiments"));
EXPECT_EQ("mad-science",
QueryString("SELECT experiment_name FROM Experiments"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Experiments"));
EXPECT_EQ(experiment_id, QueryInt("SELECT experiment_id FROM Runs"));
EXPECT_EQ("train", QueryString("SELECT run_name FROM Runs"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Runs"));
EXPECT_EQ(run_id, QueryInt("SELECT run_id FROM Tags"));
EXPECT_EQ("taggy", QueryString("SELECT tag_name FROM Tags"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Tags"));
EXPECT_EQ("display_name", QueryString("SELECT display_name FROM Tags"));
EXPECT_EQ("plugin_name", QueryString("SELECT plugin_name FROM Tags"));
EXPECT_EQ("plugin_data", QueryString("SELECT plugin_data FROM Tags"));
EXPECT_EQ("description", QueryString("SELECT description FROM Descriptions"));
EXPECT_EQ(tag_id, QueryInt("SELECT series FROM Tensors WHERE step = 1"));
EXPECT_EQ(0.023,
QueryDouble("SELECT computed_time FROM Tensors WHERE step = 1"));
EXPECT_EQ(tag_id, QueryInt("SELECT series FROM Tensors WHERE step = 2"));
EXPECT_EQ(0.046,
QueryDouble("SELECT computed_time FROM Tensors WHERE step = 2"));
}
TEST_F(SummaryDbWriterTest, EmptyParentNames_NoParentsCreated) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "", "", &env_, &writer_));
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy", ""));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Users"));
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(1000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
}
TEST_F(SummaryDbWriterTest, WriteEvent_Scalar) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "", "", &env_, &writer_));
std::unique_ptr<Event> e{new Event};
e->set_step(7);
e->set_wall_time(123.456);
Summary::Value* s = e->mutable_summary()->add_value();
s->set_tag("π");
s->set_simple_value(3.14f);
s = e->mutable_summary()->add_value();
s->set_tag("φ");
s->set_simple_value(1.61f);
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(2LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(2000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
int64_t tag1_id = QueryInt("SELECT tag_id FROM Tags WHERE tag_name = 'π'");
int64_t tag2_id = QueryInt("SELECT tag_id FROM Tags WHERE tag_name = 'φ'");
EXPECT_GT(tag1_id, 0LL);
EXPECT_GT(tag2_id, 0LL);
EXPECT_EQ(123.456, QueryDouble(strings::StrCat(
"SELECT computed_time FROM Tensors WHERE series = ",
tag1_id, " AND step = 7")));
EXPECT_EQ(123.456, QueryDouble(strings::StrCat(
"SELECT computed_time FROM Tensors WHERE series = ",
tag2_id, " AND step = 7")));
}
TEST_F(SummaryDbWriterTest, WriteGraph) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "R", "", &env_, &writer_));
env_.AdvanceByMillis(23);
GraphDef graph;
graph.mutable_library()->add_gradient()->set_function_name("funk");
NodeDef* node = graph.add_node();
node->set_name("x");
node->set_op("Placeholder");
node = graph.add_node();
node->set_name("y");
node->set_op("Placeholder");
node = graph.add_node();
node->set_name("z");
node->set_op("Love");
node = graph.add_node();
node->set_name("+");
node->set_op("Add");
node->add_input("x");
node->add_input("y");
node->add_input("^z");
node->set_device("tpu/lol");
std::unique_ptr<Event> e{new Event};
graph.SerializeToString(e->mutable_graph_def());
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Graphs"));
ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Nodes"));
ASSERT_EQ(3LL, QueryInt("SELECT COUNT(*) FROM NodeInputs"));
ASSERT_EQ(QueryInt("SELECT run_id FROM Runs"),
QueryInt("SELECT run_id FROM Graphs"));
int64_t graph_id = QueryInt("SELECT graph_id FROM Graphs");
EXPECT_GT(graph_id, 0LL);
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Graphs"));
GraphDef graph2;
graph2.ParseFromString(QueryString("SELECT graph_def FROM Graphs"));
EXPECT_EQ(0, graph2.node_size());
EXPECT_EQ("funk", graph2.library().gradient(0).function_name());
EXPECT_EQ("x", QueryString("SELECT node_name FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("y", QueryString("SELECT node_name FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("z", QueryString("SELECT node_name FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("+", QueryString("SELECT node_name FROM Nodes WHERE node_id = 3"));
EXPECT_EQ("Placeholder",
QueryString("SELECT op FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("Placeholder",
QueryString("SELECT op FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("Love", QueryString("SELECT op FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("Add", QueryString("SELECT op FROM Nodes WHERE node_id = 3"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("tpu/lol",
QueryString("SELECT device FROM Nodes WHERE node_id = 3"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(0LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(1LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(2LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(1LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 2"));
}
TEST_F(SummaryDbWriterTest, UsesIdsTable) {
SummaryMetadata metadata;
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Ids"));
EXPECT_EQ(4LL, QueryInt(strings::StrCat(
"SELECT COUNT(*) FROM Ids WHERE id IN (",
QueryInt("SELECT user_id FROM Users"), ", ",
QueryInt("SELECT experiment_id FROM Experiments"), ", ",
QueryInt("SELECT run_id FROM Runs"), ", ",
QueryInt("SELECT tag_id FROM Tags"), ")")));
}
TEST_F(SummaryDbWriterTest, SetsRunFinishedTime) {
SummaryMetadata metadata;
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(0.023, QueryDouble("SELECT started_time FROM Runs"));
ASSERT_EQ(0.0, QueryDouble("SELECT finished_time FROM Runs"));
env_.AdvanceByMillis(23);
writer_->Unref();
writer_ = nullptr;
ASSERT_EQ(0.023, QueryDouble("SELECT started_time FROM Runs"));
ASSERT_EQ(0.046, QueryDouble("SELECT finished_time FROM Runs"));
}
}
} |
1,580 | cpp | tensorflow/tensorflow | kernel_fallback_compat_request_state | tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.cc | tensorflow/core/runtime_fallback/test/kernel_fallback_compat_request_state_test.cc | #ifndef TENSORFLOW_CORE_RUNTIME_FALLBACK_KERNEL_KERNEL_FALLBACK_COMPAT_REQUEST_STATE_H__
#define TENSORFLOW_CORE_RUNTIME_FALLBACK_KERNEL_KERNEL_FALLBACK_COMPAT_REQUEST_STATE_H__
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/graph_executor/config.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/pointer_util.h"
namespace tensorflow {
namespace tfd {
class FallbackResourceArray {
public:
void SetResource(int index, tfrt_stub::ImmutableTensor tensor);
tfrt::AsyncValuePtr<tfrt_stub::FallbackTensor> GetResource(int index) const {
return resource_async_values_.at(index).AsPtr();
}
const tfrt_stub::FallbackTensor& GetResourceAsFallbackTensor(
int index) const {
return GetResource(index).get();
}
private:
std::vector<std::unique_ptr<tfrt_stub::ImmutableTensor>> resources_;
std::vector<std::unique_ptr<
tfrt::internal::AsyncValueStorage<tfrt_stub::FallbackTensor>>>
resource_storage_;
std::vector<tfrt::AsyncValueOwningRef<tfrt_stub::FallbackTensor>>
resource_async_values_;
};
class KernelFallbackCompatRequestState {
public:
KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer> step_container,
std::unique_ptr<CollectiveExecutor::Handle> collective_executor,
core::RefCountPtr<Rendezvous> rendezvous,
tfrt_stub::OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr);
KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
tfrt_stub::OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr);
int64_t step_id() const { return step_id_; }
tensorflow::Device* custom_device(const tensorflow::Device* device) const {
auto it = custom_device_.find(device);
if (it == custom_device_.end()) return nullptr;
return it->second.get();
}
tensorflow::Device* cpu_device() const { return cpu_device_; }
tensorflow::FunctionLibraryRuntime* cpu_function_library_runtime() const {
return cpu_function_library_runtime_;
}
ScopedStepContainer* step_container() const { return step_container_.get(); }
const tensorflow::DeviceMgr& device_manager() const {
return *device_manager_;
}
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime() const {
return *pflr_;
}
CollectiveExecutor* collective_executor() const {
return collective_executor_;
}
tfrt_stub::OpKernelRunnerTable* runner_table() const { return runner_table_; }
FallbackResourceArray* resource_array() const { return resource_array_; }
std::function<void(std::function<void()>)>* runner() const { return runner_; }
CancellationManager* cancellation_manager() const {
return cancellation_manager_;
}
void set_cancellation_manager(CancellationManager* cancellation_manager) {
cancellation_manager_ = cancellation_manager;
}
RendezvousInterface* rendezvous() const { return rendezvous_.get(); }
void set_log_device_placement(bool log) { log_device_placement_ = log; }
bool log_device_placement() const { return log_device_placement_; }
tensorflow::thread::ThreadPoolInterface* intra_op_threadpool() const {
return intra_op_threadpool_;
}
const SessionMetadata& session_metadata() const { return session_metadata_; }
tensorflow::tfrt_stub::CostRecorder* cost_recorder() const {
return cost_recorder_;
}
void set_cost_recorder(tensorflow::tfrt_stub::CostRecorder* cost_recorder) {
cost_recorder_ = cost_recorder;
}
tfrt::ResourceContext* client_graph_resource_context() const {
return client_graph_resource_context_;
}
void set_client_graph_resource_context(
tfrt::ResourceContext* client_graph_resource_context) {
client_graph_resource_context_ = client_graph_resource_context;
}
void set_runtime_config(
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config) {
runtime_config_ = runtime_config;
}
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config() const {
return runtime_config_;
}
private:
int64_t step_id_ = 0;
std::function<void(std::function<void()>)>* runner_ = nullptr;
::tfrt::OwnedOrUnownedPtr<ScopedStepContainer> step_container_;
absl::flat_hash_map<const tensorflow::Device*,
std::unique_ptr<tensorflow::Device>>
custom_device_;
std::unique_ptr<tensorflow::Device> custom_cpu_device_;
tensorflow::Device* cpu_device_ = nullptr;
tensorflow::FunctionLibraryRuntime* cpu_function_library_runtime_ = nullptr;
std::unique_ptr<CollectiveExecutor::Handle> collective_executor_handle_;
CollectiveExecutor* collective_executor_ = nullptr;
core::RefCountPtr<Rendezvous> rendezvous_;
CancellationManager* cancellation_manager_ = nullptr;
const tensorflow::DeviceMgr* device_manager_ = nullptr;
tfrt_stub::OpKernelRunnerTable* runner_table_ = nullptr;
FallbackResourceArray* resource_array_ = nullptr;
tensorflow::thread::ThreadPoolInterface* intra_op_threadpool_ = nullptr;
SessionMetadata session_metadata_;
const tensorflow::ProcessFunctionLibraryRuntime* pflr_ = nullptr;
bool log_device_placement_ = false;
tensorflow::tfrt_stub::CostRecorder* cost_recorder_ = nullptr;
tfrt::ResourceContext* client_graph_resource_context_ = nullptr;
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config_ = nullptr;
};
Status SetUpKernelFallbackCompatRequestContext(
tfrt::RequestContextBuilder* builder,
const tensorflow::DeviceMgr* device_manager,
const tensorflow::ProcessFunctionLibraryRuntime* pflr,
tfrt_stub::OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const std::optional<SessionMetadata>& model_metadata,
std::function<void(std::function<void()>)>* runner,
tfrt_stub::CostRecorder* cost_recorder,
tfrt::ResourceContext* client_graph_resource_context,
tensorflow::CancellationManager* cancellation_manager,
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config);
}
}
#endif
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include <cstdlib>
#include <cstring>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/graph_executor/config.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/pointer_util.h"
namespace tensorflow {
namespace tfd {
using ::tensorflow::tfrt_stub::OpKernelRunnerTable;
void FallbackResourceArray::SetResource(
int index, tensorflow::tfrt_stub::ImmutableTensor tensor) {
if (resource_async_values_.size() <= index) {
resource_storage_.resize(index + 1);
resource_async_values_.resize(index + 1);
}
DCHECK(resource_storage_[index].get() == nullptr);
DCHECK(resource_async_values_[index].AsPtr().value() == nullptr);
resources_.push_back(std::make_unique<tensorflow::tfrt_stub::ImmutableTensor>(
std::move(tensor)));
resource_storage_[index] = std::make_unique<
tfrt::internal::AsyncValueStorage<tfrt_stub::FallbackTensor>>();
resource_async_values_[index] =
tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(
*resource_storage_[index], resources_.back().get());
}
KernelFallbackCompatRequestState::KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer> step_container,
std::unique_ptr<CollectiveExecutor::Handle> collective_executor_handle,
core::RefCountPtr<Rendezvous> rendezvous, OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr)
: step_id_(step_id),
runner_(runner),
step_container_(std::move(step_container)),
collective_executor_handle_(std::move(collective_executor_handle)),
collective_executor_(collective_executor_handle_
? collective_executor_handle_->get()
: nullptr),
rendezvous_(std::move(rendezvous)),
device_manager_(device_manager),
runner_table_(runner_table),
resource_array_(resource_array),
intra_op_threadpool_(user_intra_op_threadpool),
pflr_(pflr) {
DCHECK(runner_);
DCHECK(device_manager_);
DCHECK(runner_table_);
DCHECK(resource_array_);
DCHECK(rendezvous_);
DCHECK(pflr_);
cpu_device_ = device_manager_->HostCPU();
cpu_function_library_runtime_ = pflr_->GetFLR(cpu_device_->name());
if (user_intra_op_threadpool != nullptr) {
custom_cpu_device_ = tensorflow::RenamedDevice::NewRenamedDevice(
cpu_device_->name(), cpu_device_, false,
false, user_intra_op_threadpool);
cpu_device_ = custom_cpu_device_.get();
for (auto* device : device_manager_->ListDevices()) {
custom_device_[device] = tensorflow::RenamedDevice::NewRenamedDevice(
device->name(), device, false,
false, user_intra_op_threadpool);
}
}
if (model_metadata.has_value()) {
session_metadata_ = *model_metadata;
}
}
KernelFallbackCompatRequestState::KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
OpKernelRunnerTable* runner_table, FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr)
: KernelFallbackCompatRequestState(
runner, device_manager, step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer>{
std::make_unique<ScopedStepContainer>(
step_id,
[step_id, device_manager](const std::string& name) {
for (tensorflow::Device* device :
device_manager->ListDevices()) {
auto status = device->resource_manager()->Cleanup(name);
(void)status;
tensorflow::ScopedAllocatorMgr* sam =
device->GetScopedAllocatorMgr();
if (sam) sam->Cleanup(step_id);
}
})},
nullptr,
core::RefCountPtr<RefCountedIntraProcessRendezvous>(
new RefCountedIntraProcessRendezvous(device_manager)),
runner_table, resource_array, user_intra_op_threadpool,
model_metadata, pflr) {}
static std::function<void(std::function<void()>)>* GetDefaultRunner() {
static auto* const default_runner =
new std::function<void(std::function<void()>)>(
[](const std::function<void()>& f) { f(); });
return default_runner;
}
Status SetUpKernelFallbackCompatRequestContext(
tfrt::RequestContextBuilder* builder,
const tensorflow::DeviceMgr* device_manager,
const tensorflow::ProcessFunctionLibraryRuntime* pflr,
tfrt_stub::OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
std::function<void(std::function<void()>)>* runner,
tfrt_stub::CostRecorder* cost_recorder,
tfrt::ResourceContext* client_graph_resource_context,
tensorflow::CancellationManager* cancellation_manager,
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config) {
DCHECK(builder);
DCHECK(device_manager);
DCHECK(pflr);
DCHECK(runner_table);
DCHECK(resource_array);
auto& fallback_request_state =
builder->context_data().emplace<KernelFallbackCompatRequestState>(
runner ? runner : GetDefaultRunner(), device_manager, builder->id(),
runner_table, resource_array, user_intra_op_threadpool,
model_metadata, pflr);
fallback_request_state.set_cost_recorder(cost_recorder);
fallback_request_state.set_client_graph_resource_context(
client_graph_resource_context);
fallback_request_state.set_cancellation_manager(cancellation_manager);
fallback_request_state.set_runtime_config(runtime_config);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
namespace tfd {
namespace {
TEST(FallbackResourceArrayTest, SetAndGetResourceOk) {
Tensor tensor_1 =
test::AsTensor<float>({0.0, 1.0, 2.0, 3.0}, TensorShape({1, 4}));
tfrt_stub::ImmutableTensor imm_tensor_1 =
tfrt_stub::ImmutableTensor::Create(tensor_1);
tensorflow::Tensor tensor_2 =
test::AsTensor<float>({5.0, 6.0, 7.0}, tensorflow::TensorShape({1, 3}));
tfrt_stub::ImmutableTensor imm_tensor_2 =
tfrt_stub::ImmutableTensor::Create(tensor_2);
FallbackResourceArray resource_array;
resource_array.SetResource(0, imm_tensor_1);
resource_array.SetResource(1, imm_tensor_2);
test::ExpectTensorEqual<float>(resource_array.GetResource(0)->tensor(),
tensor_1);
test::ExpectTensorEqual<float>(resource_array.GetResource(1)->tensor(),
tensor_2);
test::ExpectTensorEqual<float>(
resource_array.GetResourceAsFallbackTensor(0).tensor(), tensor_1);
test::ExpectTensorEqual<float>(
resource_array.GetResourceAsFallbackTensor(1).tensor(), tensor_2);
}
}
}
} |
1,581 | cpp | tensorflow/tensorflow | attr_util | tensorflow/core/runtime_fallback/util/attr_util.cc | tensorflow/core/runtime_fallback/util/attr_util_test.cc | #ifndef TENSORFLOW_CORE_RUNTIME_FALLBACK_UTIL_ATTR_UTIL_H_
#define TENSORFLOW_CORE_RUNTIME_FALLBACK_UTIL_ATTR_UTIL_H_
#include <vector>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tfrt/bef/bef_encoding.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/kernel_utils.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace tfd {
inline absl::string_view ToAbslStringView(tfrt::string_view sv) {
return absl::string_view(sv.data(), sv.size());
}
tensorflow::Status ParseTfDataType(absl::string_view dtype,
DataType* data_type);
DataType ConvertToTfDataType(tfrt::OpAttrType op_attr_type);
tfrt::OpAttrType ConvertFromTfDataType(DataType data_type);
DataType ConvertBefAttrTypeToTfDataType(tfrt::DType attr_type);
tfrt::DType ConvertTfDataTypeToBefAttrType(DataType data_type);
tensorflow::Status ParseTensorAttrValue(absl::string_view attr_value,
tensorflow::Tensor* tensor);
tensorflow::Status ParseTensorShapeAttrValue(absl::string_view attr_value,
std::vector<int64_t>* shape_val);
tensorflow::Status ParseBoolAttrValue(absl::string_view attr_value,
bool* bool_val);
tensorflow::Status ParseIntAttrValue(absl::string_view attr_value,
int64_t* int_val);
inline std::vector<absl::string_view> AttrValueSplit(absl::string_view str) {
return absl::StrSplit(str, absl::MaxSplits('$', 1));
}
bool IsUnusedAttribute(absl::string_view attr_name);
llvm::Error FillAttrValueMap(const tfrt::OpAttrsRef& attrs,
tfrt::HostContext* host,
AttrValueMap* attr_value_map);
tensorflow::Status SetUpAttrValueMap(tfrt::AggregateAttr op_attr_array,
tfrt::AggregateAttr op_func_attr_array,
tensorflow::AttrValueMap* attr_value_map);
}
}
#endif
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/utils/tensor_util.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/attribute_utils.h"
#include "tfrt/support/error_util.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/logging.h"
#include "tfrt/tensor/dense_host_tensor.h"
#include "tfrt/tensor/tensor_serialize_utils.h"
namespace tensorflow {
namespace tfd {
namespace {
using ::tensorflow::protobuf::RepeatedFieldBackInserter;
using ::tfrt::AggregateAttr;
using ::tfrt::BEFAttributeType;
using ::tfrt::DenseAttr;
using ::tfrt::DenseHostTensor;
using ::tfrt::HostContext;
using ::tfrt::OpAttrsRawEntry;
using ::tfrt::OpAttrsRef;
using ::tfrt::OpAttrType;
using ::tfrt::string_view;
llvm::Expected<tensorflow::Tensor> DecodeDenseAttrToTfTensor(
const DenseAttr& dense_attr, HostContext* host) {
llvm::Expected<DenseHostTensor> dht =
tfrt::DeserializeDenseHostTensorFromDenseAttr(dense_attr, host);
if (!dht) {
return tfrt::MakeStringError(
"Cannot create DenseHostTensor in DecodeDenseAttrToTensorInterface: ",
dht.takeError());
}
return tfrt::TFRTTensorToTFTensor(*dht);
}
llvm::Error FillAttrValueMapUsingArray(const OpAttrsRawEntry& entry,
AttrValue& attr_tmp,
const OpAttrsRef& attrs) {
attr_tmp.mutable_list()->Clear();
if (entry.element_count == 0) {
if (entry.type == OpAttrType::CHAR) {
attr_tmp.set_s("");
}
return llvm::Error::success();
}
switch (entry.type) {
case OpAttrType::CHAR: {
string_view attr_value = attrs.GetStringAsserting(entry.name);
attr_tmp.set_s(attr_value.data(), attr_value.size());
return llvm::Error::success();
}
case OpAttrType::FUNC: {
string_view attr_value = attrs.GetFuncNameAsserting(entry.name);
attr_tmp.mutable_func()->set_name(attr_value.data(), attr_value.size());
return llvm::Error::success();
}
case OpAttrType::I64: {
llvm::ArrayRef<int64_t> int_array =
attrs.GetArrayAsserting<int64_t>(entry.name);
auto* mutable_i = attr_tmp.mutable_list()->mutable_i();
std::copy(int_array.begin(), int_array.end(),
RepeatedFieldBackInserter(mutable_i));
return llvm::Error::success();
}
case OpAttrType::F32: {
llvm::ArrayRef<float> float_array =
attrs.GetArrayAsserting<float>(entry.name);
auto* mutable_f = attr_tmp.mutable_list()->mutable_f();
std::copy(float_array.begin(), float_array.end(),
RepeatedFieldBackInserter(mutable_f));
return llvm::Error::success();
}
case OpAttrType::BOOL: {
llvm::ArrayRef<bool> bool_array =
attrs.GetArrayAsserting<bool>(entry.name);
auto mutable_b = attr_tmp.mutable_list()->mutable_b();
std::copy(bool_array.begin(), bool_array.end(),
RepeatedFieldBackInserter(mutable_b));
return llvm::Error::success();
}
case OpAttrType::DTYPE: {
const auto& op_attr = attrs.GetRawAsserting(entry.name);
assert(op_attr.IsArray());
auto bef_dtypes =
llvm::ArrayRef(static_cast<const tfrt::DType*>(op_attr.GetData()),
op_attr.element_count);
llvm::SmallVector<tensorflow::DataType, 4> tf_dtypes;
tf_dtypes.reserve(bef_dtypes.size());
for (auto bef_dtype : bef_dtypes) {
tf_dtypes.push_back(ConvertBefAttrTypeToTfDataType(bef_dtype));
}
auto* mutable_type = attr_tmp.mutable_list()->mutable_type();
std::copy(tf_dtypes.begin(), tf_dtypes.end(),
RepeatedFieldBackInserter(mutable_type));
return llvm::Error::success();
}
default:
return tfrt::MakeStringError("unsupported array attribute type");
}
}
llvm::Error FillAttrValueMapUsingAggregate(const OpAttrsRawEntry& entry,
AttrValue& attr_tmp,
const OpAttrsRef& attrs) {
AggregateAttr list_attr = attrs.GetAsserting<AggregateAttr>(entry.name);
int num_values = list_attr.GetNumElements();
if (num_values == 0) {
attr_tmp.mutable_list();
return llvm::Error::success();
}
auto attr_base = list_attr.GetAttribute(0);
auto* mutable_list = attr_tmp.mutable_list();
mutable_list->Clear();
if (IsDataTypeAttribute(attr_base.type()) &&
GetDataType(attr_base.type()) == tfrt::DType::String) {
auto* mutable_s = mutable_list->mutable_s();
mutable_s->Reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto string_attr = list_attr.GetAttributeOfType<tfrt::StringAttr>(i);
mutable_list->add_s(string_attr.GetValue().data(),
string_attr.GetValue().size());
}
} else if (attr_base.type() == BEFAttributeType::kFunc) {
auto* mutable_f = mutable_list->mutable_func();
mutable_f->Reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto func_attr = list_attr.GetAttributeOfType<tfrt::FuncAttr>(i);
auto mutable_func = mutable_list->add_func();
mutable_func->set_name(func_attr.GetFunctionName().str());
}
} else if (attr_base.type() == BEFAttributeType::kShape) {
auto* mutable_list = attr_tmp.mutable_list();
auto* mutable_shape = mutable_list->mutable_shape();
mutable_shape->Reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto shape_attr = list_attr.GetAttributeOfType<tfrt::ShapeAttr>(i);
auto* added_shape = mutable_list->add_shape();
if (shape_attr.HasRank()) {
int rank = shape_attr.GetRank();
auto shape = shape_attr.GetShape();
added_shape->mutable_dim()->Reserve(rank);
for (int d = 0; d < rank; ++d) {
added_shape->add_dim()->set_size(shape[d]);
}
} else {
added_shape->set_unknown_rank(true);
}
}
} else {
return tfrt::MakeStringError("unsupported list attribute type");
}
return llvm::Error::success();
}
llvm::Error FillAttrValueMapUsingScalar(const OpAttrsRawEntry& entry,
AttrValue& attr_tmp, HostContext* host,
const OpAttrsRef& attrs) {
switch (entry.type) {
case OpAttrType::I64: {
int64_t attr_value = attrs.GetAsserting<int64_t>(entry.name);
attr_tmp.set_i(attr_value);
return llvm::Error::success();
}
case OpAttrType::F32: {
float attr_value = attrs.GetAsserting<float>(entry.name);
attr_tmp.set_f(attr_value);
return llvm::Error::success();
}
case OpAttrType::BOOL: {
bool attr_value = attrs.GetAsserting<bool>(entry.name);
attr_tmp.set_b(attr_value);
return llvm::Error::success();
}
case OpAttrType::DTYPE: {
OpAttrType op_attr_type = attrs.GetAsserting<OpAttrType>(entry.name);
DataType tf_dtype = ConvertToTfDataType(op_attr_type);
attr_tmp.set_type(tf_dtype);
return llvm::Error::success();
}
case OpAttrType::SHAPE: {
auto shape_attr = attrs.GetAsserting<tfrt::ShapeAttr>(entry.name);
auto* mutable_shape = attr_tmp.mutable_shape();
if (shape_attr.HasRank()) {
int rank = shape_attr.GetRank();
auto shape = shape_attr.GetShape();
mutable_shape->mutable_dim()->Reserve(rank);
for (int d = 0; d < rank; ++d) {
mutable_shape->add_dim()->set_size(shape[d]);
}
} else {
mutable_shape->set_unknown_rank(true);
}
return llvm::Error::success();
}
case OpAttrType::DENSE: {
auto dense_attr = attrs.GetAsserting<tfrt::DenseAttr>(entry.name);
llvm::Expected<tensorflow::Tensor> tf_tensor =
DecodeDenseAttrToTfTensor(dense_attr, host);
if (!tf_tensor) return tf_tensor.takeError();
auto* mutable_tensor = attr_tmp.mutable_tensor();
if (tf_tensor->NumElements() > 1) {
tf_tensor->AsProtoTensorContent(mutable_tensor);
} else {
tf_tensor->AsProtoField(mutable_tensor);
}
return llvm::Error::success();
}
case OpAttrType::AGGREGATE: {
return FillAttrValueMapUsingAggregate(entry, attr_tmp, attrs);
}
default:
LOG(ERROR) << "failure case";
return tfrt::MakeStringError("unsupported scalar attribute type");
}
}
}
Status ParseTfDataType(absl::string_view dtype, DataType* data_type) {
if (dtype == "DT_INT8") {
*data_type = DataType::DT_INT8;
return absl::OkStatus();
} else if (dtype == "DT_INT32") {
*data_type = DataType::DT_INT32;
return absl::OkStatus();
} else if (dtype == "DT_INT64") {
*data_type = DataType::DT_INT64;
return absl::OkStatus();
} else if (dtype == "DT_HALF") {
*data_type = DataType::DT_HALF;
return absl::OkStatus();
} else if (dtype == "DT_FLOAT") {
*data_type = DataType::DT_FLOAT;
return absl::OkStatus();
} else if (dtype == "DT_DOUBLE") {
*data_type = DataType::DT_DOUBLE;
return absl::OkStatus();
} else {
return errors::InvalidArgument("Unsupported dtype, ", std::string(dtype),
" in ParseTfDataType.");
}
}
DataType ConvertToTfDataType(tfrt::OpAttrType op_attr_type) {
switch (op_attr_type) {
#define OP_ATTR_TYPE(TFRT_ENUM, DT_ENUM) \
case tfrt::OpAttrType::TFRT_ENUM: \
return DataType::DT_ENUM;
#include "tensorflow/core/runtime_fallback/util/attr_type.def"
default:
TFRT_DLOG(ERROR) << "unsupported dtype" << static_cast<int>(op_attr_type)
<< " in TFRT fallback kernel.";
abort();
}
}
tfrt::OpAttrType ConvertFromTfDataType(DataType data_type) {
switch (data_type) {
#define OP_ATTR_TYPE(TFRT_ENUM, DT_ENUM) \
case DataType::DT_ENUM: \
return tfrt::OpAttrType::TFRT_ENUM;
#include "tensorflow/core/runtime_fallback/util/attr_type.def"
default:
TFRT_DLOG(ERROR) << "unsupported dtype " << static_cast<int>(data_type)
<< "in TFRT fallback kernel.";
abort();
}
}
DataType ConvertBefAttrTypeToTfDataType(tfrt::DType attr_type) {
switch (attr_type) {
case tfrt::DType::I1:
return DataType::DT_BOOL;
case tfrt::DType::I8:
return DataType::DT_INT8;
case tfrt::DType::I16:
return DataType::DT_INT16;
case tfrt::DType::I32:
return DataType::DT_INT32;
case tfrt::DType::I64:
return DataType::DT_INT64;
case tfrt::DType::UI8:
return DataType::DT_UINT8;
case tfrt::DType::UI16:
return DataType::DT_UINT16;
case tfrt::DType::UI32:
return DataType::DT_UINT32;
case tfrt::DType::UI64:
return DataType::DT_UINT64;
case tfrt::DType::F16:
return DataType::DT_HALF;
case tfrt::DType::BF16:
return DataType::DT_BFLOAT16;
case tfrt::DType::F32:
return DataType::DT_FLOAT;
case tfrt::DType::F64:
return DataType::DT_DOUBLE;
case tfrt::DType::Complex64:
return DataType::DT_COMPLEX64;
case tfrt::DType::Complex128:
return DataType::DT_COMPLEX128;
case tfrt::DType::String:
return DataType::DT_STRING;
case tfrt::DType::Resource:
return DataType::DT_RESOURCE;
case tfrt::DType::Variant:
return DataType::DT_VARIANT;
case tfrt::DType::QUI8:
return DataType::DT_QUINT8;
case tfrt::DType::QUI16:
return DataType::DT_QUINT16;
case tfrt::DType::QI8:
return DataType::DT_QINT8;
case tfrt::DType::QI16:
return DataType::DT_QINT16;
case tfrt::DType::QI32:
return DataType::DT_QINT32;
default:
TFRT_DLOG(ERROR) << "unsupported tfrt::DType"
<< static_cast<int>(attr_type)
<< " in TFRT fallback kernel.";
abort();
}
}
tfrt::DType ConvertTfDataTypeToBefAttrType(DataType data_type) {
switch (data_type) {
case DataType::DT_UINT8:
return tfrt::DType::UI8;
case DataType::DT_UINT16:
return tfrt::DType::UI16;
case DataType::DT_UINT32:
return tfrt::DType::UI32;
case DataType::DT_UINT64:
return tfrt::DType::UI64;
case DataType::DT_BOOL:
return tfrt::DType::I1;
case DataType::DT_INT8:
return tfrt::DType::I8;
case DataType::DT_INT16:
return tfrt::DType::I16;
case DataType::DT_INT32:
return tfrt::DType::I32;
case DataType::DT_INT64:
return tfrt::DType::I64;
case DataType::DT_HALF:
return tfrt::DType::F16;
case DataType::DT_BFLOAT16:
return tfrt::DType::BF16;
case DataType::DT_FLOAT:
return tfrt::DType::F32;
case DataType::DT_DOUBLE:
return tfrt::DType::F64;
case DataType::DT_COMPLEX64:
return tfrt::DType::Complex64;
case DataType::DT_COMPLEX128:
return tfrt::DType::Complex128;
case DataType::DT_STRING:
return tfrt::DType::String;
case DataType::DT_RESOURCE:
return tfrt::DType::Resource;
case DataType::DT_VARIANT:
return tfrt::DType::Variant;
case DataType::DT_QUINT8:
return tfrt::DType::QUI8;
case DataType::DT_QUINT16:
return tfrt::DType::QUI16;
case DataType::DT_QINT8:
return tfrt::DType::QI8;
case DataType::DT_QINT16:
return tfrt::DType::QI16;
case DataType::DT_QINT32:
return tfrt::DType::QI32;
default:
TFRT_DLOG(ERROR) << "unsupported DataType " << static_cast<int>(data_type)
<< " in TFRT fallback kernel.";
abort();
}
}
Status ParseBoolAttrValue(absl::string_view attr_value, bool* bool_val) {
if (attr_value == "false") {
*bool_val = false;
return absl::OkStatus();
} else if (attr_value == "true") {
*bool_val = true;
return absl::OkStatus();
} else {
return errors::InvalidArgument("Could not parse bool from \"", attr_value,
"\"");
}
}
Status ParseIntAttrValue(absl::string_view attr_value, int64_t* int_val) {
bool success = absl::SimpleAtoi(attr_value, int_val);
if (!success) {
return errors::InvalidArgument("Could not parse int from \"", attr_value,
"\"");
}
return absl::OkStatus();
}
Status ParseTensorAttrValue(absl::string_view attr_value,
tensorflow::Tensor* tensor) {
if (std::is_base_of<tensorflow::protobuf::Message,
tensorflow::TensorProto>()) {
tensorflow::TensorProto tensor_proto;
auto* message = reinterpret_cast<protobuf::Message*>(&tensor_proto);
if (protobuf::TextFormat::ParseFromString(
static_cast<std::string>(attr_value), message) &&
tensor->FromProto(tensor_proto)) {
return absl::OkStatus();
} else {
return errors::InvalidArgument("Could not parse tensor value from \"",
attr_value, "\"");
}
} else {
return errors::InvalidArgument(
"Tensor attributes are not supported on mobile.");
}
}
Status ParseTensorShapeAttrValue(absl::string_view attr_value,
std::vector<int64_t>* shape_val) {
if (attr_value.size() < 2 || attr_value[0] != '[' ||
attr_value[attr_value.size() - 1] != ']') {
return errors::InvalidArgument(
"Tensor shape attribute must be a string of the form [1,2...], instead "
"got \"",
attr_value, "\"");
}
absl::string_view attr_value_trunc =
attr_value.substr(1, attr_value.size() - 2);
auto container = absl::StrSplit(attr_value_trunc, ',');
for (auto it = container.begin(); it != container.end(); ++it) {
int64_t int_val;
if (!ParseIntAttrValue(*it, &int_val).ok()) {
return errors::InvalidArgument("Failed to parse an integer value from ",
*it, " while parsing shape.");
}
shape_val->push_back(int_val);
}
return absl::OkStatus();
}
bool IsUnusedAttribute(absl::string_view attr_name) {
return absl::StrContains(attr_name, "result_segment_sizes") ||
absl::StrContains(attr_name, "operand_segment_sizes") ||
absl::EndsWith(attr_name, "_tf_data_function");
}
llvm::Error FillAttrValueMap(const tfrt::OpAttrsRef& attrs,
tfrt::HostContext* host,
tensorflow::AttrValueMap* attr_value_map) {
AttrValue attr_tmp;
llvm::Error error = llvm::Error::success();
attrs.IterateEntries([&error, attr_value_map, &attr_tmp, host,
&attrs](const OpAttrsRawEntry& entry) {
assert(strcmp(entry.name, "device") != 0);
if (IsUnusedAttribute(entry.name)) {
return;
} else if (entry.IsArray()) {
error = FillAttrValueMapUsingArray(entry, attr_tmp, attrs);
} else {
error = FillAttrValueMapUsingScalar(entry, attr_tmp, host, attrs);
}
if (error) return;
attr_value_map->insert(AttrValueMap::value_type(entry.name, attr_tmp));
});
return error;
}
namespace {
tensorflow::Tensor CreateTfTensorFromDenseAttr(tfrt::DenseAttr attr) {
tensorflow::TensorShape shape(absl::InlinedVector<int64_t, 4>(
attr.shape().begin(), attr.shape().end()));
tensorflow::DataType dtype = ConvertBefAttrTypeToTfDataType(attr.dtype());
tensorflow::Tensor tensor(dtype, shape);
std::memcpy(tensor.data(), attr.GetElements(), tensor.TotalBytes());
return tensor;
}
Status SetUpScalarAttr(tfrt::TypedAttrBase bef_attr,
tensorflow::AttrValue* tf_attr) {
if (auto shape_attr = bef_attr.dyn_cast<tfrt::ShapeAttr>()) {
if (shape_attr.HasRank()) {
tensorflow::PartialTensorShape tf_shape(shape_attr.GetShape());
tf_shape.AsProto(tf_attr->mutable_shape());
} else {
tensorflow::PartialTensorShape unranked_shape;
unranked_shape.AsProto(tf_attr->mutable_shape());
}
} else if (auto dense_attr = bef_attr.dyn_cast<tfrt::DenseAttr>()) {
auto tf_tensor = CreateTfTensorFromDenseAttr(dense_attr);
tf_tensor.AsProtoTensorContent(tf_attr->mutable_tensor());
} else if (auto type_attr = bef_attr.dyn_cast<tfrt::TypeAttr>()) {
tf_attr->set_type(ConvertBefAttrTypeToTfDataType(type_attr.GetValue()));
} else if (auto i1_attr = bef_attr.dyn_cast<tfrt::I1Attr>()) {
tf_attr->set_b(i1_attr.GetValue());
} else if (auto f32_attr = bef_attr.dyn_cast<tfrt::F32Attr>()) {
tf_attr->set_f(f32_attr.GetValue());
} else if (auto i64_attr = bef_attr.dyn_cast<tfrt::I64Attr>()) {
tf_attr->set_i(i64_attr.GetValue());
} else if (auto string_attr = bef_attr.dyn_cast<tfrt::StringAttr>()) {
tf_attr->set_s(string_attr.GetValue().data(),
string_attr.GetValue().size());
} else {
return tensorflow::errors::Internal("Failed to set up attribute.");
}
return absl::OkStatus();
}
Status SetUpScalarFunctionAttr(tfrt::StringAttr func_attr,
tensorflow::AttrValue& tf_attr) {
tfrt::string_view func_name = func_attr.GetValue();
tf_attr.mutable_func()->set_name(func_name.data(), func_name.size());
return absl::OkStatus();
}
void AddShapeToAttrList(tfrt::ShapeAttr shape,
tensorflow::AttrValue::ListValue* list) {
if (shape.HasRank()) {
tensorflow::PartialTensorShape tf_shape(shape.GetShape());
tf_shape.AsProto(list->add_shape());
return;
}
tensorflow::PartialTensorShape unranked_shape;
unranked_shape.AsProto(list->add_shape());
}
void AddTensorToAttrList(tfrt::DenseAttr dense_attr,
tensorflow::AttrValue::ListValue* list) {
auto tf_tensor = CreateTfTensorFromDenseAttr(dense_attr);
tf_tensor.AsProtoTensorContent(list->add_tensor());
}
Status SetUpListAttr(tfrt::AggregateAttr aggregate_attr,
tensorflow::AttrValue* tf_attr) {
auto* list = tf_attr->mutable_list();
for (int i = 0; i < aggregate_attr.GetNumElements(); ++i) {
auto base = aggregate_attr.GetAttribute(i);
if (auto shape_attr = base.dyn_cast<tfrt::ShapeAttr>()) {
AddShapeToAttrList(shape_attr, list);
} else if (auto dense_attr = base.dyn_cast<tfrt::DenseAttr>()) {
AddTensorToAttrList(dense_attr, list);
} else if (auto string_attr = base.dyn_cast<tfrt::StringAttr>()) {
list->add_s(string_attr.GetValue().data(), string_attr.GetValue().size());
} else {
return tensorflow::errors::Internal("Failed to set up list attr.");
}
}
return absl::OkStatus();
}
Status SetUpListAttr(tfrt::ArrayAttr array_attr,
tensorflow::AttrValue* tf_attr) {
auto* list = tf_attr->mutable_list();
if (array_attr.GetNumElements() == 0) {
return absl::OkStatus();
}
tfrt::BEFAttributeType element_type = array_attr.GetElementType();
if (tfrt::IsDataTypeAttribute(element_type)) {
tfrt::DType dtype = GetDataType(element_type);
switch (dtype) {
case tfrt::DType::I1: {
for (auto value : array_attr.GetValue<bool>()) {
list->add_b(value);
}
return absl::OkStatus();
}
case tfrt::DType::I64: {
for (auto value : array_attr.GetValue<int64_t>()) {
list->add_i(value);
}
return absl::OkStatus();
}
case tfrt::DType::F32: {
for (auto value : array_attr.GetValue<float>()) {
list->add_f(value);
}
return absl::OkStatus();
}
default:
return tensorflow::errors::Internal(
StrCat("Failed to set up list attr: unsupported dtype: ",
tfrt::DType(dtype)));
}
} else if (element_type == tfrt::BEFAttributeType::kType) {
for (auto value : array_attr.GetValue<tfrt::DType>()) {
list->add_type(ConvertBefAttrTypeToTfDataType(value));
}
return absl::OkStatus();
}
return tensorflow::errors::Internal("Failed to set up list attr.");
}
}
Status SetUpAttrValueMap(tfrt::AggregateAttr op_attr_array,
tfrt::AggregateAttr op_func_attr_array,
tensorflow::AttrValueMap* attr_value_map) {
auto obtain_name_attr_pair =
[](tfrt::AggregateAttr attr_array,
int i) -> std::pair<std::string, tfrt::TypedAttrBase> {
auto pair = attr_array.GetAttributeOfType<tfrt::AggregateAttr>(i);
assert(pair.GetNumElements() == 2);
return {pair.GetAttributeOfType<tfrt::StringAttr>(0).GetValue().str(),
pair.GetAttribute(1)};
};
for (size_t i = 0, e = op_attr_array.GetNumElements(); i != e; ++i) {
auto name_attr_pair = obtain_name_attr_pair(op_attr_array, i);
if (IsUnusedAttribute(name_attr_pair.first)) continue;
AttrValue& tf_attr = (*attr_value_map)[name_attr_pair.first];
tfrt::TypedAttrBase attr_value = name_attr_pair.second;
if (auto aggregate_attr = attr_value.dyn_cast<tfrt::AggregateAttr>()) {
TF_RETURN_IF_ERROR(SetUpListAttr(aggregate_attr, &tf_attr));
} else if (auto array_attr = attr_value.dyn_cast<tfrt::ArrayAttr>()) {
TF_RETURN_IF_ERROR(SetUpListAttr(array_attr, &tf_attr));
} else {
TF_RETURN_IF_ERROR(SetUpScalarAttr(attr_value, &tf_attr));
}
}
for (size_t i = 0, e = op_func_attr_array.GetNumElements(); i != e; ++i) {
auto name_attr_pair = obtain_name_attr_pair(op_func_attr_array, i);
if (IsUnusedAttribute(name_attr_pair.first)) continue;
AttrValue& tf_attr = (*attr_value_map)[name_attr_pair.first];
auto attr_value = name_attr_pair.second.dyn_cast<tfrt::StringAttr>();
TF_RETURN_IF_ERROR(SetUpScalarFunctionAttr(attr_value, tf_attr));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tfrt/bef/bef_encoding.h"
#include "tfrt/bef_converter/bef_attr_encoder.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/dtype/dtype.h"
#include "tfrt/host_context/attribute_utils.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace tfd {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::EqualsProto;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
std::unique_ptr<tfrt::HostContext> CreateTestHostContext() {
return std::make_unique<tfrt::HostContext>(
[](const tfrt::DecodedDiagnostic&) {}, tfrt::CreateMallocAllocator(),
tfrt::CreateSingleThreadedWorkQueue());
}
struct DataTypeAndString {
std::string str_val;
DataType dtype;
};
class ParseTfDataTypeTest : public ::testing::TestWithParam<DataTypeAndString> {
};
INSTANTIATE_TEST_SUITE_P(
AllDTypes, ParseTfDataTypeTest,
::testing::Values(DataTypeAndString{"DT_INT8", DataType::DT_INT8},
DataTypeAndString{"DT_INT32", DataType::DT_INT32},
DataTypeAndString{"DT_INT64", DataType::DT_INT64},
DataTypeAndString{"DT_HALF", DataType::DT_HALF},
DataTypeAndString{"DT_FLOAT", DataType::DT_FLOAT},
DataTypeAndString{"DT_DOUBLE", DataType::DT_DOUBLE}));
TEST_P(ParseTfDataTypeTest, Ok) {
DataType data_type;
ASSERT_EQ(ParseTfDataType(GetParam().str_val, &data_type), absl::OkStatus());
EXPECT_EQ(data_type, GetParam().dtype);
}
TEST(ParseTfDataTypeTest, ReturnsInvalidArgument) {
DataType data_type;
EXPECT_EQ(ParseTfDataType("DT_BFLOAT16_REF", &data_type),
errors::InvalidArgument(
"Unsupported dtype, DT_BFLOAT16_REF in ParseTfDataType."));
}
TEST(UtilsTest, ToAbslStringViewOk) {
std::string str("Tensorflow Runtime");
tfrt::string_view str_view(str);
EXPECT_EQ(ToAbslStringView(str_view), str);
}
struct OpAttrTypeAndDType {
tfrt::OpAttrType op_attr_type;
DataType dtype;
};
class OpAttrTypeDTypeTest
: public ::testing::TestWithParam<OpAttrTypeAndDType> {};
INSTANTIATE_TEST_SUITE_P(
AllDTypes, OpAttrTypeDTypeTest,
::testing::Values(
OpAttrTypeAndDType{tfrt::OpAttrType::BOOL, DataType::DT_BOOL},
OpAttrTypeAndDType{tfrt::OpAttrType::UI8, DataType::DT_UINT8},
OpAttrTypeAndDType{tfrt::OpAttrType::I8, DataType::DT_INT8},
OpAttrTypeAndDType{tfrt::OpAttrType::I16, DataType::DT_INT16},
OpAttrTypeAndDType{tfrt::OpAttrType::UI16, DataType::DT_UINT16},
OpAttrTypeAndDType{tfrt::OpAttrType::I32, DataType::DT_INT32},
OpAttrTypeAndDType{tfrt::OpAttrType::UI32, DataType::DT_UINT32},
OpAttrTypeAndDType{tfrt::OpAttrType::I64, DataType::DT_INT64},
OpAttrTypeAndDType{tfrt::OpAttrType::UI64, DataType::DT_UINT64},
OpAttrTypeAndDType{tfrt::OpAttrType::BF16, DataType::DT_BFLOAT16},
OpAttrTypeAndDType{tfrt::OpAttrType::F16, DataType::DT_HALF},
OpAttrTypeAndDType{tfrt::OpAttrType::F32, DataType::DT_FLOAT},
OpAttrTypeAndDType{tfrt::OpAttrType::F64, DataType::DT_DOUBLE},
OpAttrTypeAndDType{tfrt::OpAttrType::COMPLEX64, DataType::DT_COMPLEX64},
OpAttrTypeAndDType{tfrt::OpAttrType::COMPLEX128,
DataType::DT_COMPLEX128},
OpAttrTypeAndDType{tfrt::OpAttrType::CHAR, DataType::DT_STRING},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QUI8,
DataType::DT_QUINT8},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QUI16,
DataType::DT_QUINT16},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QI8,
DataType::DT_QINT8},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QI16,
DataType::DT_QINT16},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_QI32,
DataType::DT_QINT32},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_RESOURCE,
DataType::DT_RESOURCE},
OpAttrTypeAndDType{tfrt::OpAttrType::UNSUPPORTED_VARIANT,
DataType::DT_VARIANT}));
TEST_P(OpAttrTypeDTypeTest, ToTfDataTypeOk) {
EXPECT_EQ(ConvertToTfDataType(GetParam().op_attr_type), GetParam().dtype);
}
TEST_P(OpAttrTypeDTypeTest, FromTfDataTypeOk) {
EXPECT_EQ(ConvertFromTfDataType(GetParam().dtype), GetParam().op_attr_type);
}
TEST(OpAttrTypeDTypeTest, DeathUnsupportedDType) {
EXPECT_DEATH(ConvertFromTfDataType(DataType::DT_RESOURCE_REF), "");
}
struct TfrtDTypeAndTensorflowDType {
tfrt::DType tfrt_dtype;
DataType dtype;
};
class TfrtToTensorflowDTypeTest
: public ::testing::TestWithParam<TfrtDTypeAndTensorflowDType> {};
INSTANTIATE_TEST_SUITE_P(
AllDTypes, TfrtToTensorflowDTypeTest,
::testing::Values(
TfrtDTypeAndTensorflowDType{tfrt::DType::I1, DataType::DT_BOOL},
TfrtDTypeAndTensorflowDType{tfrt::DType::UI8, DataType::DT_UINT8},
TfrtDTypeAndTensorflowDType{tfrt::DType::I8, DataType::DT_INT8},
TfrtDTypeAndTensorflowDType{tfrt::DType::I16, DataType::DT_INT16},
TfrtDTypeAndTensorflowDType{tfrt::DType::UI16, DataType::DT_UINT16},
TfrtDTypeAndTensorflowDType{tfrt::DType::I32, DataType::DT_INT32},
TfrtDTypeAndTensorflowDType{tfrt::DType::UI32, DataType::DT_UINT32},
TfrtDTypeAndTensorflowDType{tfrt::DType::I64, DataType::DT_INT64},
TfrtDTypeAndTensorflowDType{tfrt::DType::UI64, DataType::DT_UINT64},
TfrtDTypeAndTensorflowDType{tfrt::DType::BF16, DataType::DT_BFLOAT16},
TfrtDTypeAndTensorflowDType{tfrt::DType::F16, DataType::DT_HALF},
TfrtDTypeAndTensorflowDType{tfrt::DType::F32, DataType::DT_FLOAT},
TfrtDTypeAndTensorflowDType{tfrt::DType::F64, DataType::DT_DOUBLE},
TfrtDTypeAndTensorflowDType{tfrt::DType::Complex64,
DataType::DT_COMPLEX64},
TfrtDTypeAndTensorflowDType{tfrt::DType::Complex128,
DataType::DT_COMPLEX128},
TfrtDTypeAndTensorflowDType{tfrt::DType::String, DataType::DT_STRING},
TfrtDTypeAndTensorflowDType{tfrt::DType::QUI8, DataType::DT_QUINT8},
TfrtDTypeAndTensorflowDType{tfrt::DType::QUI16, DataType::DT_QUINT16},
TfrtDTypeAndTensorflowDType{tfrt::DType::QI8, DataType::DT_QINT8},
TfrtDTypeAndTensorflowDType{tfrt::DType::QI16, DataType::DT_QINT16},
TfrtDTypeAndTensorflowDType{tfrt::DType::QI32, DataType::DT_QINT32},
TfrtDTypeAndTensorflowDType{tfrt::DType::Resource,
DataType::DT_RESOURCE},
TfrtDTypeAndTensorflowDType{tfrt::DType::Variant,
DataType::DT_VARIANT}));
TEST_P(TfrtToTensorflowDTypeTest, BefAttrTypeToTfDataTypeOk) {
EXPECT_EQ(ConvertBefAttrTypeToTfDataType(GetParam().tfrt_dtype),
GetParam().dtype);
}
TEST_P(TfrtToTensorflowDTypeTest, TfDataTypeTpBefAttrTypeOk) {
EXPECT_EQ(ConvertTfDataTypeToBefAttrType(GetParam().dtype),
GetParam().tfrt_dtype);
}
TEST(TfrtToTensorflowDTypeTest, DeathUnsupportedDType) {
EXPECT_DEATH(ConvertTfDataTypeToBefAttrType(DataType::DT_RESOURCE_REF), "");
}
TEST(UtilsTest, ParseTensorAttrValueOk) {
tensorflow::Tensor tensor;
std::string tensor_str = R"pb(dtype: DT_INT32
tensor_shape {
dim { size: 2 }
dim { size: 2 }
}
int_val: 1
int_val: 1
int_val: 1
int_val: 1)pb";
ASSERT_EQ(ParseTensorAttrValue(tensor_str, &tensor), absl::OkStatus());
EXPECT_EQ(tensor.dtype(), DT_INT32);
EXPECT_EQ(tensor.NumElements(), 4);
}
TEST(UtilsTest, ParseTensorAttrValueReturnsInvalidArgument) {
tensorflow::Tensor tensor;
std::string tensor_str = R"pb(foobar)pb";
EXPECT_EQ(
ParseTensorAttrValue(tensor_str, &tensor),
errors::InvalidArgument("Could not parse tensor value from \"foobar\""));
}
TEST(UtilsTest, ParseTensorShapeAttrValueOk) {
std::vector<int64_t> dims;
ASSERT_THAT(ParseTensorShapeAttrValue("[1,2,3]", &dims), absl::OkStatus());
EXPECT_THAT(dims, ElementsAre(Eq(1), Eq(2), Eq(3)));
}
TEST(UtilsTest, ParseTensorShapeAttrValueInvalidArgument) {
std::vector<int64_t> dims;
EXPECT_EQ(
ParseTensorShapeAttrValue("foobar", &dims),
errors::InvalidArgument("Tensor shape attribute must be a string of the "
"form [1,2...], instead got \"foobar\""));
}
TEST(UtilsTest, ParseTensorShapeAttrValueInvalidArgumentEmptyString) {
std::vector<int64_t> dims;
EXPECT_EQ(ParseTensorShapeAttrValue("", &dims),
errors::InvalidArgument("Tensor shape attribute must be a string "
"of the form [1,2...], instead got \"\""));
}
TEST(UtilsTest, ParseBoolAttrValueOk) {
bool bool_val;
ASSERT_THAT(ParseBoolAttrValue("false", &bool_val), absl::OkStatus());
EXPECT_FALSE(bool_val);
ASSERT_THAT(ParseBoolAttrValue("true", &bool_val), absl::OkStatus());
EXPECT_TRUE(bool_val);
}
TEST(UtilsTest, ParseBoolAttrValueInvalidArgument) {
bool bool_val;
EXPECT_EQ(ParseBoolAttrValue("foobar", &bool_val),
errors::InvalidArgument("Could not parse bool from \"foobar\""));
}
TEST(UtilsTest, ParseIntAttrValueOk) {
int64_t int_val;
ASSERT_THAT(ParseIntAttrValue("42", &int_val), absl::OkStatus());
EXPECT_EQ(int_val, 42);
}
TEST(UtilsTest, ParseIntAttrValueInvalidArgument) {
int64_t int_val;
EXPECT_EQ(ParseIntAttrValue("foobar", &int_val),
errors::InvalidArgument("Could not parse int from \"foobar\""));
}
TEST(UtilsTest, IsUnusedAttributeOk) {
EXPECT_TRUE(IsUnusedAttribute("result_segment_sizes"));
EXPECT_TRUE(IsUnusedAttribute("operand_segment_sizes"));
EXPECT_TRUE(IsUnusedAttribute("_tf_data_function"));
EXPECT_FALSE(IsUnusedAttribute("device"));
}
TEST(UtilsTest, FillAttrValueMapOk) {
tfrt::OpAttrs attrs;
attrs.SetArray("shape", tfrt::ArrayRef<int64_t>{2, 2});
attrs.SetArray("values", tfrt::ArrayRef<float>{2});
attrs.SetArray("flags", tfrt::ArrayRef<bool>{false, true});
attrs.SetArray("baz", tfrt::ArrayRef<char>{'a'});
attrs.Set<bool>("transpose_a", false);
attrs.Set<bool>("transpose_b", true);
attrs.Set<int64_t>("result_segment_sizes", 2);
attrs.Set<float>("foo", 2);
attrs.Set<int64_t>("bar", 2);
tfrt::AggregateAttr aggAttr;
attrs.Set<tfrt::AggregateAttr>("aggAttr", aggAttr);
AttrValueMap map;
auto host_context = CreateTestHostContext();
ASSERT_FALSE(llvm::errorToBool(
FillAttrValueMap(attrs.freeze(), host_context.get(), &map)));
EXPECT_THAT(
map,
UnorderedElementsAre(
Pair(Eq("shape"), EqualsProto(R"pb(list { i: 2 i: 2 })pb")),
Pair(Eq("values"), EqualsProto(R"pb(list { f: 2 })pb")),
Pair(Eq("flags"), EqualsProto(R"pb(list { b: false b: true })pb")),
Pair(Eq("baz"), EqualsProto(R"pb(s: "a")pb")),
Pair(Eq("transpose_a"), EqualsProto(R"pb(b: false)pb")),
Pair(Eq("transpose_b"), EqualsProto(R"pb(b: true)pb")),
Pair(Eq("foo"), EqualsProto(R"pb(f: 2)pb")),
Pair(Eq("bar"), EqualsProto(R"pb(i: 2)pb")),
Pair(Eq("aggAttr"), EqualsProto(R"pb(list {})pb"))));
}
}
}
} |
1,582 | cpp | tensorflow/tensorflow | tfrt_op_kernel | tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.cc | tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel_test.cc | #ifndef TENSORFLOW_CORE_RUNTIME_FALLBACK_KERNEL_TFRT_OP_KERNEL_H_
#define TENSORFLOW_CORE_RUNTIME_FALLBACK_KERNEL_TFRT_OP_KERNEL_H_
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ManagedStatic.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include "tfrt/common/compat/eigen/thread_pool_device.h"
#include "tfrt/core_runtime/op_attrs.h"
namespace tfrt {
class AsyncKernelFrame;
}
namespace tensorflow {
class TFRTOpKernel;
class TFRTOpMeta;
class Tensor;
class TensorShape;
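// TFRTOpKernelConstruction plays the role of tensorflow::OpKernelConstruction
// for kernels dispatched through the TFRT fallback: attributes are read from a
// tfrt::OpAttrsRef via GetAttr, and failures are recorded as an error string
// (see CtxFailure) rather than propagated as a Status.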
class TFRTOpKernelConstruction {
public:
explicit TFRTOpKernelConstruction(const tfrt::OpAttrsRef& attributes);
template <class T>
Status GetAttr(StringPiece attr_name, T* value) const;
void CtxFailure(const Status& s);
void CtxFailureWithWarning(const Status& s);
void CtxFailure(const char* file, int line, const Status& s);
void CtxFailureWithWarning(const char* file, int line, const Status& s);
Status MatchSignature(const DataTypeSlice expected_inputs,
const DataTypeSlice expected_outputs) {
return absl::OkStatus();
}
const std::optional<std::string>& error();
private:
const tfrt::OpAttrsRef& attributes_;
std::optional<std::string> error_;
};
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::string* value) const;
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
DataType* value) const;
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
Padding* value) const;
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::vector<int32>* value) const;
Status MissingAttributeError(StringPiece attr_name);
template <class T>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
T* value) const {
bool success = attributes_.Get<T>(
llvm::StringRef(attr_name.data(), attr_name.size()), value);
if (!success) {
return MissingAttributeError(attr_name);
}
return absl::OkStatus();
}
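// TFRTOpKernelContext is the fallback counterpart of OpKernelContext: inputs
// arrive as TFRT async values holding tensorflow::Tensor, outputs are stored
// in a plain std::vector<Tensor>, and expected output dtypes are looked up in
// the TFRTOpMeta registered for the op.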
class TFRTOpKernelContext {
public:
explicit TFRTOpKernelContext(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs,
int num_outputs, const TFRTOpMeta* op_meta, tfrt::HostContext* host);
const Tensor& output(int index);
const std::optional<std::string>& error();
bool ValidateInputsAreSameShape(TFRTOpKernel* op);
const Tensor& input(int index);
int num_inputs() const;
void set_output(int index, const Tensor& tensor);
int num_outputs() const;
bool forward_input_to_output_with_shape(int input_index, int output_index,
const TensorShape& output_shape,
Tensor** output) {
return false;
}
Status allocate_temp(DataType type, const TensorShape& shape,
Tensor* out_temp);
Status allocate_output(int index, const TensorShape& shape, Tensor** tensor);
DataType expected_output_dtype(int i) const;
template <typename EigenDeviceType>
const EigenDeviceType& eigen_device() const;
void CtxFailure(const Status& s);
void CtxFailureWithWarning(const Status& s);
void CtxFailure(const char* file, int line, const Status& s);
void CtxFailureWithWarning(const char* file, int line, const Status& s);
private:
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs_;
const TFRTOpMeta* op_meta_;
std::vector<Tensor> outputs_;
std::optional<std::string> error_;
tfrt::compat::EigenHostContext eigen_host_context_;
};
class TFRTOpKernel {
public:
explicit TFRTOpKernel(TFRTOpKernelConstruction* context) {}
virtual ~TFRTOpKernel() = default;
virtual void Compute(TFRTOpKernelContext* context) = 0;
};
inline void CheckNotInComputeAsync(TFRTOpKernelConstruction*, const char*) {}
inline void CheckNotInComputeAsync(TFRTOpKernelContext*, const char*) {}
class TFRTOpMeta {
public:
explicit TFRTOpMeta(std::vector<DataType> output_types);
DataType output_type(int index) const;
private:
std::vector<DataType> output_types_;
};
class TFRTOpMetaBuilder {
public:
explicit TFRTOpMetaBuilder(StringPiece op_name);
TFRTOpMetaBuilder& Output(StringPiece output_spec);
TFRTOpMetaBuilder& Input(StringPiece input_spec);
TFRTOpMetaBuilder& Attr(StringPiece attr_spec);
const string& op_name() const;
TFRTOpMeta BuildMeta() const;
private:
string op_name_;
std::vector<DataType> output_types_;
};
class TFRTOpMetaMap {
public:
TFRTOpMetaMap();
void RegisterOpMeta(const TFRTOpMetaBuilder& op_builder);
const TFRTOpMeta* GetOpMeta(StringPiece op_name) const;
private:
llvm::StringMap<TFRTOpMeta> op_metas_;
};
extern llvm::ManagedStatic<TFRTOpMetaMap> tfrt_forwarding_op_meta_map;
class TFRTOpRegisterer {
public:
TFRTOpRegisterer(
const TFRTOpMetaBuilder& op_builder);
};
#define REGISTER_KERNEL_FALLBACK_OP(name) \
REGISTER_KERNEL_FALLBACK_OP_UNIQ_HELPER(__COUNTER__, name)
#define REGISTER_KERNEL_FALLBACK_OP_UNIQ_HELPER(ctr, name) \
REGISTER_KERNEL_FALLBACK_OP_UNIQ(ctr, name)
#define REGISTER_KERNEL_FALLBACK_OP_UNIQ(ctr, name) \
static TFRTOpRegisterer global_tfrt_forwarding_op_meta_builder_##ctr##_ = \
TFRTOpMetaBuilder(name)
struct TFRTOpKernelReg {
using CallbackT =
std::unique_ptr<TFRTOpKernel> (*)(TFRTOpKernelConstruction*);
explicit TFRTOpKernelReg(CallbackT callback) : callback(callback) {}
CallbackT callback;
llvm::StringMap<DataType> type_constraints;
};
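// TFRTOpKernelReg couples a kernel factory callback with optional dtype
// constraints on attributes; TFRTOpKernelFactories keeps one or more such
// registrations per kernel name, and CreateKernel picks the first whose
// constraints are satisfied by the construction attributes.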
class TFRTOpKernelFactories {
public:
TFRTOpKernelFactories();
void RegisterFactory(StringPiece kernel_class_name,
TFRTOpKernelReg kernel_info);
std::unique_ptr<TFRTOpKernel> CreateKernel(
StringPiece kernel_class_name,
TFRTOpKernelConstruction* op_kernel_construction) const;
private:
llvm::StringMap<std::vector<TFRTOpKernelReg>> factories_;
};
extern llvm::ManagedStatic<TFRTOpKernelFactories>
tfrt_forwarding_kernel_factories;
#define REGISTER_KERNEL_FALLBACK_KERNEL(name, ...) \
REGISTER_KERNEL_FALLBACK_KERNEL_UNIQ_HELPER(__COUNTER__, name, __VA_ARGS__)
#define REGISTER_KERNEL_FALLBACK_KERNEL_UNIQ_HELPER(ctr, name, ...) \
REGISTER_KERNEL_FALLBACK_KERNEL_UNIQ(ctr, name, __VA_ARGS__)
#define REGISTER_KERNEL_FALLBACK_KERNEL_UNIQ(ctr, name, ...) \
static bool global_tfrt_forwarding_kernel_##ctr##_registered_ = []() { \
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory( \
name, TFRTOpKernelReg([](TFRTOpKernelConstruction* construction) \
-> std::unique_ptr<TFRTOpKernel> { \
return std::make_unique<__VA_ARGS__>(construction); \
})); \
return true; \
}();
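// Illustrative sketch (not part of the original header): a fallback op and its
// kernel would typically be registered with the macros above roughly like
// this. "ExampleAddOne" and ExampleAddOneKernel are hypothetical names used
// only for illustration.
//
//   class ExampleAddOneKernel : public TFRTOpKernel {
//    public:
//     explicit ExampleAddOneKernel(TFRTOpKernelConstruction* construction)
//         : TFRTOpKernel(construction) {}
//     void Compute(TFRTOpKernelContext* context) override { /* ... */ }
//   };
//   REGISTER_KERNEL_FALLBACK_OP("ExampleAddOne").Output("out: int32");
//   REGISTER_KERNEL_FALLBACK_KERNEL("ExampleAddOne", ExampleAddOneKernel);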
}
#endif
#include "tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "llvm/Support/raw_ostream.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include "tensorflow/core/tfrt/utils/error_util.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/kernel_frame.h"
namespace tensorflow {
TFRTOpKernelConstruction::TFRTOpKernelConstruction(
const tfrt::OpAttrsRef& attributes)
: attributes_(std::move(attributes)) {}
Status MissingAttributeError(StringPiece attr_name) {
return errors::InvalidArgument("Missing attribute: ", attr_name);
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::string* value) const {
tfrt::string_view view;
bool success = attributes_.GetString(
llvm::StringRef(attr_name.data(), attr_name.size()), &view);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = view.str();
return absl::OkStatus();
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
DataType* value) const {
tfrt::OpAttrType attrtype;
bool success = attributes_.Get<tfrt::OpAttrType>(
llvm::StringRef(attr_name.data(), attr_name.size()), &attrtype);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = tfd::ConvertToTfDataType(attrtype);
return absl::OkStatus();
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
Padding* value) const {
std::string padding_str;
TF_RETURN_IF_ERROR(GetAttr<std::string>(attr_name, &padding_str));
return GetPaddingFromString(padding_str, value);
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::vector<int32>* value) const {
llvm::ArrayRef<int32> arrayref;
bool success = attributes_.GetArray<int32>(
llvm::StringRef(attr_name.data(), attr_name.size()), &arrayref);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = arrayref;
return absl::OkStatus();
}
void TFRTOpKernelConstruction::CtxFailure(const Status& s) {
error_ = tfrt::MakeStatusString(s);
}
void TFRTOpKernelConstruction::CtxFailureWithWarning(const Status& s) {
CtxFailure(s);
}
namespace {
std::string FillFailureMessage(const char* file, int line, const Status& s) {
std::string error;
llvm::raw_string_ostream sstr(error);
sstr << "OP_REQUIRES failed at " << file << ":" << line << " : "
<< tfrt::MakeStatusString(s);
sstr.str();
return error;
}
}
void TFRTOpKernelConstruction::CtxFailure(const char* file, int line,
const Status& s) {
error_ = FillFailureMessage(file, line, s);
}
void TFRTOpKernelConstruction::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
CtxFailure(file, line, s);
}
const std::optional<std::string>& TFRTOpKernelConstruction::error() {
return error_;
}
TFRTOpKernelContext::TFRTOpKernelContext(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs, int num_outputs,
const TFRTOpMeta* op_meta, tfrt::HostContext* host)
: inputs_(inputs),
op_meta_(op_meta),
outputs_(num_outputs),
eigen_host_context_(host) {}
const Tensor& TFRTOpKernelContext::output(int index) { return outputs_[index]; }
const std::optional<std::string>& TFRTOpKernelContext::error() {
return error_;
}
bool TFRTOpKernelContext::ValidateInputsAreSameShape(TFRTOpKernel* op) {
return true;
}
const Tensor& TFRTOpKernelContext::input(int index) {
return inputs_[index]->get<Tensor>();
}
int TFRTOpKernelContext::num_inputs() const { return inputs_.size(); }
int TFRTOpKernelContext::num_outputs() const { return outputs_.size(); }
void TFRTOpKernelContext::set_output(int index, const Tensor& tensor) {
outputs_[index] = tensor;
}
Status TFRTOpKernelContext::allocate_temp(DataType type,
const TensorShape& shape,
Tensor* out_temp) {
*out_temp = Tensor(type, shape);
return absl::OkStatus();
}
Status TFRTOpKernelContext::allocate_output(int index, const TensorShape& shape,
Tensor** tensor) {
DataType output_type = op_meta_->output_type(index);
outputs_[index] = Tensor(output_type, shape);
*tensor = &outputs_[index];
return absl::OkStatus();
}
DataType TFRTOpKernelContext::expected_output_dtype(int i) const {
return op_meta_->output_type(i);
}
void TFRTOpKernelContext::CtxFailure(const Status& s) { error_ = s.message(); }
void TFRTOpKernelContext::CtxFailureWithWarning(const Status& s) {
CtxFailure(s);
}
void TFRTOpKernelContext::CtxFailure(const char* file, int line,
const Status& s) {
error_ = FillFailureMessage(file, line, s);
}
void TFRTOpKernelContext::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
CtxFailure(file, line, s);
}
template <>
const Eigen::ThreadPoolDevice& TFRTOpKernelContext::eigen_device() const {
return eigen_host_context_.Device();
}
TFRTOpMeta::TFRTOpMeta(std::vector<DataType> output_types)
: output_types_(std::move(output_types)) {}
DataType TFRTOpMeta::output_type(int index) const {
return output_types_[index];
}
TFRTOpMetaBuilder::TFRTOpMetaBuilder(StringPiece op_name) : op_name_(op_name) {}
namespace {
DataType ParseInputOutputSpec(StringPiece spec) {
std::vector<absl::string_view> name_type =
absl::StrSplit(spec, absl::MaxSplits(':', 2));
DataType data_type;
bool success =
DataTypeFromString(absl::StripAsciiWhitespace(name_type[1]), &data_type);
assert(success && "Failed to parse DataType");
(void)success;
return data_type;
}
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Output(StringPiece output_spec) {
output_types_.push_back(ParseInputOutputSpec(output_spec));
return *this;
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Input(StringPiece input_spec) {
return *this;
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Attr(StringPiece attr_spec) {
return *this;
}
const string& TFRTOpMetaBuilder::op_name() const { return op_name_; }
TFRTOpMeta TFRTOpMetaBuilder::BuildMeta() const {
return TFRTOpMeta(output_types_);
}
TFRTOpMetaMap::TFRTOpMetaMap() = default;
void TFRTOpMetaMap::RegisterOpMeta(const TFRTOpMetaBuilder& op_builder) {
auto insert_result = op_metas_.insert(
std::make_pair(op_builder.op_name(), op_builder.BuildMeta()));
assert(insert_result.second && "Multiple registrations for the same op_name");
(void)insert_result;
}
const TFRTOpMeta* TFRTOpMetaMap::GetOpMeta(StringPiece op_name) const {
auto it = op_metas_.find(llvm::StringRef(op_name.data(), op_name.size()));
if (it == op_metas_.end()) return nullptr;
return &it->second;
}
TFRTOpRegisterer::TFRTOpRegisterer(const TFRTOpMetaBuilder& op_builder) {
tfrt_forwarding_op_meta_map->RegisterOpMeta(op_builder);
}
llvm::ManagedStatic<TFRTOpMetaMap> tfrt_forwarding_op_meta_map;
llvm::ManagedStatic<TFRTOpKernelFactories> tfrt_forwarding_kernel_factories;
TFRTOpKernelFactories::TFRTOpKernelFactories() = default;
void TFRTOpKernelFactories::RegisterFactory(StringPiece kernel_class_name,
TFRTOpKernelReg kernel_info) {
factories_[std::string(kernel_class_name)].push_back(kernel_info);
}
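// Returns OK iff every dtype constraint registered for |kernel_class_name| is
// matched by the corresponding "tfdtype" attribute reachable through
// |construction|.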
Status ValidKernelAttr(StringPiece kernel_class_name,
TFRTOpKernelConstruction* construction,
const llvm::StringMap<DataType>& constraints) {
for (const auto& constraint : constraints) {
auto attr_name = std::string(constraint.first());
DataType type;
Status s = construction->GetAttr(attr_name, &type);
if (!s.ok()) {
return errors::InvalidArgument(
"Kernel ", kernel_class_name,
" has constraint for unset tfdtype attribute ", attr_name, ".");
}
if (type != constraint.second) {
return errors::InvalidArgument(
"Kernel ", kernel_class_name, " with type constraint ", attr_name,
": ", DataTypeString(constraint.second),
" does not match attribute type ", DataTypeString(type), ".");
}
}
return absl::OkStatus();
}
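// Walks the registrations for |kernel_class_name| in order and returns the
// first kernel whose type constraints pass ValidKernelAttr; if the name is
// unknown or no registration matches, the failure is reported via CtxFailure
// and a null kernel is returned.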
std::unique_ptr<TFRTOpKernel> TFRTOpKernelFactories::CreateKernel(
StringPiece kernel_class_name,
TFRTOpKernelConstruction* op_kernel_construction) const {
auto it = factories_.find(std::string(kernel_class_name));
if (it == factories_.end()) {
op_kernel_construction->CtxFailure(errors::NotFound(
"Could not find kernel ", kernel_class_name, " in the registry."));
return std::unique_ptr<TFRTOpKernel>(nullptr);
}
Status status;
for (const auto& kernel_info : it->second) {
Status s = ValidKernelAttr(kernel_class_name, op_kernel_construction,
kernel_info.type_constraints);
if (s.ok()) {
return kernel_info.callback(op_kernel_construction);
}
status.Update(s);
}
op_kernel_construction->CtxFailure(status);
return std::unique_ptr<TFRTOpKernel>(nullptr);
}
} | #include "tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/error_codes.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/padding.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace tensorflow {
namespace {
std::unique_ptr<tfrt::HostContext> CreateTestHostContext(int num_threads) {
return std::make_unique<tfrt::HostContext>(
[](const tfrt::DecodedDiagnostic&) {}, tfrt::CreateMallocAllocator(),
tfrt::CreateSingleThreadedWorkQueue());
}
TEST(TFRTOpKernelTest, TestGetBoolAttr) {
tfrt::OpAttrs attrs;
attrs.Set<bool>("foo", true);
attrs.Set<bool>("bar", false);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
bool value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_TRUE(value);
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_FALSE(value);
}
TEST(TFRTOpKernelTest, TestGetIntAttr) {
tfrt::OpAttrs attrs;
attrs.Set<int32>("foo", -2);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
int32_t value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, -2);
}
TEST(TFRTOpKernelTest, TestGetIntListAttr) {
tfrt::OpAttrs attrs;
attrs.SetArray<int32>("foo", {});
attrs.SetArray<int32>("bar", {1});
attrs.SetArray<int32>("baz", {1, 2, 3});
attrs.SetString("bar", "test");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
std::vector<int32> v1, v2, v3;
std::vector<int32> expected_v1;
std::vector<int32> expected_v2 = {1};
std::vector<int32> expected_v3 = {1, 2, 3};
TF_ASSERT_OK(ctx.GetAttr("foo", &v1));
ASSERT_EQ(v1, expected_v1);
TF_ASSERT_OK(ctx.GetAttr("bar", &v2));
ASSERT_EQ(v2, expected_v2);
TF_ASSERT_OK(ctx.GetAttr("baz", &v3));
ASSERT_EQ(v3, expected_v3);
}
TEST(TFRTOpKernelTest, TestGetStrAttr) {
tfrt::OpAttrs attrs;
attrs.SetString("foo", "");
attrs.SetString("bar", "test");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
std::string value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, "");
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_EQ(value, "test");
}
TEST(TFRTOpKernelTest, TestGetPaddingAttr) {
tfrt::OpAttrs attrs;
attrs.SetString("foo", "VALID");
attrs.SetString("bar", "SAME");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
Padding value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, Padding::VALID);
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_EQ(value, Padding::SAME);
}
TEST(TFRTOpKernelTest, TestMissingAttr) {
tfrt::OpAttrs attrs;
attrs.Set<bool>("foo", true);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
bool value;
auto status = ctx.GetAttr("bar", &value);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
class TestKernel : public TFRTOpKernel {
public:
explicit TestKernel(TFRTOpKernelConstruction* construction)
: TFRTOpKernel(construction) {}
void Compute(TFRTOpKernelContext* context) override {}
};
TEST(TFRTOpKernelTest, TestKernelMatchesTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::F32);
attrs.Set<tfrt::OpAttrType>("bar", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg.type_constraints["foo"] = DT_FLOAT;
reg.type_constraints["bar"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernelFloatInt", reg);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernelFloatInt",
&ctx);
ASSERT_NE(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestSecondKernelMatchesTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg1([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
TFRTOpKernelReg reg2([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg1.type_constraints["foo"] = DT_FLOAT;
reg2.type_constraints["foo"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernel2ndConstraint", reg1);
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernel2ndConstraint", reg2);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernel2ndConstraint",
&ctx);
ASSERT_NE(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestKernelDoesNotMatchTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::I32);
attrs.Set<tfrt::OpAttrType>("bar", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg.type_constraints["foo"] = DT_FLOAT;
reg.type_constraints["bar"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernelIntInt", reg);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernelIntInt", &ctx);
ASSERT_EQ(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestAllocateTemp) {
auto host_context = CreateTestHostContext(1);
int num_outputs = 1;
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs;
TFRTOpMeta op_meta({DT_INT32});
TFRTOpKernelContext ctx(inputs, num_outputs, &op_meta, host_context.get());
Tensor out;
ASSERT_EQ(out.AllocatedBytes(), 0);
TF_EXPECT_OK(ctx.allocate_temp(DT_INT32, {}, &out));
ASSERT_GT(out.AllocatedBytes(), 0);
out.scalar<int32>()() = 123;
ASSERT_EQ(out.dtype(), DT_INT32);
ASSERT_EQ(out.shape().dims(), 0);
}
}
} |
1,583 | cpp | tensorflow/tensorflow | runtime_fallback_kernels | tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc | tensorflow/core/runtime_fallback/test/runtime_fallback_kernels_test.cc | #ifndef TENSORFLOW_CORE_RUNTIME_FALLBACK_RUNTIME_RUNTIME_FALLBACK_KERNELS_H_
#define TENSORFLOW_CORE_RUNTIME_FALLBACK_RUNTIME_RUNTIME_FALLBACK_KERNELS_H_
#include <memory>
#include "llvm/Support/Error.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/runtime/kernel_utils.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/chain.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/shared_context.h"
#include "tfrt/tensor/tensor.h"
namespace tensorflow {
namespace tfd {
Status CallEagerExecute(const tfrt::ExecutionContext& exec_ctx,
EagerContext* eager_ctx, const char* op_name,
const char* device_name,
llvm::ArrayRef<TensorHandle*> input_tensor_handles,
const tfrt::OpAttrsRef& attrs,
llvm::MutableArrayRef<tensorflow::AbstractTensorHandle*>
result_tensor_handles);
tfrt::AsyncValueRef<tfrt::Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, const char* op_name,
const char* device_name, tfrt::ArrayRef<tfrt::Tensor*> arguments,
const tfrt::OpAttrsRef& attrs,
tfrt::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>> results);
}
}
#endif
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/runtime/kernel_utils.h"
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_op_handler.h"
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include "tensorflow/core/runtime_fallback/util/tensor_util.h"
#include "tensorflow/core/runtime_fallback/util/type_util.h"
#include "tensorflow/core/tfrt/utils/error_util.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/tensor_util.h"
#include "tfrt/cpu/core_runtime/cpu_op_handler.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/core_runtime/core_runtime_op.h"
#include "tfrt/core_runtime/execute_op_impl.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/tensor_handle.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/attribute_utils.h"
#include "tfrt/host_context/device.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/host_buffer.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/kernel_frame.h"
#include "tfrt/host_context/kernel_utils.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/host_context/sync_kernel_frame.h"
#include "tfrt/support/error_util.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
#include "tfrt/tensor/conversion_registry.h"
#include "tfrt/tensor/dense_host_tensor.h"
#include "tfrt/tensor/scalar_host_tensor.h"
#include "tfrt/tensor/string_host_tensor.h"
#include "tfrt/tensor/tensor_serialize_utils.h"
namespace tensorflow {
namespace tfd {
namespace {
constexpr char kHostContextPtrAttrName[] = "host_ptr";
constexpr char kDefaultCpuDevice[] =
"/job:localhost/replica:0/task:0/device:CPU:0";
}
using tfrt::AggregateAttr;
using tfrt::Argument;
using tfrt::AsyncValue;
using tfrt::AsyncValueRef;
using tfrt::BEFAttributeType;
using tfrt::Chain;
using tfrt::DenseAttr;
using tfrt::DenseHostTensor;
using tfrt::ExecutionContext;
using tfrt::Expected;
using tfrt::FuncAttr;
using tfrt::HostBuffer;
using tfrt::HostContext;
using tfrt::KernelErrorHandler;
using tfrt::OpAttrs;
using tfrt::OpAttrsRawEntry;
using tfrt::OpAttrsRef;
using tfrt::OpAttrType;
using tfrt::raw_ostream;
using tfrt::RCReference;
using tfrt::RemainingArguments;
using tfrt::RemainingAttributes;
using tfrt::RemainingResults;
using tfrt::Result;
using tfrt::ShapeAttr;
using tfrt::string_view;
using tfrt::StringAttr;
using tfrt::StringAttribute;
using tfrt::Tensor;
using tfrt::TensorShape;
#define TFD_REPORT_AND_RETURN_IF_ERROR(handler, status) \
if (!status.ok()) { \
handler.ReportError(status.message()); \
return; \
}
static AsyncValueRef<RuntimeFallbackTensor> CreateRuntimeFallbackTensor(
TensorHandle* handle, HostContext* host) {
OwnedTensorHandle th(handle);
int rank;
tensorflow::Status status = th->NumDims(&rank);
if (!status.ok())
return tfrt::MakeErrorAsyncValueRef(tfrt::StrCat(
"error getting rank from TF tensor handle: ", status.message()));
llvm::SmallVector<tfrt::Index, 4> dims;
for (auto i = 0; i < rank; ++i) {
int64_t dim;
status = th->Dim(i, &dim);
if (!status.ok())
return tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat("error getting dimension from TFE tensor handle: ",
status.message()));
dims.push_back(dim);
}
TensorShape shape{dims};
DataType dtype = th->DataType();
return tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
shape, GetTfrtDtype(dtype), std::move(th));
}
static std::pair<RuntimeFallbackTensor, Chain> TfdMoveDHTToTFT(
Argument<DenseHostTensor> dht, Argument<Chain> in_chain,
const ExecutionContext& exec_ctx) {
return std::make_pair(
MoveDHTToRuntimeFallbackTensor(std::move(dht.get()), exec_ctx.host()),
in_chain.get());
}
static void TfdConvertTFTToDHT(Argument<RuntimeFallbackTensor> tft,
Argument<Chain> in_chain,
Result<DenseHostTensor> dht,
Result<Chain> out_chain,
KernelErrorHandler handler,
const ExecutionContext& exec_ctx) {
dht.Set(tfrt::ConvertTensorOnHost(exec_ctx, tft.get(),
DenseHostTensor::kTensorType)
.ReleaseRCRef());
out_chain.Set(in_chain);
}
static void TfdPrintTFT(Argument<RuntimeFallbackTensor> tft,
Argument<Chain> in_chain, Result<Chain> out_chain) {
llvm::outs() << tft.get() << "\n";
llvm::outs().flush();
out_chain.Set(in_chain);
}
static void TfdInitEagerContext(Argument<Chain> in_chain,
Result<Chain> out_chain,
KernelErrorHandler handler,
const ExecutionContext& exec_ctx) {
tfrt::ResourceContext* resource_context = exec_ctx.resource_context();
tensorflow::tfd::EagerContextResource* eager_context_resource =
resource_context
->GetOrCreateResource<tensorflow::tfd::EagerContextResource>(
tensorflow::tfd::kEagerContextResourceName);
(void)eager_context_resource;
out_chain.Set(in_chain);
}
OwnedTFTensor MoveDHTToTFTensor(DenseHostTensor&& dht, HostContext* host) {
llvm::SmallVector<tfrt::Index, 4> dims;
dht.shape().GetDimensions(&dims);
HostBuffer* host_buffer = dht.ReleaseBuffer().release();
auto deallocator = [](void* data, size_t len, void* arg) {
auto* host_buffer = reinterpret_cast<HostBuffer*>(arg);
host_buffer->DropRef();
};
CheckBoolCompatibility();
OwnedTFTensor tf_tensor{
TF_NewTensor(static_cast<TF_DataType>(GetTfDataType(dht.dtype())),
dims.data(), dims.size(), host_buffer->data(),
host_buffer->size(), deallocator, host_buffer)};
return tf_tensor;
}
static tensorflow::Status DecodeDenseAttrToTensorInterface(
const DenseAttr& dense_attr, HostContext* host,
tensorflow::TensorInterface* result) {
Expected<DenseHostTensor> dht =
tfrt::DeserializeDenseHostTensorFromDenseAttr(dense_attr, host);
if (!dht)
return tensorflow::errors::Internal(tfrt::StrCat(
"cannot create DenseHostTensor in DecodeDenseAttrToTensorInterface:",
dht.takeError()));
OwnedTFTensor tf_tensor = MoveDHTToTFTensor(std::move(*dht), host);
tensorflow::Tensor t;
TF_RETURN_IF_ERROR(TF_TensorToTensor(tf_tensor.get(), &t));
*result = tensorflow::TensorInterface(std::move(t));
return absl::OkStatus();
}
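// Copies every entry of the TFRT attribute set |attrs| onto |eager_op|,
// translating TFRT attribute kinds (scalars, arrays, dtypes, shapes, dense
// tensors, aggregates, function references) into the corresponding
// EagerOperation setters; attributes flagged by IsUnusedAttribute are skipped.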
static tensorflow::Status PrepareAttributes(EagerOperation* eager_op,
const OpAttrsRef& attrs,
HostContext* host,
EagerContext* eager_ctx) {
tensorflow::Status status;
attrs.IterateEntries([eager_op, eager_ctx, status_ptr = &status, host,
&attrs](const OpAttrsRawEntry& entry) {
assert(strcmp(entry.name, "device") != 0);
if (IsUnusedAttribute(entry.name)) {
return;
} else if (entry.IsArray()) {
if (entry.element_count == 0) {
if (entry.type == OpAttrType::CHAR) {
std::string empty_str;
*status_ptr = eager_op->SetAttrString(entry.name, empty_str.data(),
empty_str.size());
} else {
AttrValue empty_attr_value;
eager_op->MutableAttrs()->Set(entry.name, empty_attr_value);
}
} else if (entry.type == OpAttrType::CHAR) {
string_view attr_value = attrs.GetStringAsserting(entry.name);
*status_ptr = eager_op->SetAttrString(entry.name, attr_value.data(),
attr_value.size());
} else if (entry.type == OpAttrType::FUNC) {
string_view attr_value = attrs.GetFuncNameAsserting(entry.name);
*status_ptr = eager_op->SetAttrFunctionName(
entry.name, attr_value.data(), attr_value.size());
} else if (entry.type == OpAttrType::I64) {
llvm::ArrayRef<int64_t> int_array =
attrs.GetArrayAsserting<int64_t>(entry.name);
*status_ptr = eager_op->SetAttrIntList(entry.name, int_array.data(),
int_array.size());
} else if (entry.type == OpAttrType::F32) {
llvm::ArrayRef<float> float_array =
attrs.GetArrayAsserting<float>(entry.name);
*status_ptr = eager_op->SetAttrFloatList(entry.name, float_array.data(),
float_array.size());
} else if (entry.type == OpAttrType::BOOL) {
llvm::ArrayRef<bool> bool_array =
attrs.GetArrayAsserting<bool>(entry.name);
std::vector<unsigned char> bool_char_array(bool_array.begin(),
bool_array.end());
*status_ptr = eager_op->SetAttrBoolList(
entry.name, bool_char_array.data(), bool_char_array.size());
} else if (entry.type == OpAttrType::DTYPE) {
const auto& op_attr = attrs.GetRawAsserting(entry.name);
assert(op_attr.IsArray());
auto bef_dtypes =
llvm::ArrayRef(static_cast<const tfrt::DType*>(op_attr.GetData()),
op_attr.element_count);
llvm::SmallVector<tensorflow::DataType, 4> tf_dtypes;
tf_dtypes.reserve(bef_dtypes.size());
for (auto bef_dtype : bef_dtypes) {
tf_dtypes.push_back(ConvertBefAttrTypeToTfDataType(bef_dtype));
}
*status_ptr = eager_op->SetAttrTypeList(entry.name, tf_dtypes.data(),
tf_dtypes.size());
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported array attribute type");
}
} else {
if (entry.type == OpAttrType::I64) {
int64_t attr_value = attrs.GetAsserting<int64_t>(entry.name);
*status_ptr = eager_op->SetAttrInt(entry.name, attr_value);
} else if (entry.type == OpAttrType::F32) {
float attr_value = attrs.GetAsserting<float>(entry.name);
*status_ptr = eager_op->SetAttrFloat(entry.name, attr_value);
} else if (entry.type == OpAttrType::BOOL) {
bool attr_value = attrs.GetAsserting<bool>(entry.name);
*status_ptr = eager_op->SetAttrBool(entry.name, attr_value);
} else if (entry.type == OpAttrType::DTYPE) {
OpAttrType op_attr_type = attrs.GetAsserting<OpAttrType>(entry.name);
DataType tf_dtype = ConvertToTfDataType(op_attr_type);
*status_ptr = eager_op->SetAttrType(entry.name, tf_dtype);
} else if (entry.type == OpAttrType::SHAPE) {
tfrt::ShapeAttr shape_attr =
attrs.GetAsserting<tfrt::ShapeAttr>(entry.name);
if (shape_attr.HasRank()) {
*status_ptr = eager_op->SetAttrShape(
entry.name, shape_attr.GetShape().data(), shape_attr.GetRank());
} else {
*status_ptr = eager_op->SetAttrShape(entry.name, nullptr,
-1);
}
} else if (entry.type == OpAttrType::DENSE) {
DenseAttr dense_attr = attrs.GetAsserting<DenseAttr>(entry.name);
tensorflow::TensorInterface interface;
*status_ptr =
DecodeDenseAttrToTensorInterface(dense_attr, host, &interface);
if (!status_ptr->ok()) return;
*status_ptr = eager_op->SetAttrTensor(entry.name, &interface);
} else if (entry.type == OpAttrType::AGGREGATE) {
AggregateAttr list_attr = attrs.GetAsserting<AggregateAttr>(entry.name);
int num_values = list_attr.GetNumElements();
if (num_values == 0) {
std::vector<int> dummy_attr;
eager_op->MutableAttrs()->Set(
entry.name, gtl::ArraySlice<const int>(dummy_attr.data(), 0));
return;
}
auto attr_base = list_attr.GetAttribute(0);
if (IsDataTypeAttribute(attr_base.type()) &&
GetDataType(attr_base.type()) == tfrt::DType::String) {
llvm::SmallVector<const void*, 8> values;
llvm::SmallVector<size_t, 8> lengths;
values.reserve(num_values);
lengths.reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto string_attr = list_attr.GetAttributeOfType<StringAttr>(i);
values.push_back(string_attr.GetValue().data());
lengths.push_back(string_attr.GetValue().size());
}
*status_ptr = eager_op->SetAttrStringList(entry.name, values.data(),
lengths.data(), num_values);
} else if (IsFuncAttribute(attr_base.type())) {
std::vector<const AbstractOperation*> funcs(num_values);
for (int i = 0; i < num_values; ++i) {
auto func_attr = list_attr.GetAttributeOfType<FuncAttr>(i);
ImmediateExecutionOperation* new_op = eager_ctx->CreateOperation();
auto func_name = func_attr.GetFunctionName();
*status_ptr = new_op->Reset(func_name.str().c_str(),
nullptr);
funcs[i] = new_op;
}
*status_ptr =
eager_op->SetAttrFunctionList(entry.name, absl::MakeSpan(funcs));
} else if (attr_base.type() == BEFAttributeType::kShape) {
llvm::SmallVector<int, 8> ranks;
llvm::SmallVector<const int64_t*, 8> dims;
ranks.reserve(num_values);
dims.reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto shape_attr = list_attr.GetAttributeOfType<ShapeAttr>(i);
if (shape_attr.HasRank()) {
ranks.push_back(shape_attr.GetRank());
dims.push_back(shape_attr.GetShape().data());
} else {
ranks.push_back(-1);
dims.push_back(nullptr);
}
}
*status_ptr = eager_op->SetAttrShapeList(entry.name, dims.data(),
ranks.data(), num_values);
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported list attribute type");
}
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported scalar attribute type");
}
}
});
return status;
}
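// Resets a fresh EagerOperation for |op_name| on |device_name|, adds the input
// tensor handles, copies |attrs| via PrepareAttributes, and executes the op
// eagerly, filling |result_tensor_handles|.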
Status CallEagerExecute(const tfrt::ExecutionContext& exec_ctx,
EagerContext* eager_ctx, const char* op_name,
const char* device_name,
llvm::ArrayRef<TensorHandle*> input_tensor_handles,
const OpAttrsRef& attrs,
llvm::MutableArrayRef<tensorflow::AbstractTensorHandle*>
result_tensor_handles) {
assert(eager_ctx != nullptr && "EagerContext is NULL");
OwnedEagerOperation eager_op{new EagerOperation(eager_ctx)};
TF_RETURN_IF_ERROR(eager_op->Reset(op_name, device_name));
for (TensorHandle* input_tensor : input_tensor_handles) {
TF_RETURN_IF_ERROR(eager_op->AddInput(input_tensor));
}
auto* host = exec_ctx.host();
TF_RETURN_IF_ERROR(PrepareAttributes(eager_op.get(), attrs, host, eager_ctx));
int num_retvals = result_tensor_handles.size();
TF_RETURN_IF_ERROR(eager_op->Execute(
absl::MakeSpan(result_tensor_handles.data(), num_retvals), &num_retvals));
return absl::OkStatus();
}
static bool ShouldAddHostContextAttr(const char* op_name) {
return strcmp(op_name, "TFRTMakeIterator") == 0;
}
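// Runs |op_name| through the TF eager runtime and wraps each resulting
// TensorHandle in a RuntimeFallbackTensor async value; on failure, every entry
// of |results| is filled with the same error async value.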
AsyncValueRef<Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, EagerContext* eager_ctx,
const char* op_name, const char* device_name,
llvm::ArrayRef<Tensor*> arguments, const OpAttrsRef& attrs,
llvm::MutableArrayRef<RCReference<AsyncValue>> results) {
auto emit_error = [&exec_ctx, results](const tensorflow::Status& status) {
auto error = EmitErrorAsync(exec_ctx, status);
std::fill(results.begin(), results.end(), error);
return error;
};
llvm::SmallVector<TensorHandle*, 4> input_tensor_handles;
input_tensor_handles.reserve(arguments.size());
for (Tensor* input_tensor : arguments) {
input_tensor_handles.push_back(
llvm::cast<RuntimeFallbackTensor>(input_tensor)->GetTensorHandle());
}
int num_retvals = results.size();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 4> result_tensor_handles(
num_retvals);
Status status;
if (!ShouldAddHostContextAttr(op_name)) {
status =
CallEagerExecute(exec_ctx, eager_ctx, op_name, device_name,
input_tensor_handles, attrs, result_tensor_handles);
} else {
assert(attrs.GetNumEntries() == 1);
OpAttrs updated;
updated.Set(kHostContextPtrAttrName,
reinterpret_cast<int64_t>(exec_ctx.host()));
status = CallEagerExecute(
exec_ctx, eager_ctx, op_name, device_name, input_tensor_handles,
OpAttrsRef(std::move(updated)), result_tensor_handles);
}
if (!status.ok()) return emit_error(status);
auto host = exec_ctx.host();
for (int i = 0; i < num_retvals; ++i) {
auto expected_fallback_tensor =
CreateRuntimeFallbackTensorFromTfTensorHandle(
OwnedTensorHandle{
TensorHandleFromInterface(result_tensor_handles[i])},
host);
if (!expected_fallback_tensor)
results[i] = EmitErrorAsync(
exec_ctx, tfrt::StrCat(expected_fallback_tensor.takeError()));
else
results[i] = tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
std::move(*expected_fallback_tensor));
}
return tfrt::GetReadyChain();
}
AsyncValueRef<Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, const char* op_name,
const char* device_name, llvm::ArrayRef<Tensor*> arguments,
const OpAttrsRef& attrs,
llvm::MutableArrayRef<RCReference<AsyncValue>> results) {
auto eager_ctx_expected = GetEagerContext(exec_ctx);
if (!eager_ctx_expected) {
auto error =
EmitErrorAsync(exec_ctx, toString(eager_ctx_expected.takeError()));
std::fill(results.begin(), results.end(), error);
return std::move(error);
}
EagerContext* eager_ctx = eager_ctx_expected.get();
return RuntimeFallbackExecute(exec_ctx, eager_ctx, op_name, device_name,
arguments, attrs, results);
}
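// TFRT kernel entry point for the fallback path: it rebuilds an EagerOperation
// from the op name (with any "tf." prefix stripped) and the
// (attribute name, typed value) string pairs passed as remaining attributes,
// executes it, and converts the returned handles into RuntimeFallbackTensor
// results.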
static void RuntimeFallbackKernel(
Argument<Chain> in_chain, RemainingArguments input_tensors,
Result<Chain> out_chain, RemainingResults output_tensors,
StringAttribute op_name, RemainingAttributes remaining_attributes,
KernelErrorHandler handler, const ExecutionContext& exec_ctx) {
HostContext* host = exec_ctx.host();
tfrt::ResourceContext* resource_context = exec_ctx.resource_context();
EagerContextResource* eager_context_resource =
resource_context->GetOrCreateResource<EagerContextResource>(
tensorflow::tfd::kEagerContextResourceName);
tfrt::Expected<EagerContext*> eager_ctx_expected =
eager_context_resource->GetTFEagerContext();
if (!eager_ctx_expected) {
handler.ReportError("eager_ctx_expected.takeError()");
return;
}
EagerContext* eager_ctx = eager_ctx_expected.get();
std::string op_name_str = [&] {
auto view = op_name.get();
view.consume_front("tf.");
return view.str();
}();
OwnedEagerOperation eager_op{new EagerOperation(eager_ctx)};
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->Reset(op_name_str.c_str(), nullptr));
for (AsyncValue* input_tensor_av : input_tensors.values()) {
auto input_tensor_handle =
input_tensor_av->get<RuntimeFallbackTensor>().GetTensorHandle();
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
eager_op->AddInput(input_tensor_handle));
}
assert(remaining_attributes.size() % 2 == 0);
int num_tf_attrs = remaining_attributes.size() / 2;
for (int i = 0; i < num_tf_attrs; ++i) {
std::string attr_name =
remaining_attributes.GetStringAttribute(i * 2).str();
absl::string_view attr_value = ToAbslStringView(
remaining_attributes.GetStringAttribute(i * 2 + 1).get());
std::vector<absl::string_view> value_split =
tfd::AttrValueSplit(attr_value);
if (value_split[0] == "string") {
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->SetAttrString(attr_name.c_str(), value_split[1].data(),
value_split[1].size()));
} else if (value_split[0] == "bool") {
bool bool_val;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseBoolAttrValue(value_split[1], &bool_val));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrBool(attr_name.c_str(), bool_val));
} else if (value_split[0] == "int") {
int64_t int_val;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseIntAttrValue(value_split[1], &int_val));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrInt(attr_name.c_str(), int_val));
} else if (value_split[0] == "tftensor") {
tensorflow::Tensor t;
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
ParseTensorAttrValue(value_split[1], &t));
tensorflow::TensorInterface interface(t);
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrTensor(attr_name.c_str(), &interface));
} else if (value_split[0] == "tfdtype") {
DataType dtype;
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
ParseTfDataType(value_split[1], &dtype));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrType(attr_name.c_str(), dtype));
} else if (value_split[0] == "tfshape") {
std::vector<int64_t> dims;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseTensorShapeAttrValue(value_split[1], &dims));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->SetAttrShape(attr_name.c_str(), dims.data(), dims.size()));
} else {
handler.ReportError("attribute type not yet supported");
return;
}
}
int num_retvals = output_tensors.size();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 4> retvals(num_retvals);
tensorflow::Status status = eager_op->Execute(
absl::MakeSpan(retvals.data(), num_retvals), &num_retvals);
TFD_REPORT_AND_RETURN_IF_ERROR(handler, status);
if (num_retvals != output_tensors.size()) {
handler.ReportError("Incorrect number of output values");
return;
}
for (int i = 0; i < num_retvals; ++i) {
OwnedTensorHandle owned_th{TensorHandleFromInterface(retvals[i])};
if (!owned_th) handler.ReportError("TensorHandleFromInterface failed");
auto fallback_tensor = CreateRuntimeFallbackTensorFromTfTensorHandle(
std::move(owned_th), host);
if (!fallback_tensor) {
output_tensors[i] = tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat(fallback_tensor | #include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.h"
#include <gtest/gtest.h>
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/runtime_fallback/test/coreruntime_driver.h"
#include "tfrt/core_runtime/op_attrs.h"
namespace tfrt {
namespace {
TEST(RuntimeFallbackKernelsTest, CallEagerExecute) {
auto driver = CoreRuntimeDriver();
driver.InitializeCpuRuntimeFallbackOpHandler();
auto exec_ctx = driver.CreateExecutionContext(__FILE__, __LINE__);
tensorflow::Tensor input(tensorflow::DT_FLOAT, {2, 2});
tensorflow::test::FillValues<float>(&input, {1, 1, 1, 1});
tensorflow::TensorHandle* input_th =
tensorflow::TensorHandle::CreateLocalHandle(input);
tfrt::OpAttrs matmul_attrs;
matmul_attrs.Set<bool>("transpose_a", false);
matmul_attrs.Set<bool>("transpose_b", false);
tfrt::OpAttrsRef matmul_attrs_ref = matmul_attrs.freeze();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 1> results;
results.resize(1);
auto eager_ctx_expected = tensorflow::tfd::GetEagerContext(exec_ctx);
ASSERT_FALSE(!eager_ctx_expected);
tensorflow::EagerContext* eager_ctx = eager_ctx_expected.get();
TF_EXPECT_OK(tensorflow::tfd::CallEagerExecute(
exec_ctx, eager_ctx, "MatMul", "", {input_th, input_th},
matmul_attrs_ref, results));
ASSERT_EQ(results.size(), 1);
tensorflow::TensorHandle* res_th =
tensorflow::TensorHandleFromInterface(results[0]);
const tensorflow::Tensor* res_tensor;
TF_EXPECT_OK(res_th->Tensor(&res_tensor));
EXPECT_EQ(res_th->DataType(), tensorflow::DT_FLOAT);
int64_t num_elements;
TF_EXPECT_OK(res_th->NumElements(&num_elements));
EXPECT_EQ(num_elements, 4);
tensorflow::Tensor expected(tensorflow::DT_FLOAT, {2, 2});
tensorflow::test::FillValues<float>(&expected, {2, 2, 2, 2});
tensorflow::test::ExpectTensorEqual<float>(*res_tensor, expected);
input_th->Unref();
res_th->Unref();
}
}
} |
1,584 | cpp | tensorflow/tensorflow | collective_executor_mgr | tensorflow/core/common_runtime/collective_executor_mgr.cc | tensorflow/core/common_runtime/collective_executor_mgr_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_EXECUTOR_MGR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_EXECUTOR_MGR_H_
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
class ConfigProto;
class DeviceMgr;
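// CollectiveExecutorMgr caches one CollectiveExecutor per step id for the
// local (single task) runtime. Executors are created lazily in FindOrCreate
// and released in Cleanup / CleanupAll; the step-id sequence methods are not
// implemented at this level and simply report errors or kInvalidId.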
class CollectiveExecutorMgr : public CollectiveExecutorMgrInterface {
public:
CollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverInterface> dev_resolver,
std::unique_ptr<ParamResolverInterface> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator);
virtual ~CollectiveExecutorMgr();
CollectiveExecutor* FindOrCreate(int64_t step_id) override;
void Cleanup(int64_t step_id) override;
void CleanupAll() override;
ParamResolverInterface* GetParamResolver() const override {
return param_resolver_.get();
}
DeviceResolverInterface* GetDeviceResolver() const override {
return dev_resolver_.get();
}
NcclCommunicatorInterface* GetNcclCommunicator() const override {
return nccl_communicator_.get();
}
void GetStepSequenceAsync(const GetStepSequenceRequest* request,
GetStepSequenceResponse* response,
const StatusCallback& done) override;
void RefreshStepIdSequenceAsync(int64_t graph_key,
const StatusCallback& done) override;
int64_t NextStepId(int64_t graph_key) override {
return CollectiveExecutor::kInvalidId;
}
void RetireStepId(int64_t graph_key, int64_t step_id) override {}
protected:
virtual CollectiveExecutor* Create(int64_t step_id);
const DeviceMgr* dev_mgr_;
std::unique_ptr<DeviceResolverInterface> dev_resolver_;
std::unique_ptr<ParamResolverInterface> param_resolver_;
string gpu_ring_order_;
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator_;
std::shared_ptr<UnboundedWorkQueue> work_queue_;
private:
mutex exec_mu_;
gtl::FlatMap<int64_t, CollectiveExecutor*> executor_table_
TF_GUARDED_BY(exec_mu_);
};
std::unique_ptr<CollectiveExecutorMgr> CreateProdLocalCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator);
}
#endif
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/build_graph_options.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
CollectiveExecutorMgr::CollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverInterface> dev_resolver,
std::unique_ptr<ParamResolverInterface> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator)
: dev_mgr_(dev_mgr),
dev_resolver_(std::move(dev_resolver)),
param_resolver_(std::move(param_resolver)),
gpu_ring_order_(
config.gpu_options().experimental().collective_ring_order()),
nccl_communicator_(std::move(nccl_communicator)),
work_queue_(std::make_shared<UnboundedWorkQueue>(Env::Default(),
"collective_ops")) {}
CollectiveExecutorMgr::~CollectiveExecutorMgr() {
for (auto iter : executor_table_) {
iter.second->Unref();
}
}
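// Returns the executor cached for |step_id|, creating it on first use. The
// returned executor carries an extra reference that the caller must release,
// e.g. by wrapping it in a CollectiveExecutor::Handle.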
CollectiveExecutor* CollectiveExecutorMgr::FindOrCreate(int64_t step_id) {
CollectiveExecutor* ce = nullptr;
{
mutex_lock l(exec_mu_);
auto it = executor_table_.find(step_id);
if (it != executor_table_.end()) {
ce = it->second;
} else {
ce = Create(step_id);
executor_table_[step_id] = ce;
}
ce->Ref();
}
return ce;
}
CollectiveExecutor* CollectiveExecutorMgr::Create(int64_t step_id) {
CollectiveRemoteAccessLocal* rma =
new CollectiveRemoteAccessLocal(dev_mgr_, dev_resolver_.get(), step_id);
return new BaseCollectiveExecutor(this, rma, step_id, dev_mgr_, work_queue_);
}
void CollectiveExecutorMgr::Cleanup(int64_t step_id) {
CollectiveExecutor* ce = nullptr;
{
mutex_lock l(exec_mu_);
auto it = executor_table_.find(step_id);
if (it != executor_table_.end()) {
ce = it->second;
executor_table_.erase(it);
}
}
if (ce) ce->Unref();
}
void CollectiveExecutorMgr::CleanupAll() {
gtl::FlatMap<int64_t, CollectiveExecutor*> executor_table;
{
mutex_lock l(exec_mu_);
std::swap(executor_table, executor_table_);
}
for (auto iter : executor_table) {
iter.second->Unref();
}
}
void CollectiveExecutorMgr::GetStepSequenceAsync(
const GetStepSequenceRequest* request, GetStepSequenceResponse* response,
const StatusCallback& done) {
done(errors::Internal(
"CollectiveExecutorMgr does not implement GetStepSequence."));
}
void CollectiveExecutorMgr::RefreshStepIdSequenceAsync(
int64_t graph_key, const StatusCallback& done) {
done(errors::Internal(
"CollectiveExecutorMgr does not implement RefreshStepIdSequence."));
}
std::unique_ptr<CollectiveExecutorMgr> CreateProdLocalCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator) {
auto device_resolver = std::make_unique<DeviceResolverLocal>(device_mgr);
auto param_resolver = std::make_unique<CollectiveParamResolverLocal>(
config, device_mgr, device_resolver.get(), nccl_communicator.get(),
"/job:localhost/replica:0/task:0");
return std::make_unique<CollectiveExecutorMgr>(
config, device_mgr, std::move(device_resolver), std::move(param_resolver),
std::move(nccl_communicator));
}
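// Illustrative sketch (not part of the original file): typical local usage,
// assuming a ConfigProto `config`, a DeviceMgr* `device_mgr`, and an int64_t
// `step_id` already exist.
//
//   auto mgr = CreateProdLocalCollectiveExecutorMgr(
//       config, device_mgr, MaybeCreateNcclCommunicator(config));
//   CollectiveExecutor::Handle handle(mgr->FindOrCreate(step_id), true);
//   // ... launch collectives through handle.get() ...
//   mgr->Cleanup(step_id);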
} | #include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
#define NUM_DEVS 3
TEST(MaybeCreateNcclCommunicator, ZeroGpus) {
ConfigProto cp;
(*cp.mutable_device_count())["GPU"] = 0;
EXPECT_EQ(nullptr, MaybeCreateNcclCommunicator(cp));
}
class CollectiveExecutorMgrTest : public ::testing::Test {
protected:
CollectiveExecutorMgrTest() {
ConfigProto cp;
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
string task_name = "/job:localhost/replica:0/task:0";
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
std::unique_ptr<DeviceResolverInterface> drl(
new DeviceResolverLocal(device_mgr_.get()));
std::unique_ptr<ParamResolverInterface> prl(
new CollectiveParamResolverLocal(cp, device_mgr_.get(), drl.get(),
nullptr,
task_name));
cme_.reset(new CollectiveExecutorMgr(cp, device_mgr_.get(), std::move(drl),
std::move(prl),
MaybeCreateNcclCommunicator(cp)));
}
std::unique_ptr<CollectiveExecutorMgr> cme_;
std::unique_ptr<DeviceMgr> device_mgr_;
};
TEST_F(CollectiveExecutorMgrTest, FindOrCreate) {
CollectiveExecutor::Handle* h =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_TRUE(h->get());
CollectiveExecutor::Handle* h2 =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_EQ(h->get(), h2->get());
CollectiveExecutor* ce = h->get();
delete h;
delete h2;
CollectiveExecutor::Handle h3(cme_->FindOrCreate(1), true);
EXPECT_EQ(ce, h3.get());
cme_->Cleanup(1);
}
TEST_F(CollectiveExecutorMgrTest, StepSequenceRelated) {
EXPECT_EQ(CollectiveExecutor::kInvalidId, cme_->NextStepId(123));
Notification ss_note;
Status ss_status;
cme_->RefreshStepIdSequenceAsync(123,
[&ss_status, &ss_note](const Status& s) {
ss_status = s;
ss_note.Notify();
});
ss_note.WaitForNotification();
EXPECT_FALSE(ss_status.ok());
EXPECT_EQ(ss_status.message(),
"CollectiveExecutorMgr does not implement RefreshStepIdSequence.");
Notification gs_note;
Status gs_status;
GetStepSequenceRequest* req = nullptr;
GetStepSequenceResponse* resp = nullptr;
cme_->GetStepSequenceAsync(req, resp,
[&gs_status, &gs_note](const Status& s) {
gs_status = s;
gs_note.Notify();
});
gs_note.WaitForNotification();
EXPECT_FALSE(gs_status.ok());
EXPECT_EQ(gs_status.message(),
"CollectiveExecutorMgr does not implement GetStepSequence.");
}
}
} |
1,585 | cpp | tensorflow/tensorflow | inline_function_utils | tensorflow/core/common_runtime/inline_function_utils.cc | tensorflow/core/common_runtime/inline_function_utils_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_INLINE_FUNCTION_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_INLINE_FUNCTION_UTILS_H_
#include <functional>
#include <memory>
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/lower_function_call_inline_policy.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
static constexpr const char* const kNoInlineAttr = "_noinline";
class InlinedFunctionBodyPlacer {
public:
virtual ~InlinedFunctionBodyPlacer() = default;
virtual absl::optional<string> InputNodeDevice(int input_index) const = 0;
virtual absl::optional<string> OutputNodeDevice(int output_index) const = 0;
virtual bool ColocateInputOutputIdentities() const = 0;
virtual absl::optional<string> ControlNodeDevice() const = 0;
virtual absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const = 0;
static std::unique_ptr<InlinedFunctionBodyPlacer> DefaultPlacer(
const Graph& graph, const Node& caller);
static std::unique_ptr<InlinedFunctionBodyPlacer> SingleDevicePlacer(
const Graph& graph, const Node& caller);
static std::unique_ptr<InlinedFunctionBodyPlacer> MultiDevicePlacer(
const Graph& graph, const Node& caller);
using Factory = std::function<std::unique_ptr<InlinedFunctionBodyPlacer>(
const Graph&, const Node&)>;
struct Config {
string name;
Factory get;
};
static Config Default() { return {"default", DefaultPlacer}; }
static Config SingleDevice() { return {"single_device", SingleDevicePlacer}; }
static Config MultiDevice() { return {"multi_device", MultiDevicePlacer}; }
};
struct InlineFunctionBodyOptions {
enum class OutputControlSource { kDataOutputs, kControlOutputs };
enum class KeepCallerNode { kDoNotKeep, kFetchable, kTargetable };
bool disable_inlining = false;
bool ignore_noinline = false;
bool inline_impl_selection_group_functions = false;
KeepCallerNode keep_caller_node = KeepCallerNode::kDoNotKeep;
OutputControlSource output_control_src = OutputControlSource::kDataOutputs;
InlinedFunctionBodyPlacer::Config inlined_function_body_placer =
InlinedFunctionBodyPlacer::Default();
bool uniquify_frame_names = true;
string DebugString() const;
};
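// ValidateInlining checks whether the function call |node| can be replaced by
// |fbody| under |options| (argument/return arity and dtypes must line up and
// inlining must not be disabled for this call); InlineFunctionBody then
// performs the actual rewrite of |g|.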
Status ValidateInlining(const Node* node, const FunctionBody* fbody,
const InlineFunctionBodyOptions& options);
Status InlineFunctionBody(const FunctionLibraryDefinition& flib_def, Graph* g,
Node* caller, const FunctionBody* fbody,
const InlineFunctionBodyOptions& options);
struct ExpandInlineFunctionsOptions {
ExpandInlineFunctionsOptions() : native_options(), multi_device_options() {
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
multi_device_options.output_control_src = OutputControlSrc::kControlOutputs;
}
InlineFunctionBodyOptions native_options;
InlineFunctionBodyOptions multi_device_options;
};
bool ExpandInlineFunctions(FunctionLibraryRuntime* lib, Graph* graph,
const ExpandInlineFunctionsOptions& options);
inline bool ExpandInlineFunctions(FunctionLibraryRuntime* lib, Graph* graph) {
return ExpandInlineFunctions(lib, graph, ExpandInlineFunctionsOptions());
}
}
#endif
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include <deque>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/optimizer_cse.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
constexpr const char* const
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
constexpr const char* const
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
namespace {
static constexpr const char* const kArgOp = FunctionLibraryDefinition::kArgOp;
static constexpr const char* const kDeviceArgOp =
FunctionLibraryDefinition::kDeviceArgOp;
static constexpr const char* const kRetOp = FunctionLibraryDefinition::kRetOp;
static constexpr const char* const kDeviceRetOp =
FunctionLibraryDefinition::kDeviceRetOp;
static constexpr const char* const kGradientOp =
FunctionLibraryDefinition::kGradientOp;
static constexpr const char* const kNodeLabel = "Func";
static constexpr const char* const kFuncAttr =
FunctionLibraryDefinition::kFuncAttr;
struct Endpoint {
Node* node;
int index;
string name() const {
if (index == 0) {
return node->name();
} else {
return strings::StrCat(node->name(), ":", index);
}
}
DataType dtype() const { return node->output_type(index); }
};
struct EndpointHash {
uint64 operator()(const Endpoint& x) const {
return Hash64(reinterpret_cast<const char*>(&x.node), sizeof(Node*),
x.index);
}
};
struct EndpointEq {
bool operator()(const Endpoint& x, const Endpoint& y) const {
return (x.node == y.node) && (x.index == y.index);
}
};
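// Graph-building helpers used during inlining: AddNoOp adds a NoOp node and
// AddIdentity adds an Identity node forwarding the given endpoint, both named
// under the "Func/" prefix.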
static Node* AddNoOp(StringPiece name, Graph* g) {
NodeDef ndef;
ndef.set_name(g->NewName(absl::StrCat(kNodeLabel, "/", name)));
ndef.set_op("NoOp");
Status s;
Node* ret = g->AddNode(ndef, &s);
TF_CHECK_OK(s);
return ret;
}
static Node* AddIdentity(StringPiece name, Graph* g, Endpoint input) {
DCHECK_LT(0, input.dtype());
NodeDef ndef;
ndef.set_name(g->NewName(absl::StrCat(kNodeLabel, "/", name)));
ndef.set_op("Identity");
ndef.add_input(input.name());
AddNodeAttr("T", BaseType(input.dtype()), &ndef);
Status s;
Node* ret = g->AddNode(ndef, &s);
TF_CHECK_OK(s);
g->AddEdge(input.node, input.index, ret, 0);
return ret;
}
std::vector<string> InputDevices(const Node& caller) {
std::vector<string> input_devices(caller.in_edges().size());
std::vector<string> input_tensors(caller.in_edges().size());
for (const Edge* edge : caller.in_edges()) {
if (edge->IsControlEdge()) continue;
const string& input_device = edge->src()->has_assigned_device_name()
? edge->src()->assigned_device_name()
: edge->src()->requested_device();
input_devices[edge->dst_input()] = input_device;
input_tensors[edge->dst_input()] =
absl::StrCat(edge->src()->name(), ":", edge->src_output());
}
if (VLOG_IS_ON(4)) {
VLOG(4) << "Function instantiation input devices:";
for (int i = 0; i < input_devices.size(); ++i) {
if (input_tensors[i].empty()) continue;
VLOG(4) << " [index " << i << "]"
<< " device: " << input_devices[i]
<< " (input: " << input_tensors[i] << ")";
}
}
return input_devices;
}
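// The three placement policies: the default placer only pins input identities
// to the devices of the caller's inputs; the single-device placer puts every
// inlined node on the caller's device; the multi-device placer additionally
// colocates input/output identities and merges unset fields of a body node's
// device with the caller's parsed device name.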
class DefaultFunctionBodyPlacer : public InlinedFunctionBodyPlacer {
public:
explicit DefaultFunctionBodyPlacer(const Node& caller)
: input_devices_(InputDevices(caller)) {}
absl::optional<string> InputNodeDevice(int input_index) const override {
return input_devices_[input_index];
}
absl::optional<string> OutputNodeDevice(int output_index) const override {
return absl::nullopt;
}
bool ColocateInputOutputIdentities() const override { return false; }
absl::optional<string> ControlNodeDevice() const override {
return absl::nullopt;
}
absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const override {
return absl::nullopt;
}
private:
const std::vector<string> input_devices_;
};
class SingleDeviceFunctionBodyPlacer : public InlinedFunctionBodyPlacer {
public:
explicit SingleDeviceFunctionBodyPlacer(const Node& caller)
: caller_device_(caller.def().device()) {}
absl::optional<string> InputNodeDevice(int input_index) const override {
return caller_device_;
}
absl::optional<string> OutputNodeDevice(int output_index) const override {
return caller_device_;
}
bool ColocateInputOutputIdentities() const override { return false; }
absl::optional<string> ControlNodeDevice() const override {
return caller_device_;
}
absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const override {
return caller_device_;
}
private:
const string caller_device_;
};
class MultiDeviceFunctionBodyPlacer : public InlinedFunctionBodyPlacer {
public:
explicit MultiDeviceFunctionBodyPlacer(const Node& caller)
: caller_device_(caller.def().device()),
input_devices_(InputDevices(caller)) {
has_parsed_caller_device_ =
DeviceNameUtils::ParseFullName(caller_device_, &caller_parsed_device_);
}
absl::optional<string> InputNodeDevice(int input_index) const override {
return input_devices_[input_index];
}
absl::optional<string> OutputNodeDevice(int output_index) const override {
return absl::nullopt;
}
bool ColocateInputOutputIdentities() const override { return true; }
absl::optional<string> ControlNodeDevice() const override {
return caller_device_;
}
absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const override {
if (ndef.device().empty()) return caller_device_;
if (!has_parsed_caller_device_) return ndef.device();
DeviceNameUtils::ParsedName ndef_parsed_device;
if (!DeviceNameUtils::ParseFullName(ndef.device(), &ndef_parsed_device))
return ndef.device();
DeviceNameUtils::MergeUnsetDevNames(&ndef_parsed_device,
caller_parsed_device_);
return DeviceNameUtils::ParsedNameToString(ndef_parsed_device);
}
private:
string caller_device_;
bool has_parsed_caller_device_;
DeviceNameUtils::ParsedName caller_parsed_device_;
std::vector<string> input_devices_;
};
}
std::unique_ptr<InlinedFunctionBodyPlacer>
InlinedFunctionBodyPlacer::DefaultPlacer(const Graph& graph,
const Node& caller) {
VLOG(3) << "Create default placer for inlined function body.";
return std::make_unique<DefaultFunctionBodyPlacer>(caller);
}
std::unique_ptr<InlinedFunctionBodyPlacer>
InlinedFunctionBodyPlacer::SingleDevicePlacer(const Graph& graph,
const Node& caller) {
VLOG(3) << "Create single device placer for inlined function body.";
return std::make_unique<SingleDeviceFunctionBodyPlacer>(caller);
}
std::unique_ptr<InlinedFunctionBodyPlacer>
InlinedFunctionBodyPlacer::MultiDevicePlacer(const Graph& graph,
const Node& caller) {
VLOG(3) << "Create multi device placer for inlined function body.";
return std::make_unique<MultiDeviceFunctionBodyPlacer>(caller);
}
namespace {
Status ValidateNoInline(const FunctionBody* fbody) {
const auto attr = AttrSlice(&fbody->record->fdef().attr());
bool noinline = false;
if (TryGetNodeAttr(attr, kNoInlineAttr, &noinline) && noinline) {
return errors::InvalidArgument(
"Can't inline function marked with '_noinline'");
}
return absl::OkStatus();
}
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
void PropagateDebugInfoToNode(const string& func,
const std::vector<const Node*>& nodes,
NodeDef* target) {
if (nodes.empty() || target->has_experimental_debug_info()) {
return;
}
for (const Node* node : nodes) {
const auto& node_def = node->def();
if (node_def.has_experimental_debug_info()) {
target->mutable_experimental_debug_info()->MergeFrom(
node_def.experimental_debug_info());
} else {
target->mutable_experimental_debug_info()->add_original_node_names(
node_def.name());
target->mutable_experimental_debug_info()->add_original_func_names(func);
}
}
}
}
string InlineFunctionBodyOptions::DebugString() const {
const auto true_false = [](bool b) { return b ? "true" : "false"; };
const auto keep_caller_node_str = [this]() -> string {
switch (keep_caller_node) {
case KeepCallerNode::kDoNotKeep:
return "DoNotKeep";
case KeepCallerNode::kFetchable:
return "Fetchable";
case KeepCallerNode::kTargetable:
return "Targetable";
}
};
return absl::StrCat(
"disable_inlining=", true_false(disable_inlining),
", ignore_noinline=", true_false(ignore_noinline),
", inline_impl_selection_group_functions=",
true_false(inline_impl_selection_group_functions),
", keep_caller_node=", keep_caller_node_str(), ", output_control_src=",
output_control_src == OutputControlSrc::kDataOutputs ? "DataOutputs"
: "ControlOutputs",
", inlined_function_body_placer=", inlined_function_body_placer.name,
", uniquify_frame_names=", true_false(uniquify_frame_names));
}
Status ValidateInlining(const Node* node, const FunctionBody* fbody,
const InlineFunctionBodyOptions& options) {
const auto num_node_inputs = static_cast<size_t>(node->num_inputs());
const auto num_node_outputs = static_cast<size_t>(node->num_outputs());
if (num_node_inputs != fbody->arg_types.size() ||
num_node_inputs != fbody->arg_nodes.size()) {
return errors::InvalidArgument(
"Node inputs do not match function arguments: inputs=", num_node_inputs,
" arg_types=", fbody->arg_types.size(),
" arg_nodes=", fbody->arg_nodes.size());
}
if (num_node_outputs != fbody->ret_types.size() ||
num_node_outputs != fbody->ret_nodes.size()) {
return errors::InvalidArgument(
"Node outputs do not match function returns: outputs=",
num_node_outputs, " ret_types=", fbody->ret_types.size(),
" ret_nodes=", fbody->ret_nodes.size());
}
for (int i = 0; i < node->num_inputs(); ++i) {
if (node->input_type(i) != fbody->arg_types[i]) {
return errors::InvalidArgument(
"Node input type doesn't match function argument type: ",
node->input_type(i), " != ", fbody->arg_types[i], " @ index=", i);
}
}
for (int i = 0; i < node->num_outputs(); ++i) {
if (node->output_type(i) != fbody->ret_types[i]) {
return errors::InvalidArgument(
"Node output type doesn't match function return type: ",
node->output_type(i), " != ", fbody->ret_types[i], " @ index=", i);
}
}
if (options.disable_inlining) {
return errors::InvalidArgument(
"Function inlining explicitly disabled by 'options.disable_inlining'");
}
if (!options.inline_impl_selection_group_functions) {
bool is_impl_selection_group_function =
fbody->record->fdef().attr().find("api_implements") !=
fbody->record->fdef().attr().end();
if (is_impl_selection_group_function) {
return errors::InvalidArgument(
"Inlining of implementation selection group function ",
fbody->record->fdef().signature().name(),
" is disabled by options.inline_impl_selection_group_functions");
}
}
if (!options.ignore_noinline) {
TF_RETURN_IF_ERROR(ValidateNoInline(fbody));
}
return absl::OkStatus();
}
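// Performs the actual inlining: validates the call site, instantiates the
// configured body placer, and builds the NoOp/Identity scaffolding on the
// devices the placer chooses before splicing the body into 'g'.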
Status InlineFunctionBody(const FunctionLibraryDefinition& flib_def, Graph* g,
Node* caller, const FunctionBody* fbody,
const InlineFunctionBodyOptions& options) {
VLOG(3) << "Inline function call: " << SummarizeNode(*caller) << " ["
<< options.DebugString() << "]";
VLOG(4) << "Inlining function: "
<< fbody->record->fdef().DebugString();
VLOG(4) << "Current graphdef: " << g->ToGraphDefDebug().DebugString();
VLOG(4) << "Caller: " << caller->DebugString();
Status validation = ValidateInlining(caller, fbody, options);
if (!validation.ok()) {
return errors::Internal("Inlining mismatch: ", validation.message());
}
const std::unique_ptr<InlinedFunctionBodyPlacer> placer =
options.inlined_function_body_placer.get(*g, *caller);
static constexpr bool kDoNotCheckDuplicates = true;
const auto no_op = [&](StringPiece name) -> Node* {
Node* node = AddNoOp(absl::StrCat(caller->name(), "/", name), g);
const absl::optional<string> device = placer->ControlNodeDevice();
if (device.has_value()) node->set_requested_device(*device);
retur | #include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
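// Checks that a "_class" colocation constraint inside the inlined body is
// rewritten to point at the inlined input Identity node instead of the
// original function argument.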
TEST(InlineFunctionBody, ColocationConstraintPropagation) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
FunctionDef fdef = FunctionDefHelper::Define(
"f",
{"x: float", "y: float"},
{"z: float"},
{},
{
{{"z"},
"AddV2",
{"x", "y"},
{{"T", DT_FLOAT}, {"_class", std::vector<string>({"loc:@x"})}}},
});
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
auto g = std::make_unique<Graph>(OpRegistry::Global());
GraphConstructorOptions opts;
const Tensor kZero = test::AsScalar<int64_t>(0);
const Tensor kOne = test::AsScalar<int64_t>(1);
GraphDef gdef = GDef(
{
NDef("inp0", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kZero}}),
NDef("inp1", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}),
NDef("call", "StatefulPartitionedCall", {"inp0", "inp1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("f", {})}}),
NDef("out0", "_Retval", {"call:0"}, {{"T", DT_FLOAT}}),
},
{});
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, gdef, g.get()));
Node* caller = nullptr;
for (Node* node : g->nodes()) {
if (node->name() == "call") {
caller = node;
}
}
std::unique_ptr<FunctionBody> fbody;
TF_ASSERT_OK(FunctionDefToBodyHelper(fdef, {}, &flib_def, &fbody));
InlineFunctionBodyOptions inline_options;
TF_ASSERT_OK(InlineFunctionBody(flib_def, g.get(), caller, fbody.get(),
inline_options));
GraphDef expected_gdef = GDef(
{
NDef("inp0", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kZero}}),
NDef("inp1", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}),
NDef("out0", "_Retval", {"Func/call/output/_2"}, {{"T", DT_FLOAT}}),
NDef("Func/call/input/_0", "Identity", {"inp0"}, {{"T", DT_FLOAT}}),
NDef("Func/call/input/_1", "Identity", {"inp1"}, {{"T", DT_FLOAT}}),
NDef("call/z", "AddV2", {"Func/call/input/_0", "Func/call/input/_1"},
{{"T", DT_FLOAT},
{"_class", std::vector<string>({"loc:@Func/call/input/_0"})}}),
NDef("Func/call/output/_2", "Identity", {"call/z"},
{{"T", DT_FLOAT}}),
},
{});
GraphDef output_gdef;
g->ToGraphDef(&output_gdef);
TF_EXPECT_GRAPH_EQ(expected_gdef, output_gdef);
}
}
} |
1,586 | cpp | tensorflow/tensorflow | lower_functional_ops | tensorflow/core/common_runtime/lower_functional_ops.cc | tensorflow/core/common_runtime/lower_functional_ops_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_LOWER_FUNCTIONAL_OPS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_LOWER_FUNCTIONAL_OPS_H_
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
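// Graph optimization pass (registered at PRE_PLACEMENT) that inlines function
// call nodes and lowers functional If/Case/While ops into Switch/Merge-based
// control flow, skipping nodes marked for TPU or XLA compilation.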
class LowerFunctionalOpsPass : public GraphOptimizationPass {
public:
LowerFunctionalOpsPass() = default;
Status Run(const GraphOptimizationPassOptions& options) override;
static constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
static constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
};
}
#endif
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_propagation.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
constexpr const char* const kTpuReplicateAttr = "_tpu_replicate";
constexpr const char* const kXlaClusterAttr = "_xla_compile_id";
constexpr const char* const kXlaMustCompileAttr = "_XlaMustCompile";
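// A node is lowered only when the relevant bool attr is present and true; it
// is left untouched when it carries a TPU replication or XLA compilation
// marker.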
bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
bool match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && match;
}
bool CheckStringAttr(const Node* n, absl::string_view attr_name) {
string match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && !match.empty();
}
bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
}
bool LowerAsMultiDeviceFunctionIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerAsMultiDeviceFunctionAttr);
}
bool MarkedForTpuCompilation(const Node* n) {
return CheckStringAttr(n, kTpuReplicateAttr);
}
bool MarkedForXlaCompilation(const Node* n) {
return CheckStringAttr(n, kXlaClusterAttr) ||
CheckBoolAttr(n, kXlaMustCompileAttr);
}
bool HasArgsOrRetvals(const Graph& g) {
for (const Node* n : g.op_nodes()) {
if (n->IsArg() || n->IsRetval()) return true;
}
return false;
}
const absl::flat_hash_set<std::string>& DevicePropagationOpList() {
static const auto op_list = new absl::flat_hash_set<std::string>(
{"Identity", "IdentityN", "Enter", "Exit", "Switch", "Merge",
"NextIteration"});
return *op_list;
}
bool IsPropagatableDevice(StringPiece device_string) {
DeviceNameUtils::ParsedName device;
return DeviceNameUtils::ParseFullName(device_string, &device) &&
device.type == DEVICE_TPU;
}
}
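// Rewrites eligible function calls and functional control flow ops in place,
// then propagates TPU device assignments onto the nodes created by the
// rewrites.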
Status LowerFunctionalOpsPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.partition_graphs != nullptr) {
return errors::Internal(
"Lowering If/While ops should happen before partitioning.");
}
if (options.graph == nullptr) {
return absl::OkStatus();
}
Graph* g = options.graph->get();
if (g == nullptr) {
return errors::Internal(
"Lowering While op requires a graph to be available.");
}
FunctionLibraryDefinition* flib_def = options.flib_def;
if (flib_def == nullptr) {
return errors::Internal(
"Lowering If op requires a FunctionLibraryDefinition to be available.");
}
const bool lower_function_calls =
options.session_options && options.session_options->config.graph_options()
.optimizer_options()
.do_function_inlining();
bool keep_lowered_nodes_fetchable = !HasArgsOrRetvals(*g);
const bool functional_control_flow =
options.session_options &&
(options.session_options->config.experimental().executor_type() ==
"SINGLE_THREADED_EXECUTOR" ||
options.session_options->config.experimental().use_tfrt() ||
options.session_options->config.experimental()
.disable_functional_ops_lowering());
const auto used_by_xla = [](Node* node) -> bool {
return MarkedForTpuCompilation(node) || MarkedForXlaCompilation(node);
};
const auto lower_control_flow = [&](Node* node) -> bool {
return LowerUsingSwitchMergeIsOn(node) && !used_by_xla(node);
};
int num_node_ids_before_lowering = g->num_node_ids();
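  // Node ids 0 and 1 are reserved for the graph's source and sink nodes, so
  // start at 2. The bound is re-read each iteration, so nodes added by a
  // rewrite are themselves visited and can be lowered recursively.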
for (int i = 2; i < g->num_node_ids(); ++i) {
Node* n = g->FindNodeId(i);
if (n == nullptr) continue;
if (IsFunctionCall(*flib_def, *n) && !used_by_xla(n) &&
(lower_function_calls || LowerAsMultiDeviceFunctionIsOn(n))) {
TF_RETURN_IF_ERROR(RewriteFunctionCallNode(n, g, *flib_def,
keep_lowered_nodes_fetchable));
continue;
}
if (functional_control_flow) continue;
if (n->IsIfNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteIfNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsCaseNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteCaseNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsWhileNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(
RewriteWhileNode(n, g, flib_def, keep_lowered_nodes_fetchable));
} else {
DCHECK(!lower_control_flow(n))
<< "Node " << FormatNodeForError(*n) << " of type "
<< n->type_string() << " has '"
<< LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr
<< "' attr set but it does not support lowering.\n";
}
}
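  // The rewrites introduce pass-through nodes (Identity, Switch, Merge, ...)
  // without device assignments; propagate TPU devices onto the newly created
  // nodes only (ids >= num_node_ids_before_lowering).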
PropagateDevices(
[num_node_ids_before_lowering](const Node& n) {
return DevicePropagationOpList().contains(n.type_string()) &&
n.id() >= num_node_ids_before_lowering;
},
IsPropagatableDevice, g);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 10,
LowerFunctionalOpsPass);
} | #include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
typedef FunctionDefHelper FDH;
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr;
static void AssertHasSubstr(StringPiece s, StringPiece expected) {
ASSERT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
FunctionDef WhileWithIfCond(int32_t N) {
const Tensor kN = test::AsScalar<int32>(N);
return FDH::Define(
"WhileWithIfCond",
{"counter: int32", "pred: bool", "x: int32"},
{"z: bool"},
{},
{
{{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT32}}},
{{"z"}, "Less", {"counter", "N"}, {{"T", DT_INT32}}},
});
}
FunctionDef WhileWithIfBody() {
NameAttrList then_func;
then_func.set_name("XTimesTwo");
NameAttrList else_func;
else_func.set_name("XTimesFour");
const Tensor kOne = test::AsScalar<int32>(1);
std::vector<DataType> input_types = {DT_INT32};
std::vector<DataType> output_types = {DT_INT32};
return FDH::Define(
"WhileWithIfBody",
{"counter: int32", "pred: bool", "x: int32"},
{"updated_counter: int32", "pred: bool", "if: int32"},
{},
{
{{"if"},
"If",
{"pred", "x"},
{{"then_branch", then_func},
{"else_branch", else_func},
{"Tcond", DT_BOOL},
{"Tin", input_types},
{"Tout", output_types},
{kLowerUsingSwitchMergeAttr, true}}},
{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_INT32}}},
{{"updated_counter"}, "Add", {"counter", "one"}, {{"T", DT_INT32}}},
});
}
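// Lowers a While loop whose body contains a functional If and checks that no
// While/If nodes remain while the result still matches x * 2^3 (pred true) or
// x * 4^3 (pred false).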
TEST(LowerIfWhileTest, CondInWhile) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::XTimesFour();
*f_lib_proto.add_function() = WhileWithIfCond(3);
*f_lib_proto.add_function() = WhileWithIfBody();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto counter = ops::Placeholder(root.WithOpName("counter"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(counter.node()), NodeBuilder::NodeOut(pred.node()),
NodeBuilder::NodeOut(a.node())});
Node* while_node;
AttrValue cond_func;
cond_func.mutable_func()->set_name("WhileWithIfCond");
AttrValue body_func;
body_func.mutable_func()->set_name("WhileWithIfBody");
TF_ASSERT_OK(NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_BOOL, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto* op : graph->op_nodes()) {
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 8);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 64);
}
}
FunctionDef IfWithWhileThen() {
NameAttrList cond_func;
cond_func.set_name("LessThanOrEqualToN");
NameAttrList body_func;
body_func.set_name("XTimesTwo");
std::vector<DataType> input_and_output_types = {DT_INT32};
std::vector<TensorShape> output_shapes = {TensorShape()};
return FDH::Define(
"IfWithWhileThen",
{"x: int32"},
{"while: int32"},
{},
{
{{"while"},
"While",
{"x"},
{{"cond", cond_func},
{"body", body_func},
{"T", input_and_output_types},
{"output_shapes", output_shapes},
{kLowerUsingSwitchMergeAttr, true}}},
});
}
TEST(LowerIfWhileTest, WhileInCond) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
*f_lib_proto.add_function() = IfWithWhileThen();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue then_func;
then_func.mutable_func()->set_name("IfWithWhileThen");
AttrValue else_func;
else_func.mutable_func()->set_name("XTimesTwo");
Node* if_node;
TF_ASSERT_OK(NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", then_func)
.Attr("else_branch", else_func)
.Attr("Tout", {DT_INT32})
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &if_node));
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "if") {
node_called_if_count++;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "if") {
node_called_if_count++;
}
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 2);
}
}
}
} |
1,587 | cpp | tensorflow/tensorflow | replicate_per_replica_nodes | tensorflow/core/common_runtime/replicate_per_replica_nodes.cc | tensorflow/core/common_runtime/replicate_per_replica_nodes_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_REPLICATE_PER_REPLICA_NODES_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_REPLICATE_PER_REPLICA_NODES_H_
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
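// Rewrites a function graph so that every node assigned to a composite device
// is replaced by one replica per underlying device of that composite device,
// fixing up edges to and from nodes on regular devices.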
Status ReplicatePerReplicaNodesInFunctionGraph(
const absl::flat_hash_map<string, const std::vector<string>*>&
composite_devices,
Graph* graph);
}
#endif
#include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include <algorithm>
#include <queue>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
constexpr int kOptimizeCrossHostEdgesTheshold = 8;
constexpr int kOptimizeCrossHostDataEdgesTheshold = 2;
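// Tracks the per-replica copies of every node assigned to a composite device.
// Replicas are created lazily, as the edges that need them are processed.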
class ReplicateHelper {
public:
Status InitializeNode(const Node* node, int num_allowed_devices) {
if (replicated_nodes_map_.find(node) != replicated_nodes_map_.end()) {
return errors::InvalidArgument("Node ", node->name(),
" has been replicated.");
}
std::vector<Node*> replicated_nodes(num_allowed_devices, nullptr);
replicated_nodes_map_.emplace(node, std::move(replicated_nodes));
return absl::OkStatus();
}
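  // Materializes replica 'allowed_device_index' of 'node' (if not already
  // created) on the corresponding underlying device, appending a "/R<i>"
  // suffix to the name and a "sub_index" attr for _Arg nodes.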
Status ReplicateNode(const Node* node,
const std::vector<string>& allowed_devices,
int allowed_device_index, Graph* graph) {
auto& replicated_nodes = replicated_nodes_map_.at(node);
if (replicated_nodes[allowed_device_index] != nullptr) {
return absl::OkStatus();
}
const auto& device = allowed_devices.at(allowed_device_index);
NodeDef node_def = node->def();
const string suffix = strings::StrCat("/R", allowed_device_index);
node_def.set_name(graph->NewName(strings::StrCat(node_def.name(), suffix)));
TF_ASSIGN_OR_RETURN(Node * replicated_node, graph->AddNode(node_def));
replicated_node->set_assigned_device_name(device);
if (replicated_node->IsArg()) {
replicated_node->AddAttr("sub_index", allowed_device_index);
}
replicated_nodes[allowed_device_index] = replicated_node;
return absl::OkStatus();
}
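  // An edge from a regular device into the composite cluster fans out: the
  // single source feeds every existing replica of the destination.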
void ReplicateFromRegularDeviceToCompositeDevice(const Edge* edge,
Graph* graph) const {
Node* src = edge->src();
const std::vector<Node*>& dst_replicated_nodes =
replicated_nodes_map_.at(edge->dst());
for (Node* dst : dst_replicated_nodes) {
if (dst == nullptr) {
continue;
}
graph->AddEdge(src, edge->src_output(), dst, edge->dst_input());
}
}
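  // An edge between two composite-device nodes is replicated index by index:
  // replica i of the source feeds replica i of the destination.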
Status ReplicateFromCompositeDeviceToCompositeDevice(
const Edge* edge, const std::vector<string>& allowed_devices,
Graph* graph) {
const std::vector<Node*>& src_replicated_nodes =
replicated_nodes_map_.at(edge->src());
const std::vector<Node*>& dst_replicated_nodes =
replicated_nodes_map_.at(edge->dst());
if (src_replicated_nodes.size() != dst_replicated_nodes.size()) {
return errors::InvalidArgument(
"Nodes assigned to the same composite device should have the "
"same number of replicated nodes. Found an edge from node ",
edge->src()->name(), " (", src_replicated_nodes.size(),
" replicated nodes) to node ", edge->dst()->name(), " (",
dst_replicated_nodes.size(), " replicated nodes).");
}
for (int i = 0; i < src_replicated_nodes.size(); ++i) {
Node* dst = dst_replicated_nodes.at(i);
if (dst == nullptr) {
continue;
}
TF_RETURN_IF_ERROR(ReplicateNode(edge->src(), allowed_devices, i, graph));
graph->AddEdge(src_replicated_nodes.at(i), edge->src_output(), dst,
edge->dst_input());
}
return absl::OkStatus();
}
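  // An edge leaving the composite cluster uses the replica assigned to the
  // destination's device when one exists; otherwise every replica is created
  // and joined via control edges or, for _Arg sources, a Pack node.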
Status ReplicateFromCompositeDeviceToRegularDevice(
const Edge* edge, const std::vector<string>& allowed_devices,
Graph* graph) {
const std::vector<Node*>& src_replicated_nodes =
replicated_nodes_map_.at(edge->src());
Node* dst = edge->dst();
const string& dst_device = dst->assigned_device_name();
bool found_src_node = false;
for (int i = 0; i < allowed_devices.size(); ++i) {
if (allowed_devices.at(i) == dst_device) {
TF_RETURN_IF_ERROR(
ReplicateNode(edge->src(), allowed_devices, i, graph));
graph->AddEdge(src_replicated_nodes.at(i), edge->src_output(), dst,
edge->dst_input());
found_src_node = true;
break;
}
}
if (!found_src_node) {
for (int i = 0; i < allowed_devices.size(); ++i) {
TF_RETURN_IF_ERROR(
ReplicateNode(edge->src(), allowed_devices, i, graph));
}
if (edge->IsControlEdge()) {
for (Node* replicated_node : src_replicated_nodes) {
graph->AddControlEdge(replicated_node, dst,
true);
}
return absl::OkStatus();
}
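      // No replica lives on the destination's device: gather all replicas of
      // the argument into one tensor with a Pack node placed on the
      // destination's device.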
if (edge->src()->type_string() == "_Arg") {
NodeDefBuilder pack_builder(
graph->NewName(absl::StrCat(edge->src()->name(), "/Packed")),
"Pack");
const int num_replicas = src_replicated_nodes.size();
pack_builder.Attr("N", num_replicas);
const DataType dtype = edge->src()->output_type(edge->src_output());
pack_builder.Attr("T", dtype);
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.reserve(src_replicated_nodes.size());
for (Node* replicated_node : src_replicated_nodes) {
inputs.emplace_back(NodeDefBuilder::NodeOut{
replicated_node->name(), edge->src_output(), dtype});
}
pack_builder.Input(inputs);
NodeDef pack_def;
TF_RETURN_IF_ERROR(pack_builder.Finalize(&pack_def));
TF_ASSIGN_OR_RETURN(Node * pack_node, graph->AddNode(pack_def));
pack_node->set_assigned_device_name(dst->assigned_device_name());
for (int i = 0; i < src_replicated_nodes.size(); ++i) {
graph->AddEdge(src_replicated_nodes[i], edge->src_output(), pack_node,
i);
}
graph->AddEdge(pack_node, 0, dst, edge->dst_input());
} else {
return errors::InvalidArgument(
"Dst node should be assigned to an allowed device. Found an "
"edge from node ",
edge->src()->name(), " assigned to ",
edge->src()->assigned_device_name(), " to node ", dst->name(),
" assigned to ", dst_device);
}
}
return absl::OkStatus();
}
private:
absl::flat_hash_map<const Node*, std::vector<Node*>> replicated_nodes_map_;
};
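// Drives the rewrite for one composite device: first handles edges that leave
// the cluster, then repeatedly removes original nodes whose outgoing edges
// have all been rewritten, replicating their incoming edges as it goes.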
Status ReplicateNodesAndEdges(const std::vector<string>& allowed_devices,
absl::flat_hash_map<Node*, int>* cluster_nodes,
ReplicateHelper* helper, Graph* graph) {
std::queue<Node*> nodes_ready_to_delete;
for (auto& pair : *cluster_nodes) {
Node* node = pair.first;
for (const Edge* edge : node->out_edges()) {
Node* dst = edge->dst();
if (dst->assigned_device_name() != node->assigned_device_name()) {
TF_RETURN_IF_ERROR(helper->ReplicateFromCompositeDeviceToRegularDevice(
edge, allowed_devices, graph));
--pair.second;
}
}
if (cluster_nodes->at(node) == 0) {
nodes_ready_to_delete.push(node);
}
}
while (!nodes_ready_to_delete.empty()) {
Node* node = nodes_ready_to_delete.front();
nodes_ready_to_delete.pop();
for (const Edge* edge : node->in_edges()) {
Node* src = edge->src();
if (src->assigned_device_name() != node->assigned_device_name()) {
helper->ReplicateFromRegularDeviceToCompositeDevice(edge, graph);
} else {
TF_RETURN_IF_ERROR(
helper->ReplicateFromCompositeDeviceToCompositeDevice(
edge, allowed_devices, graph));
if (--(*cluster_nodes)[src] == 0) {
nodes_ready_to_delete.push(src);
}
}
}
cluster_nodes->erase(node);
graph->RemoveNode(node);
}
return absl::OkStatus();
}
}
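// Groups nodes by composite device, short-circuits the single-underlying-
// device case by plain device reassignment, runs the replication helper
// otherwise, and finally applies the cross-host control/data edge
// optimizations to the rewritten graph.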
Status ReplicatePerReplicaNodesInFunctionGraph(
const absl::flat_hash_map<string, const std::vector<string>*>&
composite_devices,
Graph* graph) {
VLOG(1) << "Starting ReplicatePerReplicaNodesInFunctionGraph";
VLOG(1) << "Graph #nodes " << graph->num_nodes() << " #edges "
<< graph->num_edges();
std::set<string> composite_device_names;
for (const auto& it : composite_devices) {
composite_device_names.insert(it.first);
}
absl::flat_hash_map<string, absl::flat_hash_map<Node*, int>>
composite_device_to_cluster_nodes;
for (Node* n : graph->op_nodes()) {
if (composite_device_names.find(n->assigned_device_name()) !=
composite_device_names.end()) {
composite_device_to_cluster_nodes[n->assigned_device_name()].emplace(
n, n->out_edges().size());
}
}
if (composite_device_to_cluster_nodes.empty()) {
VLOG(1) << "No nodes with composiste device found.";
return absl::OkStatus();
}
for (auto& it : composite_device_to_cluster_nodes) {
const std::vector<string>& allowed_devices =
*composite_devices.at(it.first);
if (allowed_devices.empty()) {
return errors::InvalidArgument("No allowed device of composite device: ",
it.first);
}
absl::flat_hash_map<Node*, int>& cluster_nodes = it.second;
if (allowed_devices.size() == 1) {
for (const auto& pair : it.second) {
Node* n = pair.first;
n->set_assigned_device_name(allowed_devices.at(0));
if (n->IsArg()) {
n->AddAttr("sub_index", 0);
}
}
continue;
}
ReplicateHelper helper;
for (const auto& pair : cluster_nodes) {
TF_RETURN_IF_ERROR(
helper.InitializeNode(pair.first, allowed_devices.size()));
}
TF_RETURN_IF_ERROR(ReplicateNodesAndEdges(allowed_devices, &cluster_nodes,
&helper, graph));
if (!cluster_nodes.empty()) {
return errors::InvalidArgument(
"There are still ", cluster_nodes.size(),
" nodes on CompositiveDevice ",
cluster_nodes.begin()->first->assigned_device_name());
}
}
TF_RETURN_IF_ERROR(OptimizeCrossHostControlOutputEdges(
graph, kOptimizeCrossHostEdgesTheshold));
TF_RETURN_IF_ERROR(OptimizeCrossHostControlInputEdges(
graph, kOptimizeCrossHostEdgesTheshold));
TF_RETURN_IF_ERROR(OptimizeCrossHostDataOutputEdges(
graph, kOptimizeCrossHostDataEdgesTheshold));
VLOG(1) << "Finished ReplicatePerReplicaNodesInFunctionGraph";
VLOG(1) << "Graph #nodes " << graph->num_nodes() << " #edges "
<< graph->num_edges();
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include <map>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
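// Test helper that indexes nodes by name (with prefix fallback for replicated
// "/R<i>" names) and sets or checks assigned devices and the number of _Arg
// nodes.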
class GraphHelper {
public:
explicit GraphHelper(const Graph& graph) : graph_(graph) {
for (Node* node : graph.nodes()) {
nodes_by_name_[node->name()] = node;
}
}
Node* GetNodeByName(const string& name) {
const auto it = nodes_by_name_.find(name);
if (it != nodes_by_name_.end()) {
return it->second;
}
for (const auto& entry : nodes_by_name_) {
if (absl::StartsWith(entry.first, name)) {
return entry.second;
}
}
return nullptr;
}
void SetAssignedDevice(const string& node_name, const string& device_name) {
CHECK_NOTNULL(GetNodeByName(node_name))
->set_assigned_device_name(device_name);
}
void CheckArgNum(const int expected_num) {
int arg_num = 0;
for (Node* node : graph_.op_nodes()) {
if (node->IsArg()) {
arg_num++;
}
}
EXPECT_EQ(arg_num, expected_num);
}
void CheckAssignedDevice(const string& node_name,
const string& expected_device_name) {
EXPECT_EQ(expected_device_name,
CHECK_NOTNULL(GetNodeByName(node_name))->assigned_device_name());
}
void CheckAssignedDevicePrefix(const string& node_name,
const string& expected_device_name) {
auto assigned =
CHECK_NOTNULL(GetNodeByName(node_name))->assigned_device_name();
EXPECT_EQ(assigned.rfind(expected_device_name, 0), 0);
}
private:
const Graph& graph_;
std::map<string, Node*> nodes_by_name_;
};
TEST(ReplicatePerReplicaNodesTest, SingleCompositeDevice) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto one = ops::Const<int32>(scope.WithOpName("one"), 1);
auto write = ops::AssignVariableOp(scope.WithOpName("write"), arg, one);
auto ret = ops::_Retval(
scope.WithOpName("ret").WithControlDependencies({write}), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 5);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("one", "/device:CPU:0");
helper.SetAssignedDevice("write", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 9);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevicePrefix("arg/R0", "/device:TPU");
helper.CheckAssignedDevicePrefix("arg/R1", "/device:TPU");
helper.CheckAssignedDevicePrefix("write/R0", "/device:TPU");
helper.CheckAssignedDevicePrefix("write/R1", "/device:TPU");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("one", "/device:CPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, SingleCompositeDeviceToSingleDevice) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.CheckArgNum(1);
helper.CheckAssignedDevice("arg", "/device:TPU:0");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, MultipleCompositeDevices) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_RESOURCE, 0);
Output arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_RESOURCE, 0);
auto read0 = ops::ReadVariableOp(scope.WithOpName("read0"), arg0, DT_INT32);
auto read1 = ops::ReadVariableOp(scope.WithOpName("read1"), arg1, DT_INT32);
auto identity0 = ops::Identity(scope.WithOpName("identity0"), read0);
auto identity1 = ops::Identity(scope.WithOpName("identity1"), read1);
auto add = ops::Add(scope.WithOpName("add"), identity0, identity1);
auto ret = ops::_Retval(scope.WithOpName("ret"), add, 0);
const std::vector<string> underlying_devices_0 = {"/device:TPU:0",
"/device:TPU:1"};
const std::vector<string> underlying_devices_1 = {"/device:TPU:2",
"/device:TPU:3"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices_0},
{"/device:TPU_COMPOSITE:1", &underlying_devices_1}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 8);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg0", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read0", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("identity0", "/device:TPU:1");
helper.SetAssignedDevice("arg1", "/device:TPU_COMPOSITE:1");
helper.SetAssignedDevice("read1", "/device:TPU_COMPOSITE:1");
helper.SetAssignedDevice("identity1", "/device:TPU:3");
helper.SetAssignedDevice("add", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 8);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevice("arg0/R1", "/device:TPU:1");
helper.CheckAssignedDevice("arg1/R1", "/device:TPU:3");
helper.CheckAssignedDevice("read0/R1", "/device:TPU:1");
helper.CheckAssignedDevice("read1/R1", "/device:TPU:3");
helper.CheckAssignedDevice("identity0", "/device:TPU:1");
helper.CheckAssignedDevice("identity1", "/device:TPU:3");
helper.CheckAssignedDevice("add", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, NestedFunctions) {
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "Func", &fdef));
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
NodeDef def;
TF_ASSERT_OK(NodeDefBuilder("func", "Func", &flib_def)
.Input(arg.name(), 0, DT_RESOURCE)
.Finalize(&def));
Status status;
Node* func = scope.graph()->AddNode(def, &status);
TF_ASSERT_OK(status);
scope.graph()->AddEdge(arg.node(), 0, func, 0);
auto ret = ops::_Retval(scope.WithOpName("ret"), Output(func), 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
GraphHelper helper(graph);
EXPECT_EQ(graph.num_op_nodes(), 3);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("func", "/device:CPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 5);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevice("arg/R0", "/device:TPU:0");
helper.CheckAssignedDevice("arg/R1", "/device:TPU:1");
helper.CheckAssignedDevice("arg/Packed", "/device:CPU:0");
helper.CheckAssignedDevice("func", "/device:CPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
const EdgeSet& packed_in_edges =
helper.GetNodeByName("arg/Packed")->in_edges();
EXPECT_EQ(packed_in_edges.size(), 2);
auto it = packed_in_edges.begin();
EXPECT_EQ(helper.GetNodeByName("arg/R0"), (*it++)->src());
EXPECT_EQ(helper.GetNodeByName("arg/R1"), (*it)->src());
const EdgeSet& func_in_edges = helper.GetNodeByName("func")->in_edges();
EXPECT_EQ(func_in_edges.size(), 1);
EXPECT_EQ(helper.GetNodeByName("arg/Packed"),
(*func_in_edges.begin())->src());
}
}
TEST(ReplicatePerReplicaNodesTest, DeadArgNodes) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.CheckArgNum(1);
helper.CheckAssignedDevice("arg/R0", "/device:TPU:0");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
}
} |
1,588 | cpp | tensorflow/tensorflow | quantize_training | tensorflow/core/common_runtime/quantize_training.cc | tensorflow/core/common_runtime/quantize_training_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_QUANTIZE_TRAINING_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_QUANTIZE_TRAINING_H_
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
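// Rewrites a training graph so that inputs of supported ops (MatMul, Conv2D)
// pass through quantization ops of the requested bit width; the op type is
// chosen by 'quant_op_type'. The GraphDef and serialized-string variants wrap
// the same rewrite.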
Status DoQuantizeTraining(int32_t num_bits, const string& quant_op_type,
Graph* g);
Status DoQuantizeTrainingOnSerializedGraphDef(const string& input_graph,
int32_t num_bits,
const string& quant_op_type,
string* result_graph);
Status DoQuantizeTrainingOnGraphDef(const GraphDef& input_graphdef,
int32_t num_bits,
const string& quant_op_type,
GraphDef* result_graphdef);
}
#endif
#include "tensorflow/core/common_runtime/quantize_training.h"
#include <algorithm>
#include <atomic>
#include <set>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
const uint32 kAllowedInputs = 2;
const float kEMADecay = 0.999;
const auto* nodes_to_rewrite =
new std::unordered_set<string, StringPieceHasher>{"MatMul", "Conv2D"};
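// One input edge of a MatMul/Conv2D node that should be quantized, together
// with the bit width and whether the value range is signed and/or statically
// known (e.g. [0, 6] for Relu6).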
struct EdgeToConvert {
const Edge* edge;
int32 num_bits;
bool signed_input;
bool range_given;
float input_min;
float input_max;
EdgeToConvert(const Edge* e, int32_t bits, bool sign, bool range, float min,
float max)
: edge(e),
num_bits(bits),
signed_input(sign),
range_given(range),
input_min(min),
input_max(max) {}
};
inline bool IsGradientNode(const Graph* graph, const Node* node) {
static const string tag = "gradients";
return (node->name().compare(0, tag.size(), tag) == 0);
}
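// Walks producers of 'node' to decide how the tensor it feeds should be
// quantized: activations such as Relu, Relu6, Sigmoid and Tanh have a known
// sign and possibly a fixed range, while pass-through ops (Identity, pooling,
// Reshape, ConcatV2) defer to their own inputs. Returns false for
// unrecognized op types.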
bool FindType(const Graph* graph, const Node* node, bool* signed_input,
bool* range_given, float* input_min, float* input_max) {
const string& src_op = node->type_string();
if (src_op == "Const" || src_op == "Variable" || src_op == "VariableV2") {
*signed_input = true;
*range_given = false;
} else if (src_op == "Relu") {
*signed_input = false;
*range_given = false;
} else if (src_op == "Relu6") {
*signed_input = false;
*range_given = true;
*input_min = 0;
*input_max = 6;
} else if (src_op == "Sigmoid") {
*signed_input = false;
*range_given = true;
*input_min = 0;
*input_max = 1;
} else if (src_op == "Tanh") {
*signed_input = true;
*range_given = true;
*input_min = -1;
*input_max = 1;
} else if (src_op == "Reshape" || src_op == "ConcatV2") {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() != Graph::kControlSlot && edge->dst_input() == 0) {
FindType(graph, edge->src(), signed_input, range_given, input_min,
input_max);
}
}
} else if (src_op == "Identity" || src_op == "MaxPool" ||
src_op == "AvgPool" || src_op == "MaxPool3D" ||
src_op == "AvgPool3D") {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() != Graph::kControlSlot) {
FindType(graph, edge->src(), signed_input, range_given, input_min,
input_max);
}
}
} else {
*signed_input = true;
*range_given = false;
return false;
}
return true;
}
Status FindSaveOp(const Graph* graph, Node** save_op,
std::vector<const Edge*>* in_edges, bool* found) {
*found = false;
for (Node* node : graph->op_nodes()) {
if (node->type_string() == "SaveV2") {
if (*found) {
return errors::InvalidArgument("Input graph has multiple SaveV2 ops.");
}
*save_op = node;
*found = true;
TF_RETURN_IF_ERROR(node->input_edges(in_edges));
}
}
return absl::OkStatus();
}
Node* FindRestoreAllOp(const Graph* graph, StringPiece save_prefix) {
for (Node* node : graph->op_nodes()) {
if (node->name() == strings::StrCat(save_prefix, "/restore_all")) {
return node;
}
}
return nullptr;
}
StringPiece GetNodeNamePrefix(const Node* node) {
StringPiece name = node->name();
return name.substr(0, name.rfind('/'));
}
void FillStringTensor(Tensor* dst, const Tensor& src) {
auto dst_flat = dst->flat<tstring>();
auto src_flat = src.flat<tstring>();
for (int i = 0; i < src.NumElements(); i++) {
dst_flat(i) = src_flat(i);
}
}
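// Rebuilds the SaveV2 op so the variables added by this rewrite are appended
// to its tensor_names, shape_and_slices and tensor inputs, then swaps the new
// save op in for the original one.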
Status ConnectVariablesToSaveOp(Graph* graph, Node* save_op,
const std::vector<const Edge*>& in_edges,
const std::vector<Node*>& added_variables) {
Node* tensor_names_op = in_edges[1]->src();
Node* shape_and_slices_op = in_edges[2]->src();
Tensor tensor_names;
Tensor shape_and_slices;
TF_RETURN_IF_ERROR(
GetNodeAttr(tensor_names_op->attrs(), "value", &tensor_names));
TF_RETURN_IF_ERROR(
GetNodeAttr(shape_and_slices_op->attrs(), "value", &shape_and_slices));
int tn_size = tensor_names.NumElements();
int var_size = added_variables.size();
NodeBuilder save_op_builder =
NodeBuilder(save_op->name(), save_op->type_string());
for (int i = 0; i < 3; i++) {
save_op_builder = save_op_builder.Input(in_edges[i]->src());
}
std::vector<NodeBuilder::NodeOut> var_nodeouts;
var_nodeouts.reserve(tn_size + var_size);
for (int i = 3; i < in_edges.size(); i++) {
var_nodeouts.emplace_back(in_edges[i]->src());
}
Tensor new_tensor_names(DT_STRING, TensorShape({tn_size + var_size}));
Tensor new_shape_and_slices(DT_STRING, TensorShape({tn_size + var_size}));
FillStringTensor(&new_tensor_names, tensor_names);
FillStringTensor(&new_shape_and_slices, shape_and_slices);
for (int i = 0; i < var_size; i++) {
Node* var = added_variables[i];
new_tensor_names.flat<tstring>()(tn_size + i) = var->name();
new_shape_and_slices.flat<tstring>()(tn_size + i) = "";
var_nodeouts.emplace_back(var);
}
save_op_builder = save_op_builder.Input(var_nodeouts);
tensor_names_op->AddAttr("value", new_tensor_names);
shape_and_slices_op->AddAttr("value", new_shape_and_slices);
Node* new_save_op;
TF_RETURN_IF_ERROR(save_op_builder.Finalize(graph, &new_save_op));
for (const Edge* edge : save_op->out_edges()) {
graph->AddControlEdge(new_save_op, edge->dst());
}
graph->RemoveNode(save_op);
return absl::OkStatus();
}
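// For every added variable, creates a RestoreV2 + Assign pair under the
// existing save/restore name prefix and hooks the Assign into the
// "<prefix>/restore_all" NoOp so checkpoint restore also initializes the new
// variables.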
Status AddRestoreVariableSubgraphs(Graph* graph, Node* save_op,
const std::vector<const Edge*>& in_edges,
const std::vector<Node*>& variables) {
Node* prefix_op = in_edges[0]->src();
StringPiece name_prefix = GetNodeNamePrefix(save_op);
Node* restore_all = FindRestoreAllOp(graph, name_prefix);
if (restore_all == nullptr) {
return errors::InvalidArgument("graph has SaveOp, but no restore_all NoOp");
}
const string restore_op_name = strings::StrCat(name_prefix, "/RestoreV2");
const string assign_op_name = strings::StrCat(name_prefix, "/Assign");
for (Node* var : variables) {
string new_restore_op_name =
strings::StrCat(graph->NewName(restore_op_name), "_qt");
string new_assign_op_name =
strings::StrCat(graph->NewName(assign_op_name), "_qt");
string tensor_names_op_name =
strings::StrCat(new_restore_op_name, "/tensor_names");
string shape_and_slices_op_name =
strings::StrCat(new_restore_op_name, "/shape_and_slices");
Node* tensor_names;
Tensor tensor_names_val(DT_STRING, TensorShape({1}));
tensor_names_val.flat<tstring>()(0) = var->name();
TF_RETURN_IF_ERROR(NodeBuilder(tensor_names_op_name, "Const")
.Attr("dtype", DT_STRING)
.Attr("value", tensor_names_val)
.Finalize(graph, &tensor_names));
Node* shape_and_slices;
Tensor shape_and_slices_val(DT_STRING, TensorShape({1}));
shape_and_slices_val.flat<tstring>()(0) = "";
TF_RETURN_IF_ERROR(NodeBuilder(shape_and_slices_op_name, "Const")
.Attr("dtype", DT_STRING)
.Attr("value", shape_and_slices_val)
.Finalize(graph, &shape_and_slices));
Node* restore_op;
TF_RETURN_IF_ERROR(NodeBuilder(new_restore_op_name, "RestoreV2")
.Input(prefix_op)
.Input(tensor_names)
.Input(shape_and_slices)
.Attr("dtypes", {DT_FLOAT})
.Finalize(graph, &restore_op));
Node* assign_op;
TF_RETURN_IF_ERROR(NodeBuilder(new_assign_op_name, "Assign")
.Input(var)
.Input(restore_op)
.Finalize(graph, &assign_op));
graph->AddControlEdge(assign_op, restore_all);
}
return absl::OkStatus();
}
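// Hooks the added variables into the graph's existing save/restore machinery,
// if any. Graphs without a SaveV2 op are left untouched.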
Status AddSaveAndRestore(Graph* graph, const std::vector<Node*>& variables) {
Node* save_op = nullptr;
std::vector<const Edge*> in_edges;
bool found = false;
TF_RETURN_IF_ERROR(FindSaveOp(graph, &save_op, &in_edges, &found));
if (found) {
TF_RETURN_IF_ERROR(
AddRestoreVariableSubgraphs(graph, save_op, in_edges, variables));
TF_RETURN_IF_ERROR(
ConnectVariablesToSaveOp(graph, save_op, in_edges, variables));
}
return absl::OkStatus();
}
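// Builds a Range(0, Rank(input), 1) subgraph, i.e. the axis list needed to
// reduce `input` over all of its dimensions.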
Status MakeReductionAxes(Graph* graph, string name_prefix, Node* input,
Node** output) {
name_prefix = strings::StrCat(name_prefix, "/ReductionAxes");
Node* start;
Tensor zero_tensor(DT_INT32, TensorShape());
zero_tensor.flat<int32>()(0) = 0;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/RangeStart"), "Const")
.Attr("dtype", DT_INT32)
.Attr("value", zero_tensor)
.Finalize(graph, &start));
Node* delta;
Tensor one_tensor(DT_INT32, TensorShape());
one_tensor.flat<int32>()(0) = 1;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/RangeDelta"), "Const")
.Attr("dtype", DT_INT32)
.Attr("value", one_tensor)
.Finalize(graph, &delta));
Node* rank;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputRank"), "Rank")
.Input(input)
.Finalize(graph, &rank));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/ReductionAxes"), "Range")
.Input(start)
.Input(rank)
.Input(delta)
.Finalize(graph, output));
return absl::OkStatus();
}
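// Builds the update value for an exponential moving average:
//   ema = var - (1 - decay) * (var - input)
// which is equivalent to decay * var + (1 - decay) * input. For example, with
// decay = 0.999, var = 3.0 and input = 4.0 the updated value is 3.001, which
// is exactly the arithmetic checked by the FixedRangeAndEMARange unit tests.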
Status MakeExponentialMovingAverage(Graph* graph, string name_prefix,
const NodeBuilder::NodeOut& input,
Node* decay, Node* update_variable,
Node** assign_value) {
name_prefix = strings::StrCat(name_prefix, "/EMA");
Node* one;
Tensor one_tensor(DT_FLOAT, TensorShape());
one_tensor.flat<float>()(0) = 1.0;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/OneConst"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", one_tensor)
.Finalize(graph, &one));
Node* decay_complement;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/DecayComplement"), "Sub")
.Input(one)
.Input(decay)
.Finalize(graph, &decay_complement));
Node* value_diff;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/ValueDiff"), "Sub")
.Input(update_variable)
.Input(input)
.Finalize(graph, &value_diff));
Node* update_value;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/UpdateValue"), "Mul")
.Input(value_diff)
.Input(decay_complement)
.Finalize(graph, &update_value));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/EMAValue"), "Sub")
.Input(update_variable)
.Input(update_value)
.Finalize(graph, assign_value));
return absl::OkStatus();
}
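// Creates a scalar float VariableV2 that tracks an EMA of `init_val`. A
// Switch/Merge pair keyed on IsVariableInitialized assigns `init_val`
// directly on the first run and the EMA-updated value on subsequent runs.
// The variable is recorded in `added_variables` so it can later be wired into
// save/restore.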
Status MakeInitializedEMAVariable(Graph* graph, const string& name, Node* decay,
Node* init_val,
std::vector<Node*>* added_variables,
Node** var) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name, "/Variable"), "VariableV2")
.Attr("shape", TensorShape())
.Attr("dtype", DT_FLOAT)
.Finalize(graph, var));
added_variables->push_back(*var);
Node* is_initialized;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/IsInitialized"),
"IsVariableInitialized")
.Input(*var)
.Finalize(graph, &is_initialized));
Node* switch_node;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/Switch"), "Switch")
.Input(init_val)
.Input(is_initialized)
.Finalize(graph, &switch_node));
NodeBuilder::NodeOut output_false = NodeBuilder::NodeOut(switch_node, 0);
NodeBuilder::NodeOut output_true = NodeBuilder::NodeOut(switch_node, 1);
Node* ema_value;
TF_RETURN_IF_ERROR(MakeExponentialMovingAverage(graph, name, output_true,
decay, *var, &ema_value));
Node* assign_value;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/Merge"), "Merge")
.Input({output_false, ema_value})
.Finalize(graph, &assign_value));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name, "/AssignValue"), "Assign")
.Input(*var)
.Input(assign_value)
.Finalize(graph, var));
return absl::OkStatus();
}
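// Reduces `input` to its minimum and maximum over all dimensions and tracks
// both with EMA variables created by MakeInitializedEMAVariable.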
Status MakeEMAMinMaxVars(Graph* graph, const string& name_prefix, Node* input,
std::vector<Node*>* added_variables, Node** min_var,
Node** max_var) {
Tensor decay_tensor(DT_FLOAT, TensorShape());
decay_tensor.flat<float>()(0) = kEMADecay;
Node* decay;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/Decay"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", decay_tensor)
.Finalize(graph, &decay));
Node* reduction_axes;
TF_RETURN_IF_ERROR(
MakeReductionAxes(graph, name_prefix, input, &reduction_axes));
Node* min;
string min_name = strings::StrCat(name_prefix, "/Min");
TF_RETURN_IF_ERROR(NodeBuilder(min_name, "Min")
.Input(input)
.Input(reduction_axes)
.Finalize(graph, &min));
Node* max;
string max_name = strings::StrCat(name_prefix, "/Max");
TF_RETURN_IF_ERROR(NodeBuilder(max_name, "Max")
.Input(input)
.Input(reduction_axes)
.Finalize(graph, &max));
TF_RETURN_IF_ERROR(MakeInitializedEMAVariable(graph, min_name, decay, min,
added_variables, min_var));
TF_RETURN_IF_ERROR(MakeInitializedEMAVariable(graph, max_name, decay, max,
added_variables, max_var));
return absl::OkStatus();
}
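// Produces the quantization range for an edge: constant InputMin/InputMax
// nodes when the range is statically known (edge.range_given), otherwise
// EMA-tracked min/max variables computed from the live tensor.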
Status MakeInputMinMax(Graph* graph, const string& name_prefix,
const EdgeToConvert& edge,
std::vector<Node*>* added_variables, Node** input_min,
Node** input_max) {
if (edge.range_given) {
Tensor input_min_tensor(DT_FLOAT, TensorShape());
input_min_tensor.flat<float>()(0) = edge.input_min;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputMin"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", input_min_tensor)
.Finalize(graph, input_min));
Tensor input_max_tensor(DT_FLOAT, TensorShape());
input_max_tensor.flat<float>()(0) = edge.input_max;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputMax"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", input_max_tensor)
.Finalize(graph, input_max));
} else {
TF_RETURN_IF_ERROR(MakeEMAMinMaxVars(graph, name_prefix, edge.edge->src(),
added_variables, input_min,
input_max));
}
return absl::OkStatus();
}
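// Creates the actual quantization node for an edge, either
// QuantizeAndDequantizeV2 or FakeQuantWithMinMaxVars, fed by the edge's
// source and the min/max nodes produced by MakeInputMinMax.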
Status MakeQuantizeOp(Graph* graph, const string& name_prefix,
const string& quant_op_type, const EdgeToConvert& edge,
std::vector<Node*>* added_variables,
Node** convert_node) {
Node* input_min;
Node* input_max;
TF_RETURN_IF_ERROR(MakeInputMinMax(graph, name_prefix, edge, added_variables,
&input_min, &input_max));
string quant_name = strings::StrCat(name_prefix, "/", quant_op_type);
if (quant_op_type == "QuantizeAndDequantizeV2") {
TF_RETURN_IF_ERROR(NodeBuilder(quant_name, quant_op_type)
.Input(edge.edge->src())
.Input(input_min)
.Input(input_max)
.Attr("signed_input", edge.signed_input)
.Attr("num_bits", edge.num_bits)
.Attr("range_given", true)
.Finalize(graph, convert_node));
} else if (quant_op_type == "FakeQuantWithMinMaxVars") {
TF_RETURN_IF_ERROR(NodeBuilder(quant_name, quant_op_type)
.Input(edge.edge->src())
.Input(input_min)
.Input(input_max)
.Attr("num_bits", edge.num_bits)
.Finalize(graph, convert_node));
} else {
return errors::InvalidArgument("Unknown quant op type: ", quant_op_type);
}
return absl::OkStatus();
}
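// Inserts one quantization op per distinct source node (reused across edges
// that share a source), reroutes each target edge through it, and finally
// wires any EMA variables that were created into save/restore.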
Status ProcessTargetEdges(Graph* graph, const string& quant_op_type,
const std::vector<EdgeToConvert>& target_edges) {
std::unordered_map<string, Node*, StringPieceHasher> name_index;
std::vector<Node*> added_variables;
for (const EdgeToConvert& edge : target_edges) {
Node* convert_node;
string name_prefix = edge.edge->src()->name();
auto iter = name_index.find(name_prefix);
if (iter == name_index.end()) {
TF_RETURN_IF_ERROR(MakeQuantizeOp(graph, name_prefix, quant_op_type, edge,
&added_variables, &convert_node));
name_index[name_prefix] = convert_node;
} else {
convert_node = iter->second;
}
graph->AddEdge(convert_node, 0, edge.edge->dst(), edge.edge->dst_input());
graph->RemoveEdge(edge.edge);
}
TF_RETURN_IF_ERROR(AddSaveAndRestore(graph, added_variables));
return absl::OkStatus();
}
}
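// Rewrites the input edges of the ops selected for quantization (skipping
// gradient nodes). Producers whose output range cannot be inferred are
// treated as model inputs, and only kAllowedInputs of them are tolerated.
// num_bits must lie in [1, 63].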
Status DoQuantizeTraining(int32_t num_bits, const string& quant_op_type,
Graph* graph) {
if (graph == nullptr) {
return errors::InvalidArgument("Cannot accept empty graph pointer.");
}
if (num_bits < 1 || num_bits > 63) {
return errors::OutOfRange("num_bits should be in range [1, 63] but is: ",
num_bits);
}
int potential_input = 0;
std::vector<EdgeToConvert> target_edges;
for (Node* node : graph->nodes()) {
if (nodes_to_rewrite->find(node->type_string()) !=
nodes_to_rewrite->end() &&
!IsGradientNode(graph, node)) {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() == Graph::kControlSlot) {
continue;
} else {
bool signed_input = false;
bool range_given = false;
float input_min = 0;
float input_max = 0;
bool known_op = FindType(graph, edge->src(), &signed_input,
&range_given, &input_min, &input_max);
if (!known_op) {
potential_input++;
if (potential_input > kAllowedInputs) {
return errors::Unimplemented(
"Found an unknown op: ", edge->src()->name(),
" with type: ", edge->src()->type_string(),
"; Unknown ops are considered as model input for now and "
"only ",
kAllowedInputs, " inputs are supported currently.");
}
}
target_edges.emplace_back(EdgeToConvert(
edge, num_bits, signed_input, range_given, input_min, input_max));
}
}
}
}
TF_RETURN_IF_ERROR(ProcessTargetEdges(graph, quant_op_type, target_edges));
return absl::OkStatus();
}
Status DoQuantizeTrainingOnGraphDef(const GraphDef& input_graphdef,
int32_t num_bits,
const string& quant_op_type,
GraphDef* result_graphdef) {
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, input_graphdef, &graph));
TF_RETURN_IF_ERROR(DoQuantizeTraining(num_bits, quant_op_type, &graph));
graph.ToGraphDef(result_graphdef);
return absl::OkStatus();
}
Status DoQuantizeTrainingOnSerializedGraphDef(const string& input_graph_string,
int32_t num_bits,
const string& quant_op_type,
string* result_graph_string) {
GraphDef input_graphdef;
if (!ParseProtoUnlimited(&input_graphdef, input_graph_string)) {
return errors::InvalidArgument(
"input_graph_string is not a serialized GraphDef protocol buffer");
}
GraphDef output_graphdef;
TF_RETURN_IF_ERROR(DoQuantizeTrainingOnGraphDef(
input_graphdef, num_bits, quant_op_type, &output_graphdef));
if (!output_graphdef.SerializeToString(result_graph_string)) {
return errors::Internal(
"quantize training transformation resulted in invalid GraphDef");
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/quantize_training.h"
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class QuantizeTrainingTest : public ::testing::Test {
protected:
QuantizeTrainingTest() { Reset(); }
void Reset() { g_.reset(new Graph(OpRegistry::Global())); }
template <typename T>
Node* Constant(gtl::ArraySlice<T> values, TensorShape shape) {
return test::graph::Constant(g_.get(), test::AsTensor(values, shape));
}
Status Placeholder(Graph* g, const string& name, TensorShape shape,
Node** out) {
TF_RETURN_IF_ERROR(NodeBuilder(name, "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", shape)
.Finalize(g, out));
return absl::OkStatus();
}
Status FindNode(Graph* g, const string& name, Node** out) {
for (Node* node : g->nodes()) {
if (node->name() == name) {
*out = node;
return absl::OkStatus();
}
}
return errors::Unimplemented("Node ", name, " not found.");
}
std::unique_ptr<Graph> g_;
};
TEST_F(QuantizeTrainingTest, SignedInput) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(63, g->num_nodes());
Node* identity_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/QuantizeAndDequantizeV2"),
&identity_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*identity_q_node->attrs().Find("signed_input")));
Node* relu_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&relu_q_node));
ASSERT_EQ("false",
SummarizeAttrValue(*relu_q_node->attrs().Find("signed_input")));
}
TEST_F(QuantizeTrainingTest, RangeGivenTrue) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, b);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(38, g->num_nodes());
Node* relu6_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu6->name(), "/QuantizeAndDequantizeV2"),
&relu6_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*relu6_q_node->attrs().Find("range_given")));
Node* relu_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&relu_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*relu_q_node->attrs().Find("range_given")));
}
TEST_F(QuantizeTrainingTest, WithBackwardNodes_QuantizeAndDequantize) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* c = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
Node* d = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
g->AddControlEdge(g->source_node(), c);
g->AddControlEdge(g->source_node(), d);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
Node* m2 = test::graph::Matmul(g, identity, c, false, false);
g->AddControlEdge(m1, g->sink_node());
g->AddControlEdge(m2, g->sink_node());
Node* backward_m;
TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul")
.Input(d)
.Input(m2)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Finalize(g, &backward_m));
g->AddControlEdge(backward_m, g->sink_node());
int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(95, g->num_nodes());
Node* found_node;
Status s = FindNode(g, strings::StrCat(d->name(), "/QuantizeAndDequantizeV2"),
&found_node);
EXPECT_TRUE(absl::StrContains(s.ToString(), "not found")) << s;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&found_node));
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/QuantizeAndDequantizeV2"),
&found_node));
TF_ASSERT_OK(FindNode(
g, strings::StrCat(c->name(), "/QuantizeAndDequantizeV2"), &found_node));
}
TEST_F(QuantizeTrainingTest, WithBackwardNodes_FakeQuant) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* c = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
Node* d = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
g->AddControlEdge(g->source_node(), c);
g->AddControlEdge(g->source_node(), d);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
Node* m2 = test::graph::Matmul(g, identity, c, false, false);
g->AddControlEdge(m1, g->sink_node());
g->AddControlEdge(m2, g->sink_node());
Node* backward_m;
TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul")
.Input(d)
.Input(m2)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Finalize(g, &backward_m));
g->AddControlEdge(backward_m, g->sink_node());
int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "FakeQuantWithMinMaxVars", g));
EXPECT_EQ(95, g->num_nodes());
Node* found_node;
Status s = FindNode(g, strings::StrCat(d->name(), "/FakeQuantWithMinMaxVars"),
&found_node);
EXPECT_TRUE(absl::StrContains(s.ToString(), "not found")) << s;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/FakeQuantWithMinMaxVars"),
&found_node));
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/FakeQuantWithMinMaxVars"),
&found_node));
TF_ASSERT_OK(FindNode(
g, strings::StrCat(c->name(), "/FakeQuantWithMinMaxVars"), &found_node));
}
TEST_F(QuantizeTrainingTest, QuantizeSerializedGraphDef) {
Reset();
Graph* graph = g_.get();
Node* const_a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* const_b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
graph->AddControlEdge(graph->source_node(), const_a);
graph->AddControlEdge(graph->source_node(), const_b);
Node* relu = test::graph::Relu(graph, const_a);
Node* identity = test::graph::Identity(graph, const_b);
Node* matmul = test::graph::Matmul(graph, relu, identity, false, false);
graph->AddControlEdge(matmul, graph->sink_node());
int num_bits = 8;
GraphDef input_graph;
graph->ToGraphDef(&input_graph);
string input_string;
input_graph.SerializeToString(&input_string);
string result_string;
TF_ASSERT_OK(DoQuantizeTrainingOnSerializedGraphDef(
input_string, num_bits, "QuantizeAndDequantizeV2", &result_string));
GraphDef result_graphdef;
EXPECT_TRUE(ParseProtoUnlimited(&result_graphdef, result_string));
GraphConstructorOptions opts;
Graph result_graph(OpRegistry::Global());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, result_graphdef, &result_graph));
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", graph));
EXPECT_EQ(graph->num_nodes(), result_graph.num_nodes());
}
TEST_F(QuantizeTrainingTest, QuantizeGraphDef) {
Reset();
Graph* graph = g_.get();
Node* const_a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* const_b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
graph->AddControlEdge(graph->source_node(), const_a);
graph->AddControlEdge(graph->source_node(), const_b);
Node* relu = test::graph::Relu(graph, const_a);
Node* identity = test::graph::Identity(graph, const_b);
Node* matmul = test::graph::Matmul(graph, relu, identity, false, false);
graph->AddControlEdge(matmul, graph->sink_node());
int num_bits = 8;
GraphDef input_graphdef;
graph->ToGraphDef(&input_graphdef);
GraphDef result_graphdef;
TF_ASSERT_OK(DoQuantizeTrainingOnGraphDef(
input_graphdef, num_bits, "QuantizeAndDequantizeV2", &result_graphdef));
GraphConstructorOptions opts;
Graph result_graph(OpRegistry::Global());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, result_graphdef, &result_graph));
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", graph));
EXPECT_EQ(graph->num_nodes(), result_graph.num_nodes());
}
TEST_F(QuantizeTrainingTest, FixedRangeAndEMARange_QuantizeAndDequantize) {
Reset();
Graph* g = g_.get();
Node* a;
TF_ASSERT_OK(Placeholder(g, "a", {2, 2}, &a));
Node* c = Constant<float>({2.0, 3.0, 4.0, 5.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), c);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, c);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
SessionOptions options;
Session* sess;
TF_ASSERT_OK(NewSession(options, &sess));
GraphDef gdef;
g->ToGraphDef(&gdef);
TF_ASSERT_OK(sess->Create(gdef));
string min_const_name = strings::StrCat(relu6->name(), "/InputMin");
string max_const_name = strings::StrCat(relu6->name(), "/InputMax");
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
Tensor a1(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a1, {0.0, 1.0, 2.0, 3.0});
Tensor a2(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a2, {1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK(sess->Run({{"a", a1}}, {m1->name()}, {}, &outputs));
string min_var_name = strings::StrCat(relu->name(), "/Min/Variable");
string max_var_name = strings::StrCat(relu->name(), "/Max/Variable");
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 3.0);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
TF_ASSERT_OK(sess->Run({{"a", a2}}, {m1->name()}, {}, &outputs));
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
const float decay = 0.999;
const float expected_min = 0.0 * decay + 1.0 * (1.0 - decay);
const float expected_max = 3.0 * decay + 4.0 * (1.0 - decay);
EXPECT_NEAR(outputs[0].flat<float>()(0), expected_min, 1e-4);
EXPECT_NEAR(outputs[1].flat<float>()(0), expected_max, 1e-4);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
}
TEST_F(QuantizeTrainingTest, FixedRangeAndEMARange_FakeQuant) {
Reset();
Graph* g = g_.get();
Node* a;
TF_ASSERT_OK(Placeholder(g, "a", {2, 2}, &a));
Node* c = Constant<float>({2.0, 3.0, 4.0, 5.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), c);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, c);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "FakeQuantWithMinMaxVars", g));
SessionOptions options;
Session* sess;
TF_ASSERT_OK(NewSession(options, &sess));
GraphDef gdef;
g->ToGraphDef(&gdef);
TF_ASSERT_OK(sess->Create(gdef));
string min_const_name = strings::StrCat(relu6->name(), "/InputMin");
string max_const_name = strings::StrCat(relu6->name(), "/InputMax");
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
Tensor a1(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a1, {0.0, 1.0, 2.0, 3.0});
Tensor a2(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a2, {1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK(sess->Run({{"a", a1}}, {m1->name()}, {}, &outputs));
string min_var_name = strings::StrCat(relu->name(), "/Min/Variable");
string max_var_name = strings::StrCat(relu->name(), "/Max/Variable");
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 3.0);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
TF_ASSERT_OK(sess->Run({{"a", a2}}, {m1->name()}, {}, &outputs));
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
const float decay = 0.999;
const float expected_min = 0.0 * decay + 1.0 * (1.0 - decay);
const float expected_max = 3.0 * decay + 4.0 * (1.0 - decay);
EXPECT_NEAR(outputs[0].flat<float>()(0), expected_min, 1e-4);
EXPECT_NEAR(outputs[1].flat<float>()(0), expected_max, 1e-4);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
}
}
} |
1,589 | cpp | tensorflow/tensorflow | graph_runner | tensorflow/core/common_runtime/graph_runner.cc | tensorflow/core/common_runtime/graph_runner_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_RUNNER_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_RUNNER_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
namespace tsl {
class Env;
}
namespace tensorflow {
using Env = tsl::Env;
class Device;
class Graph;
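// GraphRunner runs a graph to completion on a single device (a
// single-threaded CPU device by default). It is intended for small graphs
// such as those evaluated during shape inference or constant folding, not as
// a general, high-performance graph executor.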
class GraphRunner {
public:
GraphRunner(Env* env);
GraphRunner(Device* device);
~GraphRunner();
typedef std::vector<std::pair<string, Tensor>> NamedTensorList;
Status Run(Graph* graph, FunctionLibraryRuntime* function_library,
const NamedTensorList& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs);
private:
std::unique_ptr<Device> device_deleter_;
Device* const device_;
};
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/single_threaded_cpu_device.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
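// A trivial in-process rendezvous: Send buffers tensors in a map keyed by
// edge name, and RecvAsync completes immediately from that map.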
class SimpleRendezvous : public RendezvousInterface {
public:
explicit SimpleRendezvous() {}
Status Send(const ParsedKey& parsed, const Args& send_args, const Tensor& val,
const bool is_dead) override {
if (is_dead) {
return errors::Internal("Send of a dead tensor");
}
mutex_lock l(mu_);
string edge_name(parsed.edge_name);
if (table_.count(edge_name) > 0) {
return errors::Internal("Send of an already sent tensor");
}
table_[edge_name] = val;
return absl::OkStatus();
}
void RecvAsync(const ParsedKey& parsed, const Args& recv_args,
DoneCallback done) override {
Tensor tensor;
Status status = absl::OkStatus();
{
string key(parsed.edge_name);
mutex_lock l(mu_);
if (table_.count(key) <= 0) {
status = errors::Internal("Did not find key ", key);
} else {
tensor = table_[key];
}
}
done(status, Args{}, recv_args, tensor, false);
}
void StartAbort(const Status& status) override {}
private:
typedef std::unordered_map<string, Tensor> Table;
mutex mu_;
Table table_ TF_GUARDED_BY(mu_);
};
}
GraphRunner::GraphRunner(Env* env)
: device_deleter_(NewSingleThreadedCpuDevice(env)),
device_(device_deleter_.get()) {}
GraphRunner::GraphRunner(Device* device) : device_(device) {}
GraphRunner::~GraphRunner() {}
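// Runs `graph` on the runner's device: the graph is copied, the feeds are
// pushed into a SimpleRendezvous, the copy is rewritten for the requested
// fetches, executed with a local single-threaded executor, and the fetched
// tensors are deep-copied into `outputs`.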
Status GraphRunner::Run(Graph* graph, FunctionLibraryRuntime* function_library,
const NamedTensorList& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
if (device_ == nullptr) {
return errors::NotFound("Cannot find a device for GraphRunner.");
}
if (function_library && function_library->device() &&
function_library->device()->device_type() != device_->device_type()) {
VLOG(1) << "Cannot run on: " << device_->device_type()
<< " with a function library for a "
<< function_library->device()->device_type() << " device.";
function_library = nullptr;
}
std::unique_ptr<Graph> graph_to_run(new Graph(graph->op_registry()));
CopyGraph(*graph, graph_to_run.get());
SimpleRendezvous rendez;
std::vector<string> input_names;
for (const auto& in : inputs) {
const string& tensor_name = in.first;
input_names.emplace_back(tensor_name);
string full_key = Rendezvous::CreateKey("/device:CPU:0", 1, "/device:CPU:1",
tensor_name, FrameAndIter(0, 0));
Rendezvous::ParsedKey parsed;
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(full_key, &parsed));
TF_RETURN_IF_ERROR(rendez.Send(parsed, Rendezvous::Args(), in.second,
false ));
}
subgraph::RewriteGraphMetadata metadata;
TF_RETURN_IF_ERROR(subgraph::RewriteGraphForExecution(
graph_to_run.get(), input_names, output_names, {} ,
device_->attributes(), false , &metadata));
auto runner = [](Executor::Args::Closure c) { c(); };
LocalExecutorParams params;
params.device = device_;
params.function_library = function_library;
const int producer = graph_to_run->versions().producer();
params.create_kernel = [this, function_library, producer](
const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) {
return CreateNonCachedKernel(device_, function_library, props, producer,
kernel);
};
params.delete_kernel = [](OpKernel* kernel) { delete kernel; };
Executor* executor;
TF_RETURN_IF_ERROR(NewLocalExecutor(params, *graph_to_run, &executor));
std::unique_ptr<Executor> executor_unref(executor);
Executor::Args args;
args.step_id = LogMemory::CONSTANT_FOLDING_STEP_ID;
args.runner = runner;
args.rendezvous = &rendez;
args.collective_executor = nullptr;
CancellationManager cancellation_manager;
args.cancellation_manager = &cancellation_manager;
if (function_library != nullptr) {
args.session_config = function_library->config_proto();
}
TF_RETURN_IF_ERROR(executor->Run(args));
outputs->resize(output_names.size());
for (size_t i = 0; i < output_names.size(); ++i) {
const string& output_key =
Rendezvous::CreateKey("/device:CPU:0", 1, "/device:CPU:1",
output_names[i], FrameAndIter(0, 0));
Rendezvous::ParsedKey parsed;
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(output_key, &parsed));
bool is_dead;
Tensor output_tensor;
TF_RETURN_IF_ERROR(
rendez.Recv(parsed, Rendezvous::Args(), &output_tensor, &is_dead));
(*outputs)[i] = tensor::DeepCopy(output_tensor);
}
return absl::OkStatus();
}
} | #include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(GraphRunnerTest, SingleConst) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
GraphRunner graph_runner(Env::Default());
std::vector<Tensor> outputs;
Status s = graph_runner.Run(root.graph(), nullptr, {}, {c.name()}, &outputs);
TF_ASSERT_OK(s);
test::ExpectEqual(test::AsScalar(42.0f), outputs[0]);
}
TEST(GraphRunnerTest, DeepCopy) {
Scope root = Scope::NewRootScope();
auto p1 = ops::Placeholder(root.WithOpName("p1"), DT_FLOAT);
auto p2 = ops::Placeholder(root.WithOpName("p2"), DT_FLOAT);
auto add = ops::Add(root.WithOpName("add"), p1, p2);
Tensor p1_data(DT_FLOAT, TensorShape({}));
Tensor p2_data(DT_FLOAT, TensorShape({}));
p1_data.scalar<float>()() = 1.0f;
p2_data.scalar<float>()() = 2.0f;
std::vector<std::pair<string, Tensor>> inputs = {{"p1:0", p1_data},
{"p2:0", p2_data}};
std::vector<Tensor> outputs;
{
GraphRunner graph_runner(Env::Default());
Status s =
graph_runner.Run(root.graph(), nullptr, inputs, {"add:0"}, &outputs);
TF_ASSERT_OK(s);
}
test::ExpectEqual(test::AsScalar(3.0f), outputs[0]);
}
TEST(GraphRunnerTest, MultiFetchConst) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
auto pi = ops::Const(root, 3.14f);
GraphRunner graph_runner(Env::Default());
std::vector<Tensor> outputs;
Status s = graph_runner.Run(root.graph(), nullptr, {}, {c.name(), pi.name()},
&outputs);
TF_ASSERT_OK(s);
test::ExpectEqual(test::AsScalar(42.0f), outputs[0]);
test::ExpectEqual(test::AsScalar(3.14f), outputs[1]);
}
TEST(GraphRunnerTest, FeedAndFetch) {
Scope root = Scope::NewRootScope();
auto p1 = ops::Placeholder(root.WithOpName("p1"), DT_FLOAT);
auto p2 = ops::Placeholder(root.WithOpName("p2"), DT_FLOAT);
auto add = ops::Add(root.WithOpName("add"), p1, p2);
Tensor p1_data(DT_FLOAT, TensorShape({}));
Tensor p2_data(DT_FLOAT, TensorShape({}));
p1_data.scalar<float>()() = 1.0f;
p2_data.scalar<float>()() = 2.0f;
std::vector<std::pair<string, Tensor>> inputs = {{"p1:0", p1_data},
{"p2:0", p2_data}};
GraphRunner graph_runner(Env::Default());
std::vector<Tensor> outputs;
Status s =
graph_runner.Run(root.graph(), nullptr, inputs, {"add:0"}, &outputs);
TF_ASSERT_OK(s);
test::ExpectEqual(test::AsScalar(3.0f), outputs[0]);
}
}
} |
1,590 | cpp | tensorflow/tensorflow | partitioning_utils | tensorflow/core/common_runtime/partitioning_utils.cc | tensorflow/core/common_runtime/partitioning_utils_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PARTITIONING_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PARTITIONING_UTILS_H_
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
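// Partitions `graph` by assigned device name and fills `subgraphs` with one
// subgraph per device. Every node must already have an assigned device.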
Status PartitionFunctionGraph(
const DeviceSet& device_set, std::unique_ptr<Graph> graph,
std::unordered_map<string, std::unique_ptr<Graph>>* subgraphs,
std::function<string(const Edge*)> get_tensor_name_attr = nullptr);
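// Inserts the _Send/_Recv pairs needed across device boundaries but returns a
// single merged graph rather than per-device subgraphs. If all ops are placed
// on one device, the input graph is returned unchanged.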
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
const DeviceSet& device_set, std::unique_ptr<Graph> graph);
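// Renumbers the _Arg/_Retval nodes of a per-device subgraph so their indices
// are dense and ordered, records the original (index, sub_index) pairs in
// `arg_indices`/`ret_indices`, and fills the allocator attributes for
// arguments and return values.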
Status UpdateArgAndRetvalMetadata(
Graph* graph, std::vector<FunctionArgIndex>* arg_indices,
std::vector<int>* ret_indices,
std::vector<AllocatorAttributes>* arg_alloc_attrs,
std::vector<AllocatorAttributes>* ret_alloc_attrs, bool ints_on_device);
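// Generates names of the form "<name>_<counter>" that do not collide with any
// function already registered in `flib_def`.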
class FunctionNameGenerator {
public:
FunctionNameGenerator(const FunctionLibraryDefinition* flib_def,
const string& name)
: flib_def_(flib_def), name_(name), counter_(0) {}
string GetName();
private:
const FunctionLibraryDefinition* flib_def_;
const string name_;
uint32 counter_;
};
}
#endif
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_partition.h"
namespace tensorflow {
namespace {
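// Thin wrapper around Partition() that supplies the standard options: nodes
// are grouped by assigned device (unless node_to_loc overrides this), new
// edges get unique names, and device incarnations are looked up in
// `device_set`.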
Status PartitionFunctionGraph(
const DeviceSet& device_set, Graph* graph,
std::unordered_map<string, GraphDef>* partitions,
std::function<string(const Node*)> node_to_loc,
std::function<string(const Edge*)> get_tensor_name_attr) {
PartitionOptions partition_options;
if (node_to_loc != nullptr) {
partition_options.node_to_loc = node_to_loc;
} else {
partition_options.node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
}
int64_t edge_name_counter = 0;
partition_options.new_name = [&edge_name_counter](const string& prefix) {
return strings::StrCat(prefix, "/_", ++edge_name_counter);
};
partition_options.get_incarnation =
[&device_set](const string& name) -> int64 {
const Device* d = device_set.FindDeviceByName(name);
if (d == nullptr) {
return PartitionOptions::kIllegalIncarnation;
} else {
return d->attributes().incarnation();
}
};
partition_options.control_flow_added = false;
partition_options.get_tensor_name_attr = get_tensor_name_attr;
partition_options.can_make_destructive_changes = true;
return Partition(partition_options, graph, partitions);
}
struct SendRecvPair {
Node* send_node = nullptr;
Node* recv_node = nullptr;
};
constexpr char kTensorNameAttr[] = "tensor_name";
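// Pairs _Send and _Recv nodes via their "tensor_name" attribute and adds a
// control edge from each Send to its Recv, making the cross-device data
// dependency explicit in the merged graph.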
Status MakeSendRecvDependencyExplicit(Graph* graph) {
absl::flat_hash_map<std::string, SendRecvPair> send_recv_pairs;
for (Node* node : graph->op_nodes()) {
if (node->IsSend() || node->IsRecv()) {
auto tensor_name_it = node->def().attr().find(kTensorNameAttr);
if (tensor_name_it == node->def().attr().end()) {
return errors::Internal(
"'", kTensorNameAttr,
"' attribute is not found from node: ", node->DebugString());
}
if (node->IsSend()) {
send_recv_pairs[tensor_name_it->second.s()].send_node = node;
} else {
send_recv_pairs[tensor_name_it->second.s()].recv_node = node;
}
}
}
for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {
if (send_recv_pair.send_node == nullptr ||
send_recv_pair.recv_node == nullptr) {
return errors::Internal(
"No matching Send/Recv nodes found for tensor_name = ", tensor_name);
}
graph->AddControlEdge(send_recv_pair.send_node, send_recv_pair.recv_node);
}
return absl::OkStatus();
}
}
Status PartitionFunctionGraph(
const DeviceSet& device_set, std::unique_ptr<Graph> graph,
std::unordered_map<string, std::unique_ptr<Graph>>* subgraphs,
std::function<string(const Edge*)> get_tensor_name_attr) {
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(
PartitionFunctionGraph(device_set, graph.get(), &partitions,
nullptr, get_tensor_name_attr));
const OpRegistryInterface* default_registry =
graph->flib_def().default_registry();
graph.reset();
for (auto& partition : partitions) {
const string& device = partition.first;
GraphDef& graph_def = partition.second;
auto subgraph = std::make_unique<Graph>(default_registry);
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(opts, std::move(graph_def), subgraph.get()));
subgraphs->emplace(device, std::move(subgraph));
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
const DeviceSet& device_set, std::unique_ptr<Graph> graph) {
auto node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
bool has_multiple_devices = false;
absl::optional<std::string> location;
for (const Node* node : graph->op_nodes()) {
if (location) {
if (*location != node_to_loc(node)) {
has_multiple_devices = true;
break;
}
} else {
location = node_to_loc(node);
}
}
if (!has_multiple_devices) {
return graph;
}
auto new_graph = std::make_unique<Graph>(graph->flib_def());
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(PartitionFunctionGraph(device_set, graph.get(),
&partitions, node_to_loc,
nullptr));
GraphDef merged_graph_def;
if (!partitions.empty()) {
auto iter = partitions.begin();
merged_graph_def = std::move(iter->second);
while (++iter != partitions.end()) {
merged_graph_def.MergeFrom(iter->second);
}
}
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, std::move(merged_graph_def),
new_graph.get()));
TF_RETURN_IF_ERROR(MakeSendRecvDependencyExplicit(new_graph.get()));
return std::move(new_graph);
}
Status UpdateArgAndRetvalMetadata(
Graph* graph, std::vector<FunctionArgIndex>* arg_indices,
std::vector<int>* ret_indices,
std::vector<AllocatorAttributes>* arg_alloc_attrs,
std::vector<AllocatorAttributes>* ret_alloc_attrs, bool ints_on_device) {
std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes;
std::vector<std::pair<Node*, int>> ret_nodes;
const AttrValue* attr_value;
for (Node* node : graph->op_nodes()) {
if (node->IsArg()) {
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
int sub_index = -1;
if (node->attrs().Find("sub_index", &attr_value).ok()) {
sub_index = static_cast<int>(attr_value->i());
}
arg_nodes.emplace_back(node, FunctionArgIndex(index, sub_index));
} else if (node->IsRetval()) {
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
ret_nodes.emplace_back(node, index);
}
}
auto arg_comparator = [](std::pair<Node*, FunctionArgIndex> a,
std::pair<Node*, FunctionArgIndex> b) {
return std::tie(a.second.index, a.second.sub_index) <
std::tie(b.second.index, b.second.sub_index);
};
std::sort(arg_nodes.begin(), arg_nodes.end(), arg_comparator);
auto ret_comparator = [](std::pair<Node*, int> a, std::pair<Node*, int> b) {
return a.second < b.second;
};
std::sort(ret_nodes.begin(), ret_nodes.end(), ret_comparator);
arg_indices->reserve(arg_nodes.size());
for (const auto& pair : arg_nodes) arg_indices->push_back(pair.second);
ret_indices->reserve(ret_nodes.size());
for (const auto& pair : ret_nodes) ret_indices->push_back(pair.second);
for (int i = 0; i < arg_nodes.size(); ++i) {
Node* arg = arg_nodes[i].first;
arg->AddAttr("index", i);
}
if (arg_alloc_attrs != nullptr) {
TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForArgs(
arg_nodes, ints_on_device, *arg_alloc_attrs));
}
for (int i = 0; i < ret_nodes.size(); ++i) {
Node* ret = ret_nodes[i].first;
ret->AddAttr("index", i);
}
if (ret_alloc_attrs) {
TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForRets(
ret_nodes, ints_on_device, *ret_alloc_attrs));
}
return absl::OkStatus();
}
string FunctionNameGenerator::GetName() {
while (true) {
const string candidate = strings::StrCat(name_, "_", counter_++);
if (flib_def_->Find(candidate) == nullptr) {
return candidate;
}
}
}
} | #include "tensorflow/core/common_runtime/partitioning_utils.h"
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
using ::testing::SizeIs;
class PartitioningUtilsTest : public ::testing::Test {
public:
void SetUp() override {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 2});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&devices));
device0_ = devices[0].get();
device1_ = devices[1].get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
for (auto d : device_mgr_->ListDevices()) {
device_set_.AddDevice(d);
}
}
void SwapGraph(Graph* graph, bool assign_device = false) {
Scope s = Scope::NewRootScope();
if (assign_device) {
s = s.WithDevice(device0_->name());
}
auto x = ops::_Arg(s.WithOpName("x"), DT_FLOAT, 0);
auto y = ops::_Arg(s.WithOpName("y"), DT_FLOAT, 1);
auto id_x = ops::Identity(s.WithOpName("id_x"), x);
auto id_y = ops::Identity(s.WithOpName("id_y"), y);
auto dx_retval = ops::_Retval(s.WithOpName("retval1"), id_y, 0);
auto dy_retval = ops::_Retval(s.WithOpName("retval2"), id_x, 1);
TF_ASSERT_OK(s.ToGraph(graph));
if (assign_device) {
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
}
void TwoDeviceSwapGraph(Graph* graph) {
Scope s = Scope::NewRootScope();
Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0");
Scope s2 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:1");
auto x = ops::_Arg(s1.WithOpName("x"), DT_FLOAT, 0);
auto y = ops::_Arg(s2.WithOpName("y"), DT_FLOAT, 1);
auto id_x = ops::Identity(s1.WithOpName("id_x"), x);
auto id_y = ops::Identity(s2.WithOpName("id_y"), y);
auto dx_retval = ops::_Retval(s2.WithOpName("retval1"), id_y, 0);
auto dy_retval = ops::_Retval(s1.WithOpName("retval2"), id_x, 1);
TF_ASSERT_OK(s.ToGraph(graph));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
void SubGraph(Graph* subgraph, DataType dtype,
absl::Span<const int> arg_indices,
absl::Span<const int> ret_indices) {
Scope s = Scope::NewRootScope();
Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0");
CHECK_EQ(arg_indices.size(), ret_indices.size());
for (size_t i = 0; i < arg_indices.size(); ++i) {
auto x = ops::_Arg(s1.WithOpName("x"), dtype, arg_indices[i]);
auto id_x = ops::Identity(s1.WithOpName("id_x"), x);
auto dx_retval =
ops::_Retval(s1.WithOpName("retval1"), id_x, ret_indices[i]);
}
TF_ASSERT_OK(s.ToGraph(subgraph));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(subgraph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
std::unique_ptr<DeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
DeviceSet device_set_;
};
TEST_F(PartitioningUtilsTest, GraphWithoutAssignedDevicesFails) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
SwapGraph(graph.get());
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << status.ToString();
}
TEST_F(PartitioningUtilsTest, OneDevice) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
SwapGraph(graph.get(), true);
int num_nodes = graph->num_op_nodes();
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(1, subgraphs.size());
const auto& pair = *subgraphs.begin();
ASSERT_EQ("/job:a/replica:0/task:0/device:CPU:0", pair.first);
ASSERT_EQ(num_nodes, pair.second->num_op_nodes());
}
TEST_F(PartitioningUtilsTest, TwoDevices) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
TwoDeviceSwapGraph(graph.get());
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(2, subgraphs.size());
const auto& part1 = subgraphs["/job:a/replica:0/task:0/device:CPU:0"];
ASSERT_EQ(3, part1->num_op_nodes());
const auto& part2 = subgraphs["/job:a/replica:0/task:0/device:CPU:1"];
ASSERT_EQ(3, part2->num_op_nodes());
}
TEST_F(PartitioningUtilsTest, InsertTransferOpsWithOneDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice(device0_->name());
auto x = ops::_Arg(scope.WithOpName("x"), DT_FLOAT, 0);
auto id_x = ops::Identity(scope.WithOpName("id_x"), x);
auto ret_x = ops::_Retval(scope.WithOpName("ret_x"), id_x, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
EXPECT_EQ(graph->num_op_nodes(), 3);
int send_count = 0, recv_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
ASSERT_EQ(send_count, 0);
ASSERT_EQ(recv_count, 0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph,
InsertTransferOps(device_set_, std::move(graph)));
EXPECT_EQ(new_graph->num_op_nodes(), 3);
send_count = recv_count = 0;
for (const auto* op : new_graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
EXPECT_EQ(send_count, 0);
EXPECT_EQ(recv_count, 0);
}
TEST_F(PartitioningUtilsTest, InsertTransferOpsWithTwoDevices) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope();
Scope scope1 = scope.WithDevice(device0_->name());
Scope scope2 = scope.WithDevice(device1_->name());
auto x = ops::_Arg(scope1.WithOpName("x"), DT_FLOAT, 0);
auto id_x = ops::Identity(scope2.WithOpName("id_x"), x);
auto ret_x = ops::_Retval(scope1.WithOpName("ret_x"), id_x, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
EXPECT_EQ(graph->num_op_nodes(), 3);
int send_count = 0, recv_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
ASSERT_EQ(send_count, 0);
ASSERT_EQ(recv_count, 0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph,
InsertTransferOps(device_set_, std::move(graph)));
EXPECT_EQ(new_graph->num_op_nodes(), 7);
send_count = recv_count = 0;
auto get_tensor_name_attr = [](const Node* node) -> std::string {
auto tensor_name_it = node->def().attr().find("tensor_name");
return tensor_name_it->second.s();
};
absl::flat_hash_map<std::string, std::pair<Node*, Node*>> send_recv_pairs;
for (auto* op : new_graph->op_nodes()) {
if (op->IsSend()) {
++send_count;
send_recv_pairs[get_tensor_name_attr(op)].first = op;
} else if (op->IsRecv()) {
++recv_count;
send_recv_pairs[get_tensor_name_attr(op)].second = op;
}
}
EXPECT_EQ(send_count, 2);
EXPECT_EQ(recv_count, 2);
for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {
ASSERT_TRUE(send_recv_pair.first != nullptr &&
send_recv_pair.second != nullptr);
std::vector<const Edge*> out_edges(
send_recv_pair.first->out_edges().begin(),
send_recv_pair.first->out_edges().end());
ASSERT_THAT(out_edges, SizeIs(2));
for (const Edge* out_edge : out_edges) {
if (out_edge->dst() != new_graph->sink_node()) {
EXPECT_TRUE(out_edge->IsControlEdge());
EXPECT_EQ(out_edge->dst(), send_recv_pair.second);
}
}
}
}
void CheckRetIndices(const std::vector<int>& expected,
const std::vector<int>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], actual[i]) << " at index " << i;
}
}
void CheckArgIndices(const std::vector<FunctionArgIndex>& expected,
const std::vector<FunctionArgIndex>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i].index, actual[i].index) << " at index " << i;
ASSERT_EQ(expected[i].sub_index, actual[i].sub_index) << " at index " << i;
}
}
void CheckAlloc(const std::vector<bool>& expected,
const std::vector<AllocatorAttributes>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], actual[i].on_host()) << " at index " << i;
}
}
void CheckIndex(const Node& node, int expected_index) {
const AttrValue* attr_value;
TF_ASSERT_OK(node.attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
ASSERT_EQ(expected_index, index);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRets) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_FLOAT, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckArgIndices({{3, -1}}, arg_indices);
CheckRetIndices({5}, ret_indices);
CheckAlloc({false}, arg_alloc_attrs);
CheckAlloc({false}, ret_alloc_attrs);
std::unordered_map<string, Node*> nodes = graph->BuildNodeNameIndex();
ASSERT_EQ(1, nodes.count("x"));
CheckIndex(*nodes["x"], 0);
ASSERT_EQ(1, nodes.count("retval1"));
CheckIndex(*nodes["retval1"], 0);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsNotOnDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_INT32, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Int32FulltypePass int32_fulltype;
TF_ASSERT_OK(
int32_fulltype.ProcessGraph(graph.get(), false));
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckAlloc({true}, arg_alloc_attrs);
CheckAlloc({true}, ret_alloc_attrs);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsOnDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_INT32, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, true);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckAlloc({false}, arg_alloc_attrs);
CheckAlloc({false}, ret_alloc_attrs);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRets_Order) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_FLOAT, {9, 7, 5, 3, 1}, {2, 4, 6, 8, 10});
const std::map<int, int> sub_indices = {
{7, 2}, {3, 1}, {1, 0}, {5, 2}, {9, 0}};
const AttrValue* attr_value;
for (Node* n : graph->op_nodes()) {
if (n->IsArg()) {
TF_ASSERT_OK(n->attrs().Find("index", &attr_value));
n->AddAttr("sub_index",
sub_indices.at(static_cast<int>(attr_value->i())));
}
}
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckArgIndices({{1, 0}, {3, 1}, {5, 2}, {7, 2}, {9, 0}}, arg_indices);
CheckRetIndices({2, 4, 6, 8, 10}, ret_indices);
CheckAlloc({false, false, false, false, false}, arg_alloc_attrs);
CheckAlloc({false, false, false, false, false}, ret_alloc_attrs);
}
}
} |
1,591 | cpp | tensorflow/tensorflow | device_set | tensorflow/core/common_runtime/device_set.cc | tensorflow/core/common_runtime/device_set_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_SET_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_SET_H_
#include <memory>
#include <unordered_map>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
typedef std::vector<std::pair<Device*, int32>> PrioritizedDeviceVector;
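// DeviceSet is a non-owning container of the devices available to a process.
// It supports lookup by full device name and exposes priority-ordered views
// of the devices and device types it holds.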
class DeviceSet {
public:
DeviceSet();
~DeviceSet();
void AddDevice(Device* device) TF_LOCKS_EXCLUDED(devices_mu_);
void set_client_device(Device* device) {
DCHECK(client_device_ == nullptr);
client_device_ = device;
}
Device* client_device() const { return client_device_; }
const std::vector<Device*>& devices() const { return devices_; }
void FindMatchingDevices(const DeviceNameUtils::ParsedName& spec,
std::vector<Device*>* devices) const;
Device* FindDeviceByName(const string& fullname) const;
std::vector<DeviceType> PrioritizedDeviceTypeList() const;
const PrioritizedDeviceVector& prioritized_devices() const
TF_LOCKS_EXCLUDED(devices_mu_);
const PrioritizedDeviceTypeVector& prioritized_device_types() const
TF_LOCKS_EXCLUDED(devices_mu_);
static int DeviceTypeOrder(const DeviceType& d);
static void SortPrioritizedDeviceVector(PrioritizedDeviceVector* vector);
static void SortPrioritizedDeviceTypeVector(
PrioritizedDeviceTypeVector* vector);
private:
mutable mutex devices_mu_;
mutable absl::flat_hash_map<DeviceNameUtils::ParsedName, std::vector<Device*>>
matching_device_cache_;
std::vector<Device*> devices_;
mutable PrioritizedDeviceVector prioritized_devices_
TF_GUARDED_BY(devices_mu_);
mutable PrioritizedDeviceTypeVector prioritized_device_types_
TF_GUARDED_BY(devices_mu_);
std::unordered_map<string, Device*> device_by_name_;
Device* client_device_ = nullptr;
DeviceSet(const DeviceSet&) = delete;
void operator=(const DeviceSet&) = delete;
};
}
#endif
#include "tensorflow/core/common_runtime/device_set.h"
#include <set>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
DeviceSet::DeviceSet() = default;
DeviceSet::~DeviceSet() = default;
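// Registers `device` (not owned) under every name mapping produced for its
// parsed name, and invalidates the cached prioritized vectors and the
// matching-device cache so they are rebuilt lazily on next access.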
void DeviceSet::AddDevice(Device* device) {
mutex_lock l(devices_mu_);
devices_.push_back(device);
prioritized_devices_.clear();
prioritized_device_types_.clear();
for (const string& name :
DeviceNameUtils::GetNamesForDeviceMappings(device->parsed_name())) {
device_by_name_.insert({name, device});
}
matching_device_cache_.clear();
}
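// Returns all devices whose parsed names are matched by `spec`. Results are
// memoized per spec in matching_device_cache_, so repeated lookups with the
// same specification avoid rescanning the device list.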
void DeviceSet::FindMatchingDevices(const DeviceNameUtils::ParsedName& spec,
std::vector<Device*>* devices) const {
{
mutex_lock l(devices_mu_);
auto match = matching_device_cache_.find(spec);
if (match != matching_device_cache_.end()) {
      *devices = match->second;
      // Cache hit: return the memoized result instead of recomputing (and
      // clearing) it below.
      return;
}
}
devices->clear();
for (Device* d : devices_) {
if (DeviceNameUtils::IsCompleteSpecification(spec, d->parsed_name())) {
devices->push_back(d);
}
}
mutex_lock l(devices_mu_);
matching_device_cache_.insert({spec, *devices});
}
Device* DeviceSet::FindDeviceByName(const string& name) const {
return gtl::FindPtrOrNull(device_by_name_, name);
}
int DeviceSet::DeviceTypeOrder(const DeviceType& d) {
return DeviceFactory::DevicePriority(d.type_string());
}
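// Orders device types by descending registration priority, breaking ties
// lexicographically by type name so the ordering is deterministic.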
static bool DeviceTypeComparator(const DeviceType& a, const DeviceType& b) {
auto a_priority = DeviceSet::DeviceTypeOrder(a);
auto b_priority = DeviceSet::DeviceTypeOrder(b);
if (a_priority != b_priority) {
return a_priority > b_priority;
}
return StringPiece(a.type()) < StringPiece(b.type());
}
std::vector<DeviceType> DeviceSet::PrioritizedDeviceTypeList() const {
std::vector<DeviceType> result;
std::set<string> seen;
for (Device* d : devices_) {
const auto& t = d->device_type();
if (seen.insert(t).second) {
result.emplace_back(t);
}
}
std::sort(result.begin(), result.end(), DeviceTypeComparator);
return result;
}
void DeviceSet::SortPrioritizedDeviceTypeVector(
PrioritizedDeviceTypeVector* vector) {
if (vector == nullptr) return;
auto device_sort = [](const PrioritizedDeviceTypeVector::value_type& a,
const PrioritizedDeviceTypeVector::value_type& b) {
if (a.second != b.second) {
return a.second > b.second;
}
return DeviceTypeComparator(a.first, b.first);
};
std::sort(vector->begin(), vector->end(), device_sort);
}
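// Sorts (device, priority) pairs by: explicit priority, then the registered
// priority of the device type, then locality (local devices first), and
// finally device name as a deterministic tie-breaker.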
void DeviceSet::SortPrioritizedDeviceVector(PrioritizedDeviceVector* vector) {
auto device_sort = [](const std::pair<Device*, int32>& a,
const std::pair<Device*, int32>& b) {
if (a.second != b.second) {
return a.second > b.second;
}
const string& a_type_name = a.first->device_type();
const string& b_type_name = b.first->device_type();
if (a_type_name != b_type_name) {
auto a_priority = DeviceFactory::DevicePriority(a_type_name);
auto b_priority = DeviceFactory::DevicePriority(b_type_name);
if (a_priority != b_priority) {
return a_priority > b_priority;
}
}
if (a.first->IsLocal() != b.first->IsLocal()) {
return a.first->IsLocal();
}
return StringPiece(a.first->name()) < StringPiece(b.first->name());
};
std::sort(vector->begin(), vector->end(), device_sort);
}
namespace {
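// Rebuilds the prioritized device / device-type vectors when they are out of
// sync with `devices` (AddDevice() clears them, so a size mismatch means an
// empty cache that needs a full rebuild followed by a priority sort).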
void UpdatePrioritizedVectors(
const std::vector<Device*>& devices,
PrioritizedDeviceVector* prioritized_devices,
PrioritizedDeviceTypeVector* prioritized_device_types) {
if (prioritized_devices->size() != devices.size()) {
for (Device* d : devices) {
prioritized_devices->emplace_back(
d, DeviceSet::DeviceTypeOrder(DeviceType(d->device_type())));
}
DeviceSet::SortPrioritizedDeviceVector(prioritized_devices);
}
if (prioritized_device_types != nullptr &&
prioritized_device_types->size() != devices.size()) {
std::set<DeviceType> seen;
for (const std::pair<Device*, int32>& p : *prioritized_devices) {
DeviceType t(p.first->device_type());
if (seen.insert(t).second) {
prioritized_device_types->emplace_back(t, p.second);
}
}
}
}
}
const PrioritizedDeviceVector& DeviceSet::prioritized_devices() const {
mutex_lock l(devices_mu_);
UpdatePrioritizedVectors(devices_, &prioritized_devices_,
nullptr);
return prioritized_devices_;
}
const PrioritizedDeviceTypeVector& DeviceSet::prioritized_device_types() const {
mutex_lock l(devices_mu_);
UpdatePrioritizedVectors(devices_, &prioritized_devices_,
&prioritized_device_types_);
return prioritized_device_types_;
}
} | #include "tensorflow/core/common_runtime/device_set.h"
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static Device* Dev(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
class DeviceSetTest : public ::testing::Test {
public:
Device* AddDevice(const char* type, const char* name) {
Device* d = Dev(type, name);
owned_.emplace_back(d);
devices_.AddDevice(d);
return d;
}
const DeviceSet& device_set() const { return devices_; }
std::vector<DeviceType> types() const {
return devices_.PrioritizedDeviceTypeList();
}
private:
DeviceSet devices_;
std::vector<std::unique_ptr<Device>> owned_;
};
class DummyFactory : public DeviceFactory {
public:
Status ListPhysicalDevices(std::vector<string>* devices) override {
return absl::OkStatus();
}
Status CreateDevices(const SessionOptions& options, const string& name_prefix,
std::vector<std::unique_ptr<Device>>* devices) override {
return absl::OkStatus();
}
};
REGISTER_LOCAL_DEVICE_FACTORY("d1", DummyFactory);
REGISTER_LOCAL_DEVICE_FACTORY("d2", DummyFactory, 51);
REGISTER_LOCAL_DEVICE_FACTORY("d3", DummyFactory, 49);
TEST_F(DeviceSetTest, PrioritizedDeviceTypeList) {
EXPECT_EQ(50, DeviceSet::DeviceTypeOrder(DeviceType("d1")));
EXPECT_EQ(51, DeviceSet::DeviceTypeOrder(DeviceType("d2")));
EXPECT_EQ(49, DeviceSet::DeviceTypeOrder(DeviceType("d3")));
EXPECT_EQ(std::vector<DeviceType>{}, types());
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
EXPECT_EQ(std::vector<DeviceType>{DeviceType("d1")}, types());
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:1");
EXPECT_EQ(std::vector<DeviceType>{DeviceType("d1")}, types());
AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ((std::vector<DeviceType>{DeviceType("d2"), DeviceType("d1")}),
types());
AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ((std::vector<DeviceType>{
DeviceType("d2"),
DeviceType("d1"),
DeviceType("d3"),
}),
types());
}
TEST_F(DeviceSetTest, prioritized_devices) {
Device* d1 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
Device* d2 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ(device_set().prioritized_devices(),
(PrioritizedDeviceVector{std::make_pair(d2, 51),
std::make_pair(d1, 50)}));
Device* d3 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ(
device_set().prioritized_devices(),
(PrioritizedDeviceVector{std::make_pair(d2, 51), std::make_pair(d1, 50),
std::make_pair(d3, 49)}));
}
TEST_F(DeviceSetTest, prioritized_device_types) {
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ(
device_set().prioritized_device_types(),
(PrioritizedDeviceTypeVector{std::make_pair(DeviceType("d2"), 51),
std::make_pair(DeviceType("d1"), 50)}));
AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ(
device_set().prioritized_device_types(),
(PrioritizedDeviceTypeVector{std::make_pair(DeviceType("d2"), 51),
std::make_pair(DeviceType("d1"), 50),
std::make_pair(DeviceType("d3"), 49)}));
}
TEST_F(DeviceSetTest, SortPrioritizedDeviceVector) {
Device* d1_0 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
Device* d2_0 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
Device* d3_0 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
Device* d1_1 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:1");
Device* d2_1 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:1");
Device* d3_1 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:1");
PrioritizedDeviceVector sorted{
std::make_pair(d3_1, 30), std::make_pair(d1_0, 10),
std::make_pair(d2_0, 20), std::make_pair(d3_0, 30),
std::make_pair(d1_1, 20), std::make_pair(d2_1, 10)};
device_set().SortPrioritizedDeviceVector(&sorted);
EXPECT_EQ(sorted, (PrioritizedDeviceVector{
std::make_pair(d3_0, 30), std::make_pair(d3_1, 30),
std::make_pair(d2_0, 20), std::make_pair(d1_1, 20),
std::make_pair(d2_1, 10), std::make_pair(d1_0, 10)}));
}
TEST_F(DeviceSetTest, SortPrioritizedDeviceTypeVector) {
PrioritizedDeviceTypeVector sorted{std::make_pair(DeviceType("d3"), 20),
std::make_pair(DeviceType("d1"), 20),
std::make_pair(DeviceType("d2"), 30)};
device_set().SortPrioritizedDeviceTypeVector(&sorted);
EXPECT_EQ(sorted, (PrioritizedDeviceTypeVector{
std::make_pair(DeviceType("d2"), 30),
std::make_pair(DeviceType("d1"), 20),
std::make_pair(DeviceType("d3"), 20)}));
}
}
} |
1,592 | cpp | tensorflow/tensorflow | graph_constructor | tensorflow/core/common_runtime/graph_constructor.cc | tensorflow/core/common_runtime/graph_constructor_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_CONSTRUCTOR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_CONSTRUCTOR_H_
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class ShapeRefiner;
struct GraphConstructorOptions {
GraphConstructorOptions() = default;
bool allow_internal_ops = false;
bool expect_device_spec = false;
bool validate_nodes = false;
bool add_default_attributes = true;
};
extern Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
const GraphDef& gdef, Graph* g);
extern Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
GraphDef&& gdef, Graph* g);
extern Status ConvertNodeDefsToGraph(
const GraphConstructorOptions& opts, absl::Span<const NodeDef> nodes,
Graph* g, const GraphDebugInfo* debug_info = nullptr);
struct ImportGraphDefOptions {
ImportGraphDefOptions()
: uniquify_names(false),
uniquify_prefix(false),
skip_mapped_nodes(false),
validate_shape(true),
propagate_device_spec(false) {}
string prefix;
bool uniquify_names;
bool uniquify_prefix;
std::map<SafeTensorId, SafeTensorId> input_map;
bool skip_mapped_nodes;
std::vector<string> control_dependencies;
std::vector<SafeTensorId> return_tensors;
std::vector<string> return_nodes;
bool validate_colocation_constraints = true;
bool validate_shape;
string default_device;
bool propagate_device_spec;
};
struct ImportGraphDefResults {
typedef int Index;
std::vector<std::pair<Node*, Index>> return_tensors;
std::vector<Node*> return_nodes;
std::vector<SafeTensorId> missing_unused_input_map_keys;
};
extern Status ImportGraphDef(const ImportGraphDefOptions& opts,
const GraphDef& gdef, Graph* g,
ShapeRefiner* refiner,
ImportGraphDefResults* results = nullptr);
extern void CopyGraph(const Graph& src, Graph* dest);
}
#endif
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
static constexpr const bool kDoNotCheckDuplicates = true;
inline bool IsMerge(const NodeDef& node_def) {
return node_def.op() == "Merge" || node_def.op() == "RefMerge" ||
node_def.op() == "_XlaMerge";
}
inline bool IsNextIteration(const NodeDef& node_def) {
return node_def.op() == "NextIteration" ||
node_def.op() == "RefNextIteration";
}
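// Validates a node name against the GraphDef grammar: each '>'-separated
// segment must start with a letter, digit, or dot (underscore is also allowed
// for the first segment when internal ops are permitted) followed by letters,
// digits, dashes, dots, slashes, or underscores.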
bool IsValidNodeName(StringPiece s, bool allow_internal_ops) {
using ::tensorflow::strings::Scanner;
Scanner scanner(s);
scanner
.One(allow_internal_ops ? Scanner::LETTER_DIGIT_DOT_UNDERSCORE
: Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scanner.GetResult())
return false;
if (scanner.empty())
return true;
scanner.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
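// Drives conversion of a GraphDef (or a span of NodeDefs) into a Graph.
// TryImport() runs the import pipeline: name-collision checks, input-map and
// control-dependency validation, node indexing, topological conversion,
// back-edge insertion, version merging, and population of requested return
// tensors/nodes. Subclasses define how NodeDefs are supplied (copied vs.
// moved).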
class GraphConstructor {
public:
struct Options {
Options(const GraphConstructorOptions& in)
: allow_internal_ops(in.allow_internal_ops),
expect_device_spec(in.expect_device_spec),
propagate_device_spec(false),
uniquify_names(false),
uniquify_prefix(false),
skip_mapped_nodes(false),
importing(false),
validate_nodes(in.validate_nodes),
validate_colocation_constraints(false),
add_default_attributes(in.add_default_attributes) {}
Options(const ImportGraphDefOptions& in)
: allow_internal_ops(false),
expect_device_spec(false),
propagate_device_spec(in.propagate_device_spec),
prefix(in.prefix.empty() || str_util::EndsWith(in.prefix, "/")
? in.prefix
: in.prefix + "/"),
uniquify_names(in.uniquify_names),
uniquify_prefix(in.uniquify_prefix),
input_map(in.input_map.begin(), in.input_map.end()),
skip_mapped_nodes(in.skip_mapped_nodes),
control_dependencies(in.control_dependencies),
return_tensors(in.return_tensors.begin(), in.return_tensors.end()),
return_nodes(in.return_nodes),
importing(true),
validate_nodes(true),
validate_colocation_constraints(in.validate_colocation_constraints),
validate_shape(in.validate_shape),
default_device(in.default_device) {}
bool allow_internal_ops;
bool expect_device_spec;
bool propagate_device_spec;
string prefix;
bool uniquify_names;
bool uniquify_prefix;
std::map<TensorId, TensorId> input_map;
bool skip_mapped_nodes;
std::vector<string> control_dependencies;
std::vector<TensorId> return_tensors;
std::vector<string> return_nodes;
bool importing;
bool validate_nodes;
bool validate_colocation_constraints;
bool validate_shape = true;
bool add_default_attributes = true;
string default_device;
};
typedef absl::Span<const NodeDef* const> NodeDefSlice;
static Status Construct(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys);
static Status Construct(
const Options& opts, GraphDef&& graph_def, Graph* g,
ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys);
protected:
GraphConstructor(const Options& opts, Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: opts_(opts),
g_(g),
original_versions_(g->versions()),
prefix_(opts.prefix),
refiner_(refiner),
return_tensors_(return_tensors),
return_nodes_(return_nodes),
missing_unused_input_map_keys_(missing_unused_input_map_keys) {}
virtual ~GraphConstructor() {}
Status TryImport() {
TF_RETURN_IF_ERROR(EnsureNoNameCollisions());
TF_RETURN_IF_ERROR(ValidateInputMapAndControlDependencies());
TF_RETURN_IF_ERROR(BuildNodeIndex());
TF_RETURN_IF_ERROR(InitFromEdges());
TF_RETURN_IF_ERROR(Convert());
TF_RETURN_IF_ERROR(AddBackEdges());
TF_RETURN_IF_ERROR(UpdateVersionDef());
TF_RETURN_IF_ERROR(PopulateReturnTensors());
TF_RETURN_IF_ERROR(PopulateReturnNodes());
TF_RETURN_IF_ERROR(PopulateMissingUnusedInputMapKeys());
UpdateUniquifiedColocationNames();
FixupSourceAndSinkEdges(g_);
return absl::OkStatus();
}
private:
Status EnsureNoNameCollisions();
Status ValidateInputMapAndControlDependencies();
Status BuildNodeIndex();
Status InitFromEdges();
Status Convert();
Status AddBackEdges();
Status UpdateVersionDef();
Status PopulateReturnTensors();
Status PopulateReturnNodes();
Status PopulateMissingUnusedInputMapKeys();
FunctionDefLibraryStackTraces CreateStackTracesForFunctionDefLibrary(
const FunctionDefLibrary& library) const;
void Undo();
void PrintCycles();
void DFS(int cur_node, std::vector<int>* cur_branch,
std::vector<bool>* is_on_cur_branch,
absl::flat_hash_set<int>* unvisited,
const std::vector<absl::string_view>& node_names);
Status IsNodeFullyMapped(const NodeDef& node_def, bool* is_node_mapped);
Status ValidateColocationConstraints(const NodeDef& node_def);
Status MakeNode(NodeDef&& node_def, Node** node);
Status MakeEdge(Node* src, int output_index, Node* dst, int input_index);
Status ValidateShape(Node* node);
Status ModifyNodeDefForImport(NodeDef* node_def);
void RemapNodeDefInputs(NodeDef* node_def,
std::vector<bool>* input_already_exists);
void AddControlDependencies(NodeDef* node_def,
std::vector<bool>* input_already_exists);
void AddPrefixToNodeDef(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
void UniquifyNames(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
void UpdateUniquifiedColocationNames();
bool NameExistsInGraph(StringPiece name);
bool NameExistsInGraphDef(StringPiece name);
string FindUniqueName(StringPiece original_name);
void UpdatePendingCountAndReady(int processed, bool is_next_iteration);
virtual size_t node_def_count() const = 0;
virtual const NodeDef& get_node_def(int i) const = 0;
virtual NodeDef consume_node_def(int i) = 0;
virtual const VersionDef* versions() const = 0;
virtual std::optional<FunctionDefLibrary> consume_library() = 0;
virtual const GraphDebugInfo* debug_info() const = 0;
const Options opts_;
Graph* g_;
const VersionDef original_versions_;
string prefix_;
StackTracesMap traces_;
ShapeRefiner* refiner_;
std::vector<std::pair<Node*, int>>* return_tensors_;
std::vector<Node*>* return_nodes_;
std::vector<SafeTensorId>* missing_unused_input_map_keys_;
std::set<TensorId> used_input_map_keys_;
absl::flat_hash_set<int> merge_node_indices_;
struct NodeInfo {
explicit NodeInfo(int i) : gdef_index(i), node(nullptr) {}
NodeInfo() : NodeInfo(-1) {}
int gdef_index;
Node* node;
};
absl::flat_hash_map<std::string, NodeInfo> gdef_nodes_;
absl::flat_hash_set<StringPiece> gdef_prefixes_;
absl::flat_hash_map<StringPiece, Node*> existing_nodes_;
absl::flat_hash_set<StringPiece> existing_prefixes_;
gtl::FlatMap<string, string> uniquified_names_;
std::set<int> ready_;
std::vector<int> pending_count_;
std::vector<gtl::InlinedVector<int, 4>> outputs_;
struct InputInfo {
explicit InputInfo(const string& node_name, Node* n, int i)
: name(node_name), node(n), index(i) {}
string name;
Node* node;
int index;
static bool IsControlInput(const InputInfo& input) {
return input.index == Graph::kControlSlot;
}
static int CompareName(const InputInfo& lhs, const InputInfo& rhs) {
return lhs.name < rhs.name;
}
static bool IsSameName(const InputInfo& lhs, const InputInfo& rhs) {
return lhs.name == rhs.name;
}
};
struct EdgeInfo {
explicit EdgeInfo(const string& name, int i1, Node* n, int i2)
: src_name(name), src_index(i1), dst_node(n), dst_index(i2) {}
string src_name;
int src_index;
Node* dst_node;
int dst_index;
};
std::vector<EdgeInfo> back_edges_;
GraphConstructor(const GraphConstructor&) = delete;
void operator=(const GraphConstructor&) = delete;
};
class NodeDefCopyingGraphConstructor : public GraphConstructor {
public:
NodeDefCopyingGraphConstructor(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: GraphConstructor(opts, g, refiner, return_tensors, return_nodes,
missing_unused_input_map_keys),
node_defs_(node_defs),
versions_(versions),
library_(library),
debug_info_(debug_info) {}
private:
size_t node_def_count() const override { return node_defs_.size(); }
const NodeDef& get_node_def(int i) const override { return *node_defs_[i]; }
NodeDef consume_node_def(int i) override { return *node_defs_[i]; }
const VersionDef* versions() const override { return versions_; }
std::optional<FunctionDefLibrary> consume_library() override {
if (library_ == nullptr) {
return std::nullopt;
} else {
return *library_;
}
}
const GraphDebugInfo* debug_info() const override { return debug_info_; }
const NodeDefSlice node_defs_;
const VersionDef* const versions_;
const FunctionDefLibrary* const library_;
const GraphDebugInfo* const debug_info_;
};
class NodeDefMovingGraphConstructor : public GraphConstructor {
public:
NodeDefMovingGraphConstructor(
const Options& opts, GraphDef&& graph_def, Graph* g,
ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: GraphConstructor(opts, g, refiner, return_tensors, return_nodes,
missing_unused_input_map_keys),
graph_def_(std::move(graph_def)),
is_consumed_(graph_def_.node_size(), false) {}
private:
size_t node_def_count() const override { return graph_def_.node().size(); }
const NodeDef& get_node_def(int i) const override {
CHECK(!is_consumed_[i])
<< "NodeDef " << i << " accessed after it was consumed.";
return graph_def_.node(i);
}
NodeDef consume_node_def(int i) override {
CHECK(!is_consumed_[i]) << "NodeDef " << i << " consumed twice.";
is_consumed_[i] = true;
return std::move(*graph_def_.mutable_node(i));
}
const VersionDef* versions() const override { return &graph_def_.versions(); }
std::optional<FunctionDefLibrary> consume_library() override {
return std::move(*graph_def_.mutable_library());
}
const GraphDebugInfo* debug_info() const override {
return &graph_def_.debug_info();
}
GraphDef graph_def_;
std::vector<bool> is_consumed_;
};
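// GraphDef producer versions advance roughly daily, so a producer more than
// 21 versions ahead of this binary's TF_GRAPH_DEF_VERSION is likely outside
// TensorFlow's ~3-week forward compatibility window; the helper below appends
// a hint to import errors in that case.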
bool ForwardCompatibilityWindowPassed(const VersionDef& versions) {
return (versions.producer() - TF_GRAPH_DEF_VERSION) > 21;
}
Status MaybeAppendVersionWarning(const VersionDef* versions,
const Status& import_status) {
if (versions && ForwardCompatibilityWindowPassed(*versions)) {
return Status(
import_status.code(),
absl::StrCat(
"Converting GraphDef to Graph has failed with an error: '",
import_status.message(),
"' The binary trying to import the GraphDef was built when "
"GraphDef version was ",
TF_GRAPH_DEF_VERSION,
". The GraphDef was produced by a binary built when GraphDef "
"version was ",
versions->producer(),
". The difference between these versions is larger than "
"TensorFlow's forward compatibility guarantee, and might be the "
"root cause for failing to import the GraphDef."));
}
return import_status;
}
Status GraphConstructor::Construct(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys) {
if (versions) {
TF_RETURN_IF_ERROR(CheckVersions(*versions, TF_GRAPH_DEF_VERSION,
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
"GraphDef", "graph"));
}
NodeDefCopyingGraphConstructor c(opts, node_defs, versions, library,
debug_info, g, refiner, return_tensors,
return_nodes, missing_unused_input_map_keys);
Status s = c.TryImport();
if (!s.ok()) {
c.Undo();
s = MaybeAppendVersionWarning(versions, s);
}
return s;
}
Status GraphConstructor::Construct(
const Options& opts, GraphDef&& graph_def, Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys) {
TF_RETURN_IF_ERROR(CheckVersions(graph_def.versions(), TF_GRAPH_DEF_VERSION,
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
"GraphDef", "graph"));
VersionDef version_def = graph_def.versions();
NodeDefMovingGraphConstructor c(opts, std::move(graph_def), g, refiner,
return_tensors, return_nodes,
missing_unused_input_map_keys);
Status s = c.TryImport();
if (!s.ok()) {
c.Undo();
s = MaybeAppendVersionWarning(&version_def, s);
}
return s;
}
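// Decrements the pending-input count of every consumer of `processed` and
// moves newly ready nodes onto ready_. Edges from a NextIteration node to a
// Merge node are skipped here; those back edges are wired up separately (see
// AddBackEdges) so loops in the graph do not stall the topological conversion.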
void GraphConstructor::UpdatePendingCountAndReady(int processed,
bool is_next_iteration) {
for (size_t i = 0; i < outputs_[processed].size(); ++i) {
const int output = outputs_[processed][i];
bool is_next_iteration_to_merge_edge =
is_next_iteration && merge_node_indices_.count(output) == 1;
if (!is_next_iteration_to_merge_edge) {
int* current_pending_count = &pending_count_[output];
CHECK_GT(*current_pending_count, 0);
(*current_pending_count)--;
if (*current_pending_count == 0) {
ready_.insert(output);
}
}
}
}
bool NodeNameInValues(const std::map<TensorId, TensorId>& input_map,
const StringPiece& node_name) {
for (auto iter = input_map.begin(); iter != input_map.end(); ++iter) {
if (iter->second.first == node_name) return true;
}
return false;
}
bool NodeNameInValues(const std::vector<string>& control_dependencies,
const StringPiece& node_name) {
  return std::find(control_dependencies.begin(), control_dependencies.end(),
                   node_name) != control_dependencies.end();
} | #include "tensorflow/core/common_runtime/graph_constructor.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
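// Test fixture: Convert() parses textual GraphDefs, ExpectOK/ExpectError run
// ConvertGraphDefToGraph or ImportGraphDef and check the resulting status, and
// HasNode/HasEdge/ColocationGroup inspect the constructed graph_.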
class GraphConstructorTest : public ::testing::Test {
protected:
GraphConstructorTest() : graph_(OpRegistry::Global()) {}
void Convert(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef_));
}
void ExpectError(const string& gdef_ascii,
const std::vector<string>& expected_error_strs,
string not_expected_error_str = "") {
const string original_graph_description = GraphDebugString();
Convert(gdef_ascii);
GraphConstructorOptions opts;
Status status = ConvertGraphDefToGraph(opts, gdef_, &graph_);
EXPECT_FALSE(status.ok());
for (const string& error : expected_error_strs) {
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Expected to find '" << error << "' in " << status;
}
if (!not_expected_error_str.empty()) {
EXPECT_TRUE(!absl::StrContains(status.message(), not_expected_error_str))
<< "Expected not to find '" << not_expected_error_str << "' in "
<< status;
}
EXPECT_EQ(original_graph_description, GraphDebugString());
}
void ExpectError(const string& gdef_ascii, const ImportGraphDefOptions& opts,
const std::vector<string>& expected_error_strs,
ShapeRefiner* refiner = nullptr,
ImportGraphDefResults* results = nullptr) {
const string original_graph_description = GraphDebugString();
Convert(gdef_ascii);
Status status = ImportGraphDef(opts, gdef_, &graph_, refiner, results);
EXPECT_FALSE(status.ok());
for (const string& error : expected_error_strs) {
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Expected to find '" << error << "' in " << status;
}
EXPECT_EQ(original_graph_description, GraphDebugString());
}
void ExpectOK(const string& gdef_ascii) {
Convert(gdef_ascii);
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef_, &graph_));
}
void ExpectOK(const string& gdef_ascii, const ImportGraphDefOptions& opts,
ShapeRefiner* refiner = nullptr,
ImportGraphDefResults* results = nullptr) {
Convert(gdef_ascii);
Status s = ImportGraphDef(opts, gdef_, &graph_, refiner, results);
EXPECT_EQ(absl::OkStatus(), s) << s;
}
void ExpectVersions(int min_consumer, int producer) {
EXPECT_EQ(min_consumer, graph_.versions().min_consumer())
<< "Expected min consumer " << min_consumer << ", got "
<< graph_.versions().min_consumer();
EXPECT_EQ(producer, graph_.versions().producer())
<< "Expected producer " << producer << ", got "
<< graph_.versions().producer();
}
Node* FindNode(const string& name) {
for (Node* n : graph_.nodes()) {
if (n->name() == name) return n;
}
return nullptr;
}
bool HasNode(const string& name) { return FindNode(name) != nullptr; }
bool HasEdge(const string& src, int src_out, const string& dst, int dst_in) {
for (const Edge* e : graph_.edges()) {
if (e->src()->name() == src && e->src_output() == src_out &&
e->dst()->name() == dst && e->dst_input() == dst_in) {
return true;
}
}
return false;
}
bool HasControlEdge(const string& src, const string& dst) {
return HasEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
string ColocationGroup(const string& node) {
Node* n = nullptr;
for (Node* ni : graph_.nodes()) {
if (ni->name() == node) {
n = ni;
break;
}
}
if (n == nullptr) {
return "";
}
std::vector<string> value;
Status s = GetNodeAttr(n->attrs(), kColocationAttrName, &value);
if (!s.ok()) {
return "";
}
if (value.size() != 1) {
ADD_FAILURE()
<< "ColocationGroup was written with the assumption of at most 1 "
"value for the _class attribute. Update it and its callers";
return "";
}
StringPiece loc(value[0]);
return absl::ConsumePrefix(&loc, kColocationGroupPrefix) ? string(loc) : "";
}
string GraphDebugString() const {
return graph_.ToGraphDefDebug().DebugString();
}
Graph graph_;
private:
GraphDef gdef_;
};
Status Scalars(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
REGISTER_OP("ABC");
REGISTER_OP("TestParams").Output("o: float").SetShapeFn(Scalars);
REGISTER_OP("TestInput")
.Output("a: float")
.Output("b: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestMul")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestInt").Input("a: int32");
REGISTER_OP("TestOneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestOneInputOneOutput")
.Input("x: T")
.Output("y: T")
.Attr("T: {float, int64}")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("TestVariadicOutput")
.Output("outputs: N * int32")
.Attr("N: int >= 0")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("TestDefaultAttr")
.Attr("default_int: int=31415")
.SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("RequiresCurrentGraphVersion")
.Output("version: int32")
.SetIsStateful()
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->graph_def_version() != TF_GRAPH_DEF_VERSION) {
return errors::InvalidArgument("Wrong graph version for shape");
}
return shape_inference::ScalarShape(c);
});
TEST_F(GraphConstructorTest, InvalidNodeName) {
auto expect_invalid_name = [this](const char* name) {
ExpectError(strings::StrCat("node { name: '", name, "' op: 'ABC' }"),
{"Node name contains invalid characters"});
};
expect_invalid_name("a:b");
expect_invalid_name("_abc");
expect_invalid_name(R"(a\\b)");
expect_invalid_name("/a");
expect_invalid_name("-a");
ExpectOK("node { name: 'a-bc_' op: 'ABC' }");
ExpectOK("node { name: 'a-B.0/.c_' op: 'ABC' }");
ExpectOK("node { name: '0123' op: 'ABC' }");
ExpectOK("node { name: '.0123' op: 'ABC' }");
}
TEST_F(GraphConstructorTest, InvalidSourceNodeName) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: 'W999' input: 'input' }",
{"Unknown input node", "W999"});
}
TEST_F(GraphConstructorTest, InvalidSourceNodeIndex) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1:1', 'input:1' ] }",
{"Connecting to invalid output 1 of source node W1"});
}
TEST_F(GraphConstructorTest, GraphWithCycle) {
ExpectError(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }",
{"cycle"});
}
TEST_F(GraphConstructorTest, GraphWithOKCycle) {
ExpectOK(R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF");
}
TEST_F(GraphConstructorTest, ImportGraphThatUsesConstantValueFromInsideLoop) {
const string pb_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "Const_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 1
}
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "Const_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/transpose"
op: "Transpose"
input: "while/Identity_1"
input: "while/Identity_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tperm"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Identity"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/transpose"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 21
}
)EOF";
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(pb_ascii, &def));
ImportGraphDefOptions opts;
auto s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
}
TEST_F(GraphConstructorTest, TypeMismatch) {
ExpectError(
"node { name: 'input' op: 'TestInput' }"
"node { name: 'int' op: 'TestInt' input: [ 'input' ] }",
{"Input 0 of node int was passed float from input:0 incompatible with "
"expected int32."});
}
TEST_F(GraphConstructorTest, EmptyGraph) {
ExpectOK("");
ExpectVersions(0, 0);
}
TEST_F(GraphConstructorTest, VersionGraph) {
ExpectOK(strings::StrCat("versions { producer: ", TF_GRAPH_DEF_VERSION,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER,
"}"));
ExpectVersions(TF_GRAPH_DEF_VERSION_MIN_CONSUMER, TF_GRAPH_DEF_VERSION);
}
TEST_F(GraphConstructorTest, ForwardCompatError) {
ExpectError(
strings::StrCat(
"node { name: 'a:b' op: 'ABC' }\n"
"versions { producer: ",
TF_GRAPH_DEF_VERSION + 22,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER, "}"),
{"forward compatibility guarantee"});
}
TEST_F(GraphConstructorTest, NoForwardCompatError) {
ExpectError(
strings::StrCat(
"node { name: 'a:b' op: 'ABC' }\n"
"versions { producer: ",
TF_GRAPH_DEF_VERSION + 21,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER, "}"),
{"Node name contains invalid characters"},
"forward compatibility guarantee");
}
TEST_F(GraphConstructorTest, LowVersion) {
ExpectError(strings::StrCat("versions { producer: ", -1, " }"),
{strings::StrCat("GraphDef producer version -1 below min "
"producer ",
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
" supported by TensorFlow ", TF_VERSION_STRING,
". Please regenerate your graph.")});
}
TEST_F(GraphConstructorTest, HighVersion) {
const int version = TF_GRAPH_DEF_VERSION + 1;
ExpectError(strings::StrCat("versions { min_consumer: ", version, " }"),
{strings::StrCat("GraphDef min consumer version ", version,
" above current version ", TF_GRAPH_DEF_VERSION,
" for TensorFlow ", TF_VERSION_STRING,
". Please upgrade TensorFlow.")});
}
TEST_F(GraphConstructorTest, BadVersion) {
const int version = TF_GRAPH_DEF_VERSION + 1;
const int bad = TF_GRAPH_DEF_VERSION;
ExpectError(
strings::StrCat("versions { producer: ", version, " bad_consumers: ", bad,
" }"),
{strings::StrCat(
"GraphDef disallows consumer version ", bad,
". Please upgrade TensorFlow: this version is likely buggy.")});
}
TEST_F(GraphConstructorTest, SimpleModel) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }");
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("W1", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
}
TEST_F(GraphConstructorTest, SimpleModelWithControlEdges) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' input: [ '^W1' ] }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W1', 'input:1', '^t1' ] }");
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasEdge("W1", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
EXPECT_TRUE(HasEdge("W1", 0, "t2", 0));
EXPECT_TRUE(HasEdge("input", 1, "t2", 1));
EXPECT_TRUE(HasControlEdge("W1", "input"));
EXPECT_TRUE(HasControlEdge("t1", "t2"));
}
TEST_F(GraphConstructorTest, Error_ControlEdgeBeforeRealInput) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' input: [ '^W1' ] }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W1', '^t1', 'input:1' ] }",
{"Node 't2': Control dependencies must come after regular dependencies"});
}
TEST_F(GraphConstructorTest, ImportGraphDef) {
GraphDef def;
ImportGraphDefOptions opts;
const string& source = graph_.FindNodeId(Graph::kSourceId)->name();
const string& sink = graph_.FindNodeId(Graph::kSinkId)->name();
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(2, graph_.num_nodes());
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_EQ(1, graph_.num_edges());
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node { name: "A" op: "TestParams" }
node { name: "X" op: "TestParams" }
node {
name: "B"
op: "TestOneInputTwoOutputs"
input: "A"
attr {
key: "_class"
value { list { s: "loc:@A" } }
}
}
node {
name: "C"
op: "TestOneInputTwoOutputs"
input: "B:1"
input: "^X"
}
node {
name: "D"
op: "TestMul"
input: "B:0"
input: "C:0"
})EOF",
&def);
ASSERT_TRUE(parsed);
s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(5 + 2, graph_.num_nodes());
EXPECT_EQ("A", ColocationGroup("B"));
EXPECT_TRUE(HasEdge("A", 0, "B", 0));
EXPECT_TRUE(HasEdge("B", 1, "C", 0));
EXPECT_TRUE(HasEdge("B", 0, "D", 0));
EXPECT_TRUE(HasEdge("C", 0, "D", 1));
EXPECT_TRUE(HasControlEdge("X", "C"));
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "A"));
EXPECT_TRUE(HasControlEdge(source, "X"));
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_EQ(9, graph_.num_edges());
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
opts.prefix = "import";
s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(
10 + 2,
graph_.num_nodes());
EXPECT_EQ("A", ColocationGroup("B"));
EXPECT_EQ("import/A", ColocationGroup("import/B"));
EXPECT_TRUE(HasEdge("A", 0, "B", 0));
EXPECT_TRUE(HasEdge("B", 1, "C", 0));
EXPECT_TRUE(HasEdge("B", 0, "D", 0));
EXPECT_TRUE(HasEdge("C", 0, "D", 1));
EXPECT_TRUE(HasControlEdge("X", "C"));
EXPECT_TRUE(HasEdge("import/A", 0, "import/B", 0));
EXPECT_TRUE(HasEdge("import/B", 1, "import/C", 0));
EXPECT_TRUE(HasEdge("import/B", 0, "import/D", 0));
EXPECT_TRUE(HasEdge("import/C", 0, "import/D", 1));
EXPECT_TRUE(HasControlEdge("import/X", "import/C"));
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "A"));
EXPECT_TRUE(HasControlEdge(source, "X"));
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_TRUE(HasControlEdge(source, "import/A"));
EXPECT_TRUE(HasControlEdge(source, "import/X"));
EXPECT_TRUE(HasControlEdge("import/D", sink));
EXPECT_EQ(17, graph_.num_edges());
}
TEST_F(GraphConstructorTest, ImportGraphDef_DefaultAttrs) {
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{ name:'A' op:'TestDefaultAttr'}", &def));
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
Node* a = nullptr;
for (Node* n : graph_.nodes()) {
if (n->name() == "A") {
a = n;
break;
}
}
ASSERT_TRUE(a != nullptr);
int value = 0;
s = GetNodeAttr(a->attrs(), "default_int", &value);
ASSERT_EQ(absl::OkStatus(), s) << s << " -- " << a->def().DebugString();
EXPECT_EQ(31415, value);
}
TEST_F(GraphConstructorTest, ImportGraphDef_Versioning) {
GraphDef def;
const ImportGraphDefOptions opts;
def.mutable_versions()->set_producer(TF_GRAPH_DEF_VERSION_MIN_PRODUCER - 1);
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
def.mutable_versions()->set_min_consumer(TF_GRAPH_DEF_VERSION + 1);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
def.mutable_versions()->add_bad_consumers(TF_GRAPH_DEF_VERSION);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
graph_.ToGraphDef(&def);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
def.Clear();
const int original_min_consumer = graph_.versions().min_consumer();
def.mutable_versions()->set_min_consumer(original_min_consumer + 2);
def.mutable_versions()->add_bad_consumers(TF_GRAPH_DEF_VERSION - 1);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(original_min_consumer + 2, graph_.versions().min_consumer());
ASSERT_EQ(1, graph_.versions().bad_consumers_size());
EXPECT_EQ(TF_GRAPH_DEF_VERSION - 1, graph_.versions().bad_consumers(0));
}
TEST_F(GraphConstructorTest, ImportGraphDef_DeprecatedOps) {
GraphDef def;
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node {
name: "zeros"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
dim {
size: 149
}
dim {
size: 149
}
dim {
size: 32
}
}
float_val: 0.0
}
}
}
}
node {
name: "m_v_beta_gamma"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 32
}
}
tensor_content: "\265\374\010=S\250\t\276\206\371>;Z\306y>\217]@\276\347\206\202\275\3747\241\275+1\227=J1\352\275\353?H;`\253\000>\023Y\014\276\341\310L;\301\030\314;\032Kw\275\273fQ;\036\252\200=\257o/\273\377\241\247\275\307,\332\274L\255\247\274\023\331R=r\271\225<\016/\204<\364\340\375\272t\030J=\220\306}\276\276x\003\275\231\013}\276\212\034\224\276\257\020\216>A\223\217\276"
}
}
}
}
node {
name: "batchnorm"
op: "BatchNormWithGlobalNormalization"
input: "zeros"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "scale_after_normalization"
value {
b: false
}
}
attr {
key: "variance_epsilon"
value {
f: 0.0010000000475
}
}
}
)EOF",
&def);
ASSERT_TRUE(parsed);
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
Graph g2(OpRegistry::Global());
def.mutable_versions()->set_producer(10);
s = ImportGraphDef(ImportGraphDefOptions(), def, &g2, nullptr);
EXPECT_EQ(error::UNIMPLEMENTED, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"BatchNormWithGlobalNormalization is not "
"available in GraphDef version 10"))
<< s;
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMap) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_FALSE(HasEdge("new_input", 0, "t1", 0));
EXPECT_FALSE(HasEdge("new_input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithPrefix) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'input' op: 'TestInput' } "
"node { name: 'unmapped_input' op: 'TestInput'}",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("input", 1)] = TensorId("input", 0);
opts.prefix = "import";
ExpectOK(
R"EOF(
node { name: 'input' op: 'TestInput' }
node { name: 'unmapped_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'input:0', 'input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
node { name: 't3' op: 'TestMul' input: [ 'unmapped_input:0',
'unmapped_input:1' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("unmapped_input"));
EXPECT_TRUE(HasNode("import/unmapped_input"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasNode("import/t2"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasEdge("input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "import/t1", 1));
EXPECT_FALSE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_FALSE(HasEdge("import/input", 0, "import/t1", 1));
EXPECT_TRUE(HasEdge("import/t1", 0, "import/t2", 0));
EXPECT_TRUE(HasEdge("import/unmapped_input", 0, "import/t3", 0));
EXPECT_TRUE(HasEdge("import/unmapped_input", 1, "import/t3", 1));
Node* t1 = FindNode("import/t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "input:0");
EXPECT_EQ(t1->requested_inputs()[1], "input:0");
Node* t2 = FindNode("import/t2");
ASSERT_EQ(t2->requested_inputs().size(), 2);
EXPECT_EQ(t2->requested_inputs()[0], "import/t1:0");
EXPECT_EQ(t2->requested_inputs()[1], "import/t1:0");
Node* t3 = FindNode("import/t3");
ASSERT_EQ(t3->requested_inputs().size(), 2);
EXPECT_EQ(t3->requested_inputs()[0], "import/unmapped_input:0");
EXPECT_EQ(t3->requested_inputs()[1], "import/unmapped_input:1");
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithControlEdges) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry()); |
1,593 | cpp | tensorflow/tensorflow | function_optimization_registry | tensorflow/core/common_runtime/function_optimization_registry.cc | tensorflow/core/common_runtime/function_optimization_registry_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_OPTIMIZATION_REGISTRY_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_OPTIMIZATION_REGISTRY_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
class FunctionOptimizationPass {
public:
struct FunctionOptions {
std::string xla_compile_device_type = "";
bool allow_soft_placement = false;
};
virtual ~FunctionOptimizationPass() {}
virtual Status Run(const std::string& function_name,
const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptions& function_options,
std::unique_ptr<Graph>* graph,
FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) = 0;
};
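// Holds at most one FunctionOptimizationPass for the whole process (Init
// DCHECKs against double registration in the .cc file); Run() returns
// OkStatus immediately when no pass has been registered.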
class FunctionOptimizationPassRegistry {
public:
void Init(std::unique_ptr<FunctionOptimizationPass> pass);
Status Run(const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated);
static FunctionOptimizationPassRegistry& Global();
private:
std::unique_ptr<FunctionOptimizationPass> pass_;
};
namespace function_optimization_registration {
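// RAII helper whose construction installs a pass into the global registry.
// Typical use (sketch; MyPass is a hypothetical FunctionOptimizationPass
// subclass):
//   static function_optimization_registration::FunctionOptimizationPassRegistration
//       register_my_pass(std::make_unique<MyPass>());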
class FunctionOptimizationPassRegistration {
public:
explicit FunctionOptimizationPassRegistration(
std::unique_ptr<FunctionOptimizationPass> pass) {
FunctionOptimizationPassRegistry::Global().Init(std::move(pass));
}
};
}
}
#endif
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include <string>
#include "tensorflow/core/framework/metrics.h"
namespace tensorflow {
void FunctionOptimizationPassRegistry::Init(
std::unique_ptr<FunctionOptimizationPass> pass) {
DCHECK(!pass_) << "Only one pass should be set.";
pass_ = std::move(pass);
}
Status FunctionOptimizationPassRegistry::Run(
const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) {
if (!pass_) return absl::OkStatus();
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"GraphOptimizationPass", "FunctionOptimizationPassRegistry"});
return pass_->Run(function_name, device_set, config_proto, function_options,
graph, flib_def, control_ret_node_names,
control_rets_updated);
}
FunctionOptimizationPassRegistry& FunctionOptimizationPassRegistry::Global() {
static FunctionOptimizationPassRegistry* kGlobalRegistry =
new FunctionOptimizationPassRegistry;
return *kGlobalRegistry;
}
} | #include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include <memory>
#include <string>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
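// Trivial pass used by the test below; it only records that Run() was invoked.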
class PassingFunctionPass : public FunctionOptimizationPass {
public:
static bool ran_;
Status Run(const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) override {
ran_ = true;
return absl::OkStatus();
}
};
bool PassingFunctionPass::ran_ = false;
TEST(FunctionOptimizationPassRegistry, PassNoError) {
EXPECT_FALSE(PassingFunctionPass::ran_);
FunctionOptimizationPassRegistry::Global().Init(
std::make_unique<PassingFunctionPass>());
DeviceSet device_set;
ConfigProto config_proto;
FunctionOptimizationPass::FunctionOptions function_options;
Status status = FunctionOptimizationPassRegistry::Global().Run(
"test_func", device_set, config_proto, function_options,
nullptr,
nullptr,
nullptr, nullptr);
EXPECT_EQ(status, absl::OkStatus());
EXPECT_TRUE(PassingFunctionPass::ran_);
}
} |
1,594 | cpp | tensorflow/tensorflow | permuter | tensorflow/core/common_runtime/permuter.cc | tensorflow/core/common_runtime/permuter_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PERMUTER_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PERMUTER_H_
#include <deque>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/framework/collective.h"
namespace tensorflow {
class Device;
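// Collective that permutes tensors across the devices of a group: rank i sends
// its input to rank permutation[i] and receives its output from the rank j for
// which permutation[j] == i. The done callback fires only after both the send
// and the receive have completed.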
class Permuter : public CollectiveImplementationInterface {
public:
Permuter();
~Permuter() override = default;
void Run(StatusCallback done) override;
Status InitializeCollectiveParams(CollectiveParams* col_params) override {
return absl::OkStatus();
}
Status InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) override;
private:
std::shared_ptr<CollectiveContext> col_ctx_;
const CollectiveParams* col_params_;
StatusCallback done_;
mutex mu_;
Status status_ TF_GUARDED_BY(mu_);
int counter_ TF_GUARDED_BY(mu_);
void DispatchSend(int src_rank, int target_rank, const Tensor* tensor,
const StatusCallback& done);
void DispatchRecv(int src_rank, int target_rank, Tensor* tensor,
const StatusCallback& done);
StatusCallback CheckCounterAndCallDone();
};
}
#endif
#include "tensorflow/core/common_runtime/permuter.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
Permuter::Permuter()
: col_ctx_(nullptr), col_params_(nullptr), done_(nullptr), counter_(0) {}
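// Returns a callback that accumulates status and invokes done_ once both the
// dispatched send and receive have finished (counter_ reaches 2).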
StatusCallback Permuter::CheckCounterAndCallDone() {
return [this](const Status& s) {
mu_.lock();
status_.Update(s);
int counter = ++counter_;
Status status = status_;
mu_.unlock();
if (counter == 2) done_(status);
};
}
Status Permuter::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
DCHECK(col_ctx->dev_mgr);
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
void Permuter::Run(StatusCallback done) {
if (col_params_->instance.permutation.size() !=
col_params_->instance.devices.size()) {
    done(errors::Internal("Permutation must be the same size as devices"));
    return;
  }
done_ = std::move(done);
DispatchSend(col_params_->default_rank,
col_params_->instance.permutation[col_params_->default_rank],
col_ctx_->input, CheckCounterAndCallDone());
for (int i = 0; i < col_params_->instance.permutation.size(); ++i) {
if (col_params_->default_rank == col_params_->instance.permutation[i]) {
DispatchRecv(i, col_params_->instance.permutation[i], col_ctx_->output,
CheckCounterAndCallDone());
}
}
}
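// Posts `tensor` to the peer at target_rank, keyed by the execution key and the
// (src_rank, target_rank) pair so the matching recv can find it.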
void Permuter::DispatchSend(int src_rank, int target_rank, const Tensor* tensor,
const StatusCallback& done) {
string send_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
VLOG(1) << "DispatchSend " << send_buf_key << " from_device "
<< col_ctx_->device_name << " to_device "
<< col_params_->instance.devices[target_rank]
<< " target_rank=" << target_rank << " src_rank=" << src_rank;
col_ctx_->col_exec->remote_access()->PostToPeer(
col_params_->instance.devices[target_rank],
col_params_->group.members[target_rank].task, send_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
col_ctx_->op_ctx->cancellation_manager(), done);
}
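// Receives the permuted tensor from the peer at src_rank into `tensor`.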
void Permuter::DispatchRecv(int src_rank, int target_rank, Tensor* tensor,
const StatusCallback& done) {
string recv_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
VLOG(1) << "DispatchRecv " << recv_buf_key << " to_device "
<< col_ctx_->device_name << " from_device "
<< col_params_->instance.devices[src_rank]
<< " target_rank=" << target_rank << " src_rank=" << src_rank;
col_ctx_->col_exec->remote_access()->RecvFromPeer(
col_params_->instance.devices[src_rank],
col_params_->group.members[src_rank].task,
col_params_->group.members[src_rank].is_local, recv_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
0, col_ctx_->op_ctx->cancellation_manager(), done);
}
namespace {
REGISTER_COLLECTIVE(Permute, Permuter);
}
} | #include "tensorflow/core/common_runtime/permuter.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include "absl/memory/memory.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
class PermuterTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices,
const std::vector<int>& permutation, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, permutation, dtype, shape, test_env_.get()));
}
}
}
typedef std::function<void(Tensor*)> InitFunc;
void Permute(int fail_after) {
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, &done] {
di->DoPermute();
++done;
});
if (fail_after > 0) {
Env::Default()->SleepForMicroseconds(100);
}
}
while (done < instances_.size()) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int tensor_len, int fail_after) {
std::vector<int> permutation(num_workers * num_devices);
std::iota(permutation.begin(), permutation.end(), 0);
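    // Build a test permutation by swapping adjacent pairs; if the size is odd,
    // the final element is swapped with position 0 instead.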
for (int i = 0; i < permutation.size(); i += 2) {
if (permutation.size() == i + 1) {
std::swap(permutation[i], permutation[0]);
continue;
}
std::next_permutation(permutation.begin() + i,
permutation.begin() + i + 2);
}
Init(num_workers, num_devices, permutation, dtype,
TensorShape({tensor_len}), device_type, fail_after);
gtl::InlinedVector<T, 4> expected(tensor_len * num_devices * num_workers,
0.0);
for (int di = 0; di < instances_.size(); ++di) {
instances_[di]->InitTensor(
[&permutation, &expected, di, tensor_len](Tensor* t) {
for (size_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
t->flat<T>()(i) = value;
expected[permutation[di] * tensor_len + i] = value;
}
});
}
Permute(fail_after);
for (int di = 0; di < instances_.size(); ++di) {
if (!instances_[di]->status_.ok()) {
ASSERT_GT(fail_after, 0);
ASSERT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
continue;
}
TF_EXPECT_OK(instances_[di]->status_);
test::ExpectTensorEqual<T>(
test::AsTensor<T>(
absl::MakeSpan(expected).subspan(di * tensor_len, tensor_len)),
instances_[di]->output_tensor_);
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, std::vector<int> permutation, DataType dtype,
const TensorShape& shape, CollectiveTestEnv* test_env)
: test_env_(test_env),
input_tensor_(dtype, shape),
output_tensor_(dtype, shape) {
col_params_ = CreateCollectiveParams(*test_env_, rank, "Permute",
PERMUTE_COLLECTIVE, dtype, shape);
col_params_->instance.permutation = std::move(permutation);
for (const CollGroupMember& member : col_params_->group.members) {
col_params_->instance.devices.push_back(member.device.name());
}
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
}
void InitTensor(const InitFunc& f) { f(&input_tensor_); }
void DoPermute() {
status_ = RunCollective(test_env_, col_params_.get(), device_,
&input_tensor_, &output_tensor_);
}
CollectiveTestEnv* test_env_;
Tensor input_tensor_;
Tensor output_tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
};
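// Expands to a test that dispatches RunTest<T> for the given data type, device
// type, worker/device counts, tensor length, and abort point.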
#define DEF_TEST(B, T, W, D, L, A) \
TEST_F(PermuterTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Len##L##_Abrt##A) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_BOOL: { \
RunTest<bool>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 3, 3, 0)
DEF_TEST(FLOAT, CPU, 1, 7, 3, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 2, 3, 0)
DEF_TEST(FLOAT, CPU, 2, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 4095, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 1045991, 0)
DEF_TEST(BOOL, CPU, 1, 4, 1, 0)
DEF_TEST(BOOL, CPU, 2, 4, 1, 0)
DEF_TEST(BOOL, CPU, 2, 4, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 4, 128, 0)
DEF_TEST(INT32, CPU, 2, 4, 128, 0)
DEF_TEST(INT64, CPU, 2, 4, 128, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 1)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 5)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 7, 3, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 33, 0)
DEF_TEST(FLOAT, GPU, 1, 3, 64, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1045991, 0)
DEF_TEST(BOOL, GPU, 1, 4, 1, 0)
DEF_TEST(BOOL, GPU, 1, 4, 1001, 0)
DEF_TEST(DOUBLE, GPU, 1, 8, 1001, 0)
DEF_TEST(INT64, GPU, 1, 8, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 128, 6)
#endif
}
} |
1,595 | cpp | tensorflow/tensorflow | collective_param_resolver_local | tensorflow/core/common_runtime/collective_param_resolver_local.cc | tensorflow/core/common_runtime/collective_param_resolver_local_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_PARAM_RESOLVER_LOCAL_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_COLLECTIVE_PARAM_RESOLVER_LOCAL_H_
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
class CompleteGroupRequest;
class CompleteGroupResponse;
class CompleteInstanceRequest;
class CompleteInstanceResponse;
class ConfigProto;
class DeviceMgr;
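// Implementation of ParamResolverInterface that resolves collective group and
// instance parameters using only state local to this process.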
class CollectiveParamResolverLocal : public ParamResolverInterface {
public:
CollectiveParamResolverLocal(const ConfigProto& config,
const DeviceMgr* dev_mgr,
DeviceResolverInterface* dev_resolver,
NcclCommunicatorInterface* nccl_communicator,
const string& task_name);
~CollectiveParamResolverLocal() override {}
void CompleteParamsAsync(const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr,
const StatusCallback& done) override;
void CompleteGroupAsync(const DeviceAttributes& device,
CollGroupParams* group_params,
CancellationManager* cancel_mgr,
const StatusCallback& done) override;
void CompleteInstanceAsync(const CompleteInstanceRequest* request,
CompleteInstanceResponse* response,
CancellationManager* cancel_mgr,
const StatusCallback& done) override;
Status LookupGroup(int32_t group_key, CollGroupParams* group) override;
void StartAbort(const Status& s) override;
protected:
friend class CollectiveParamResolverLocalTest;
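  // Accumulates the devices joining a collective group; completion callbacks
  // and per-device group params are buffered in pending_* until group_size
  // members have joined.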
struct GroupRec {
mutable mutex mu;
CollGroupParams group TF_GUARDED_BY(mu);
Status status TF_GUARDED_BY(mu);
std::unordered_map<string, int64_t> incarnations_by_device_name
TF_GUARDED_BY(mu);
std::vector<CollGroupParams*> pending_params TF_GUARDED_BY(mu);
std::vector<StatusCallback> pending_done TF_GUARDED_BY(mu);
};
void CompleteGroupLocal(const DeviceAttributes& device,
CollGroupParams* group_params,
CancellationManager* cancel_mgr, StatusCallback done)
TF_LOCKS_EXCLUDED(group_mu_);
void FinishGroup(GroupRec* gr) TF_EXCLUSIVE_LOCKS_REQUIRED(gr->mu);
void CancelGroup(int32 group_key) TF_LOCKS_EXCLUDED(group_mu_);
Status LookupAndPopulateGroupParams(CollGroupParams* group_params);
struct InstanceRec;
typedef std::function<void(InstanceRec*)> IRConsumer;
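  // Per-(step_id, instance_key) record shared by all local devices taking part
  // in a collective instance; tracks which group members are known so far.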
struct InstanceRec {
mutex mu;
CollectiveParams* shared;
Status status TF_GUARDED_BY(mu);
int source_rank TF_GUARDED_BY(mu);
string communicator_key TF_GUARDED_BY(mu);
int known_count TF_GUARDED_BY(mu);
std::vector<bool> known TF_GUARDED_BY(mu);
std::vector<IRConsumer> known_waiters TF_GUARDED_BY(mu);
InstanceRec()
: shared(new CollectiveParams()), source_rank(-1), known_count(0) {}
~InstanceRec() { shared->Unref(); }
};
InstanceRec* GetOrCreateInstanceRec(CollectiveParams* cp, bool* created)
TF_LOCKS_EXCLUDED(instance_mu_, group_mu_);
void InitInstanceSharedParams(const CollectiveParams* cp, InstanceRec* ir);
void CompleteDefaultRanking(CollGroupParams* gp);
void CompleteInstanceLocal(const string& device, CollectiveParams* cp,
const StatusCallback& done)
TF_LOCKS_EXCLUDED(instance_mu_, group_mu_);
void CompleteInstanceFromInitializedIRec(const string& device,
CollectiveParams* cp,
InstanceRec* ir,
const StatusCallback& done)
TF_LOCKS_EXCLUDED(ir->mu);
void WaitForGroup(InstanceRec* ir, CollectiveParams* cp, const IRConsumer& f)
TF_LOCKS_EXCLUDED(ir->mu);
Status GetLocalDeviceLocalities(const CollectiveParams& cp,
std::vector<DeviceLocality>* localities);
void SetDefaultRank(const string& device, CollectiveParams* cp);
void AssignCollectiveType(CollectiveParams* cp);
void StartAbortLocal(const Status& s)
TF_LOCKS_EXCLUDED(status_mu_, group_mu_, instance_mu_);
const bool nccl_;
const DeviceMgr* dev_mgr_;
DeviceResolverInterface* dev_resolver_;
NcclCommunicatorInterface* nccl_communicator_;
string task_name_;
string gpu_ring_order_;
mutex group_mu_;
gtl::FlatMap<int32, std::unique_ptr<GroupRec>> group_table_
TF_GUARDED_BY(group_mu_);
struct TupleHash {
std::size_t operator()(const std::tuple<int64_t, int32_t> x) const {
return (std::get<0>(x) << 20) + std::get<1>(x);
}
};
mutex instance_mu_;
gtl::FlatMap<int32_t, gtl::FlatMap<std::tuple<int64_t, int32_t>,
std::unique_ptr<InstanceRec>, TupleHash>>
instance_table_ TF_GUARDED_BY(instance_mu_);
mutex status_mu_;
Status status_ TF_GUARDED_BY(status_mu_);
};
}
#endif
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include <stddef.h>
#include <algorithm>
#include <tuple>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
CollectiveParamResolverLocal::CollectiveParamResolverLocal(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverInterface* dev_resolver,
NcclCommunicatorInterface* nccl_communicator, const string& task_name)
: nccl_(config.experimental().collective_nccl()),
dev_mgr_(dev_mgr),
dev_resolver_(dev_resolver),
nccl_communicator_(nccl_communicator),
task_name_(task_name),
gpu_ring_order_(
config.gpu_options().experimental().collective_ring_order()) {}
void CollectiveParamResolverLocal::CompleteGroupAsync(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
CompleteGroupLocal(device, group_params, cancel_mgr, done);
}
namespace {
const char* GetCollectiveName(const CollectiveParams* cp, bool nccl) {
switch (cp->instance.type) {
case BROADCAST_COLLECTIVE:
return nccl ? "NcclBroadcast" : "HierarchicalTreeBroadcast";
case REDUCTION_COLLECTIVE:
return nccl ? "NcclReduce" : "RingReduce";
case GATHER_COLLECTIVE:
return nccl ? "NcclGather" : "RingGather";
case PERMUTE_COLLECTIVE:
return "Permute";
case ALL_TO_ALL_COLLECTIVE:
return nccl ? "NcclAllToAll" : "AllToAll";
case REDUCE_SCATTER_COLLECTIVE:
return nccl ? "NcclReduceScatter" : "undef";
default:
return "undef";
}
}
string TaskNameFromDeviceName(const string& device_name) {
DeviceNameUtils::ParsedName parsed_device;
CHECK(DeviceNameUtils::ParseFullName(device_name, &parsed_device));
string task_name;
CHECK(DeviceNameUtils::GetTaskName(parsed_device, &task_name));
return task_name;
}
struct RankFormatter {
void operator()(std::string* out, CollGroupMember m) const {
out->append(std::to_string(m.rank));
}
};
Status CheckUserSpecifiedRanks(const std::vector<CollGroupMember> members) {
absl::flat_hash_set<int> user_ranks = {};
bool at_least_one_member_with_no_rank = false;
bool at_least_one_member_with_user_rank = false;
for (const auto& m : members) {
if (m.rank == -1) {
at_least_one_member_with_no_rank = true;
} else {
at_least_one_member_with_user_rank = true;
user_ranks.insert(m.rank);
}
}
auto received_ranks = absl::StrJoin(members, ",", RankFormatter());
if (at_least_one_member_with_no_rank && at_least_one_member_with_user_rank) {
return errors::InvalidArgument(
"Only part of the group members have user given rank specified.",
"Received ranks: ", received_ranks);
}
if (at_least_one_member_with_user_rank &&
user_ranks.size() < members.size()) {
return errors::InvalidArgument(
"Duplicate ranks specified for group members. Received ranks: ",
received_ranks);
}
return absl::OkStatus();
}
}
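// Called once per device joining the group. Each caller's callback is buffered
// until the group is full (or fails / is cancelled), at which point every
// pending callback is invoked with the finalized group state.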
void CollectiveParamResolverLocal::CompleteGroupLocal(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, StatusCallback done) {
VLOG(1) << "CompleteGroup device=" << device.name() << ": "
<< group_params->ToString();
std::vector<StatusCallback> to_be_called;
GroupRec* gr = nullptr;
Status status;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(group_params->group_key);
if (it == group_table_.end()) {
gr = new GroupRec;
mutex_lock grl(gr->mu);
gr->group.group_key = group_params->group_key;
gr->group.group_size = group_params->group_size;
gr->group.device_type = group_params->device_type;
if (nccl_communicator_ != nullptr) {
gr->group.runtime_details.communicator_key =
nccl_communicator_->GenerateCommunicatorKey();
}
group_table_[gr->group.group_key].reset(gr);
VLOG(2) << "New group_key=" << gr->group.group_key
<< " group_size=" << gr->group.group_size
<< " runtime_details=" << gr->group.runtime_details.ToString();
} else {
gr = it->second.get();
}
}
{
mutex_lock l(status_mu_);
status = status_;
}
if (!status.ok()) {
done(status);
return;
}
if (cancel_mgr != nullptr) {
CancellationToken token = cancel_mgr->get_cancellation_token();
bool is_cancelled = !cancel_mgr->RegisterCallback(
token, std::bind(&CollectiveParamResolverLocal::CancelGroup, this,
group_params->group_key));
if (is_cancelled) {
done(errors::Cancelled("CompleteGroup is cancelled before it starts"));
return;
}
done = [cancel_mgr, token,
original_done = std::move(done)](const Status& status) {
cancel_mgr->TryDeregisterCallback(token);
original_done(status);
};
}
{
mutex_lock gr_lock(gr->mu);
VLOG(2) << "gr device_type=" << gr->group.device_type
<< " cp device_type=" << group_params->device_type
<< " current device=" << device.name();
if (gr->status.ok()) {
if (group_params->device_type != gr->group.device_type) {
gr->status = errors::Internal(
"Device ", device.name(),
" is joining a group with incompatible device type",
gr->group.device_type.type_string(),
" (group_key=", gr->group.group_key, ")");
} else if (group_params->group_size != gr->group.group_size) {
gr->status = errors::Internal(
"Device ", device.name(), " is joining a group with size",
group_params->group_size, ", but that group has size ",
gr->group.group_size, " (group_key=", gr->group.group_key, ")");
}
}
bool new_device = false;
if (gr->status.ok()) {
auto it = gr->incarnations_by_device_name.find(device.name());
if (it == gr->incarnations_by_device_name.end()) {
if (gr->group.members.size() == gr->group.group_size) {
gr->status =
errors::Internal("Device ", device.name(),
" is joining a group that is already full",
" (group_key=", gr->group.group_key, ")");
} else {
gr->incarnations_by_device_name[device.name()] = device.incarnation();
CollGroupMember member;
member.device = device;
if (group_params->user_specified_rank == -1 ||
(group_params->user_specified_rank >= 0 &&
group_params->user_specified_rank < gr->group.group_size)) {
member.rank = group_params->user_specified_rank;
} else {
gr->status = errors::InvalidArgument(
"User Provided rank is invalid. It should be between [0, "
"group_size)");
}
gr->group.members.push_back(std::move(member));
new_device = true;
if (VLOG_IS_ON(1)) {
string dev_buf;
for (const auto& m : gr->group.members) {
strings::StrAppend(&dev_buf, ",", m.device.name());
}
VLOG(1) << "CompleteGroupLocal group_key=" << gr->group.group_key
<< " group_size=" << gr->group.group_size << " (current"
<< " devices)=(" << dev_buf << ") (number of"
<< " devices pending)="
<< (gr->group.group_size - gr->group.members.size());
}
}
} else {
if (it->second != device.incarnation()) {
gr->status = errors::FailedPrecondition(
"Device ", device.name(),
" current incarnation doesn't match with one in the group. This "
"usually means this worker has restarted but the collective "
"leader hasn't, or this worker connects to a wrong cluster.");
}
}
}
if (gr->status.ok()) {
VLOG(2) << "group_size " << gr->group.group_size << " set size "
<< gr->group.members.size() << " gr " << gr;
if (gr->group.members.size() < gr->group.group_size) {
gr->pending_done.push_back(std::move(done));
gr->pending_params.push_back(group_params);
return;
}
CHECK_EQ(gr->group.members.size(), gr->group.group_size);
auto st = CheckUserSpecifiedRanks(gr->group.members);
if (!st.ok()) {
gr->status = st;
}
if (new_device) {
FinishGroup(gr);
}
*group_params = gr->group;
for (auto* params : gr->pending_params) {
*params = gr->group;
}
}
to_be_called.swap(gr->pending_done);
gr->pending_params.clear();
status = gr->status;
}
done(status);
for (int i = 0; i < to_be_called.size(); ++i) {
to_be_called[i](status);
}
}
namespace {
struct DevRec {
string task;
string device;
int original_rank;
int local_rank;
int global_rank;
const DeviceLocality* locality;
};
typedef std::unordered_map<string, DevRec> TaskDeviceMap;
typedef std::unordered_map<string, TaskDeviceMap> GlobalDeviceMap;
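// Builds one DevRec per group member, grouped by task and keyed by device name.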
GlobalDeviceMap BuildDevRecs(const CollGroupParams& gp) {
GlobalDeviceMap gdm;
CHECK_EQ(gp.members.size(), gp.members.size());
for (int i = 0; i < gp.members.size(); ++i) {
TaskDeviceMap& tdm = gdm[gp.members[i].task];
DevRec* dr = &tdm[gp.members[i].device.name()];
dr->task = gp.members[i].task;
dr->device = gp.members[i].device.name();
dr->original_rank = i;
dr->local_rank = 0;
dr->global_rank = 0;
dr->locality = &gp.members[i].device.locality();
}
return gdm;
}
bool ParseRingOrder(const string& gpu_ring_order_str, TaskDeviceMap* tdm) {
std::vector<string> split_gpu_ring_order_str =
str_util::Split(gpu_ring_order_str, ',');
if (split_gpu_ring_order_str.size() != tdm->size()) return false;
gtl::FlatMap<int32, int32> gpu_ranks;
for (int32_t rank = 0;
rank < static_cast<int32>(split_gpu_ring_order_str.size()); ++rank) {
int32_t tmp;
if (strings::safe_strto32(split_gpu_ring_order_str[rank], &tmp)) {
gpu_ranks[tmp] = rank;
} else {
return false;
}
}
for (auto& tdm_it : *tdm) {
DeviceNameUtils::ParsedName parsed_name;
DevRec* dr = &tdm_it.second;
if (!DeviceNameUtils::ParseFullName(dr->device, &parsed_name)) {
return false;
}
auto rank_it = gpu_ranks.find(parsed_name.id);
if (rank_it == gpu_ranks.end()) return false;
dr->local_rank = rank_it->second;
}
VLOG(2) << "Assigned local ranks based on ring order " << gpu_ring_order_str;
return true;
}
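// Assigns local ranks within a single task: honor an explicit gpu_ring_order
// when it parses, otherwise greedily follow the strongest interconnect link
// starting from the member with the lowest original rank.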
void OrderTaskDeviceMap(const string& gpu_ring_order, TaskDeviceMap* tdm) {
CHECK_GT(tdm->size(), 0);
if (ParseRingOrder(gpu_ring_order, tdm)) return;
int least_rank = -1;
string next_device;
std::set<string> selected;
for (const auto& it : *tdm) {
if (least_rank < 0 || it.second.original_rank < least_rank) {
least_rank = it.second.original_rank;
next_device = it.second.device;
}
}
CHECK_GE(least_rank, 0);
DeviceNameUtils::ParsedName parsed_name;
CHECK(DeviceNameUtils::ParseFullName(next_device, &parsed_name));
int next_rank = 0;
while (true) {
selected.insert(next_device);
auto next_dev_it = tdm->find(next_device);
CHECK(next_dev_it != tdm->end());
DevRec* dr = &next_dev_it->second;
dr->local_rank = next_rank;
++next_rank;
if (selected.size() == tdm->size()) {
break;
}
const InterconnectLink* best_link = nullptr;
if (parsed_name.type == "GPU") {
for (const InterconnectLink& il : dr->locality->links().link()) {
parsed_name.id = il.device_id();
string endpoint_device =
DeviceNameUtils::ParsedNameToString(parsed_name);
if (selected.find(endpoint_device) != selected.end()) {
continue;
}
if (tdm->find(endpoint_device) == tdm->end()) {
continue;
}
if (best_link == nullptr || il.strength() > best_link->strength()) {
best_link = &il;
}
}
}
if (best_link != nullptr) {
parsed_name.id = best_link->device_id();
next_device = DeviceNameUtils::ParsedNameToString(parsed_name);
} else {
least_rank = -1;
for (const auto& it : *tdm) {
if (selected.find(it.second.device) != selected.end()) {
continue;
}
if (least_rank < 0 || it.second.original_rank < least_rank) {
least_rank = it.second.original_rank;
next_device = it.second.device;
}
}
CHECK_GE(least_rank, 0);
}
}
}
GlobalDeviceMap EstablishGlobalRank(const CollGroupParams& gp,
const string& gpu_ring_order) {
VLOG(1) << "EstablishGlobalRank";
GlobalDeviceMap gdm = BuildDevRecs(gp);
for (auto& iter : gdm) {
TaskDeviceMap& tdm = iter.second;
OrderTaskDeviceMap(gpu_ring_order, &tdm);
}
std::set<string> tasks;
for (const CollGroupMember& member : gp.members) {
tasks.insert(member.task);
}
int next_rank = 0;
for (const string& task : tasks) {
TaskDeviceMap* tdm = &gdm[task];
for (auto& it : *tdm) {
it.second.global_rank = it.second.local_rank + next_rank;
}
next_rank += tdm->size();
}
return gdm;
}
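// Records the per-task device counts and whether every task contributes the
// same number of devices.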
void SetDevPerTask(CollGroupParams* gp) {
gp->num_devices_per_task.clear();
for (const CollGroupMember& member : gp->members) {
gp->num_devices_per_task[member.task]++;
}
gp->same_num_devices_per_task = false;
int dev_per_task = -1;
for (const auto& task_dev : gp->num_devices_per_task) {
if (dev_per_task == -1) {
dev_per_task = task_dev.second;
} else if (dev_per_task != task_dev.second) {
return;
}
}
gp->same_num_devices_per_task = true;
}
}
void CollectiveParamResolverLocal::FinishGroup(GroupRec* gr) {
for (CollGroupMember& member : gr->group.members) {
member.task = TaskNameFromDeviceName(member.device.name());
member.is_local = member.task == task_name_;
}
CompleteDefaultRanking(&gr->group);
SetDevPerTask(&gr->group);
gr->group.num_tasks =
static_cast<int32>(gr->group.num_devices_per_task.size());
}
void CollectiveParamResolverLocal::CancelGroup(int32 group_key) {
std::vector<StatusCallback> pending_done;
GroupRec* gr = nullptr;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(group_key);
if (it == group_table_.end()) {
return;
}
gr = it->second.get();
}
{
mutex_lock l(gr->mu);
if (gr->group.members.size() == gr->group.group_size) {
return;
}
gr->status = errors::Cancelled("group is cancelled");
pending_done.swap(gr->pending_done);
gr->pending_params.clear();
}
for (const StatusCallback& done : pending_done) {
done(errors::Cancelled("group is cancelled"));
}
}
void CollectiveParamResolverLocal::SetDefaultRank(const string& device,
CollectiveParams* cp) {
CHECK_EQ(cp->group.group_size, cp->group.members.size()) << cp->ToString();
for (int i = 0; i < cp->group.group_size; ++i) {
if (cp->group.members[i].device.name() == device) {
cp->default_rank = i;
}
if (cp->group.members[i].rank == -1) {
cp->group.members[i].rank = i;
}
}
}
void CollectiveParamResolverLocal::InitInstanceSharedParams(
const CollectiveParams* cp, InstanceRec* ir) {
ir->shared->instance = cp->instance;
ir->shared->default_rank = -1;
}
void CollectiveParamResolverLocal::CompleteDefaultRanking(CollGroupParams* gp) {
std::sort(gp->members.begin(), gp->members.end(),
[](const CollGroupMember& lhs, const CollGroupMember& rhs) {
return DeviceNameUtils::CompareFullNames(lhs.device.name(),
rhs.device.name());
});
GlobalDeviceMap gdm = EstablishGlobalRank(*gp, gpu_ring_order_);
std::vector<CollGroupMember> new_members(gp->group_size);
for (const auto& git : gdm) {
const TaskDeviceMap& tdm = git.second;
for (const auto& tit : tdm) {
const DevRec& dr = tit.second;
new_members[dr.global_rank] = std::move(gp->members[dr.original_rank]);
}
}
if (VLOG_IS_ON(2)) {
string buf;
for (const auto& m : new_members)
strings::StrAppend(&buf, "\n", m.device.name());
VLOG(2) << "Optimized device order for group " << gp->group_key << ": "
<< buf;
}
gp->members = std::move(new_members);
}
CollectiveParamResolverLocal::InstanceRec*
CollectiveParamResolverLocal::GetOrCreateInstanceRec(CollectiveParams* cp,
bool* created) {
*created = false;
InstanceRec* irec = nullptr;
{
mutex_lock l(instance_mu_);
std::tuple<int64_t, int32_t> key = {cp->instance.step_id,
cp->instance.instance_key};
auto group_it = instance_table_.find(cp->group.group_key);
if (group_it != instance_table_.end()) {
auto instance_it = group_it->second.find(key);
if (instance_it != group_it->second.end()) {
irec = instance_it->second.get();
}
}
if (irec == nullptr) {
irec = new InstanceRec;
*created = true;
{
mutex_lock il(irec->mu);
irec->known.resize(cp->group.group_size, false);
}
InitInstanceSharedParams(cp, irec);
instance_table_[cp->group.group_key][key].reset(irec);
}
}
Status status;
{
mutex_lock l(status_mu_);
status = status_;
}
if (!status.ok()) {
mutex_lock l(irec->mu);
irec->status = status;
}
return irec;
}
Status CollectiveParamResolverLocal::LookupGroup(int32_t group_key,
CollGroupParams* group) {
mutex_lock l(group_mu_);
auto group_rec = group_table_.find(group_key);
if (group_rec == group_table_.end()) {
return errors::InvalidArgument("Group ", group_key,
" is not "
"initialized. Please call group "
"initialization op first before invoking "
"collective op.");
}
mutex_lock lock(group_rec->second->mu);
if (!group_rec->second->status.ok()) {
return errors::FailedPrecondition(
"Failed to run collective due to "
"unsuccessful group initialization. "
"Group initialization failed with error ",
group_rec->second->status.ToString());
}
*group = group_rec->second->group;
return absl::OkStatus();
}
void CollectiveParamResolverLocal::CompleteParamsAsync(
const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteParams local " << device.name() << " for " << cp << ": " | #include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include <atomic>
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
#define NUM_DEVS 3
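// Each test resolves parameters for NUM_DEVS CPU devices on a single task.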
class CollectiveParamResolverLocalTest : public ::testing::Test {
protected:
CollectiveParamResolverLocalTest() {
ConfigProto cp;
SessionOptions options;
task_name_ = "/job:localhost/replica:0/task:0";
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name_, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
drl_.reset(new DeviceResolverLocal(device_mgr_.get()));
ResetParamResolver(ConfigProto());
}
void ResetParamResolver(const ConfigProto& config) {
prl_.reset(new CollectiveParamResolverLocal(
config, device_mgr_.get(), drl_.get(), nullptr,
task_name_));
}
void RunCompleteDefaultRanking(
CollGroupParams group, const std::vector<int32>& gpu_ring_order,
const std::vector<string>& expected_device_order) {
ConfigProto config;
if (!gpu_ring_order.empty()) {
config.mutable_gpu_options()
->mutable_experimental()
->set_collective_ring_order(absl::StrJoin(gpu_ring_order, ","));
}
ResetParamResolver(config);
prl_->CompleteDefaultRanking(&group);
std::vector<string> actual_device_order;
for (const CollGroupMember& member : group.members) {
actual_device_order.push_back(member.device.name());
}
EXPECT_EQ(actual_device_order, expected_device_order);
}
DeviceAttributes GetDeviceAttributes(const string& device_name) {
Device* device = nullptr;
TF_CHECK_OK(device_mgr_->LookupDevice(device_name, &device));
return device->attributes();
}
string task_name_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
std::unique_ptr<CollectiveParamResolverLocal> prl_;
};
TEST_F(CollectiveParamResolverLocalTest, CompleteDefaultRanking) {
constexpr int kNumGpus = 8;
CollGroupParams group;
group.device_type = DeviceType("GPU");
group.num_tasks = 1;
group.group_size = kNumGpus;
std::unordered_set<int> clique1 = {0, 1, 6, 7};
for (int gpu_idx = 0; gpu_idx < kNumGpus; ++gpu_idx) {
CollGroupMember member;
member.task = "/job:localhost/replica:0/task:0";
member.device.set_name(strings::StrCat(
"/job:localhost/replica:0/task:0/device:GPU:", gpu_idx));
for (int link_idx = 0; link_idx < kNumGpus; ++link_idx) {
if (gpu_idx == link_idx) continue;
bool gpu_in_clique1 = clique1.find(gpu_idx) != clique1.end();
bool link_in_clique1 = clique1.find(link_idx) != clique1.end();
if ((gpu_in_clique1 && link_in_clique1) ||
(!gpu_in_clique1 && !link_in_clique1)) {
LocalLinks* links = member.device.mutable_locality()->mutable_links();
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(link_idx);
ilink->set_strength(2);
} else if ((gpu_idx == 3 && link_idx == 7) ||
(gpu_idx == 7 && link_idx == 3)) {
LocalLinks* links = member.device.mutable_locality()->mutable_links();
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(link_idx);
ilink->set_strength(1);
}
}
group.members.push_back(member);
}
RunCompleteDefaultRanking(group, {1, 3, 5, 7, 6, 4, 2, 0},
{
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:5",
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:0",
});
RunCompleteDefaultRanking(group, {7, 6, 5, 4, 3, 2, 1, 0},
{
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:5",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:0",
});
RunCompleteDefaultRanking(group, {},
{
"/job:localhost/replica:0/task:0/device:GPU:0",
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:5",
});
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsReduction1Task) {
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
cp->group.group_key = 1;
cp->group.group_size = 3;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = 7;
cp->instance.type = REDUCTION_COLLECTIVE;
cp->instance.data_type = DataType(DT_FLOAT);
cp->instance.shape = TensorShape({5});
cp->instance.impl_details.subdiv_offsets.push_back(0);
cp->is_source = false;
Env::Default()->SchedClosure([this, i, cp, ¬e, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
nullptr ,
[&statuses, ¬e, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
TF_ASSERT_OK(statuses[i]);
ASSERT_EQ(cps[i]->group.members.size(), 3);
for (int j = 0; j < NUM_DEVS; ++j) {
EXPECT_EQ(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", j),
cps[i]->group.members[j].device.name());
EXPECT_TRUE(cps[i]->group.members[j].is_local);
}
EXPECT_EQ(cps[i]->instance.impl_details.subdiv_source_rank.size(), 0);
EXPECT_FALSE(cps[i]->is_source);
EXPECT_EQ(cps[i]->default_rank, i);
EXPECT_TRUE(cps[i]->group.same_num_devices_per_task);
cps[i]->Unref();
}
}
void InitializeCollectiveParamsForBroadcast(int instance_key, int device_idx,
bool is_source,
CollectiveParams* cp) {
cp->group.group_key = 1;
cp->group.group_size = 3;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = instance_key;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->instance.data_type = DataType(DT_FLOAT);
cp->instance.shape = TensorShape({5});
cp->instance.impl_details.subdiv_offsets.push_back(0);
cp->is_source = is_source;
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcast1Task) {
constexpr int kInstanceKey = 5;
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
InitializeCollectiveParamsForBroadcast(kInstanceKey, i, i == 1, cp);
Env::Default()->SchedClosure([this, i, cp, ¬e, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
nullptr ,
[&statuses, ¬e, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
TF_ASSERT_OK(statuses[i]);
ASSERT_EQ(cps[i]->group.members.size(), 3);
for (int j = 0; j < NUM_DEVS; ++j) {
EXPECT_EQ(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", j),
cps[i]->group.members[j].device.name());
EXPECT_TRUE(cps[i]->group.members[j].is_local);
}
EXPECT_EQ(cps[i]->is_source, (i == 1));
EXPECT_EQ(cps[i]->default_rank, i);
EXPECT_TRUE(cps[i]->group.same_num_devices_per_task);
cps[i]->Unref();
}
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcastForgotSender) {
constexpr int kInstanceKey = 8;
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
InitializeCollectiveParamsForBroadcast(kInstanceKey, i, false, cp);
Env::Default()->SchedClosure([this, i, cp, ¬e, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
nullptr ,
[&statuses, ¬e, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
EXPECT_EQ(statuses[i].code(), error::INTERNAL);
EXPECT_EQ(statuses[i].message(),
strings::StrCat(
"Instance ", kInstanceKey,
" found no source for broadcast. This could mean that there"
" were group_size=",
NUM_DEVS, " BcastRecvs but no BcastSend."));
cps[i]->Unref();
}
}
CollectiveParams* MakeCollectiveParams(int group_key, int instance_key,
bool is_source) {
auto* cp = new CollectiveParams();
cp->group.group_key = group_key;
cp->group.group_size = NUM_DEVS;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = instance_key;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->is_source = is_source;
return cp;
}
TEST_F(CollectiveParamResolverLocalTest, AbortPendingGroup) {
CancellationManager cancel_mgr;
std::vector<CollectiveParams*> cp(NUM_DEVS - 1);
BlockingCounter start(NUM_DEVS - 1);
BlockingCounter done(NUM_DEVS - 1);
for (int i = 0; i < NUM_DEVS - 1; ++i) {
Env::Default()->SchedClosure([this, i, &cancel_mgr, &cp, &start, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams( 100, 100,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i], &cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(),
absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.DecrementCount();
cp->Unref();
});
start.DecrementCount();
});
}
start.Wait();
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
}
TEST_F(CollectiveParamResolverLocalTest, AbortPendingInstance) {
CancellationManager cancel_mgr;
std::vector<CollectiveParams*> cp(NUM_DEVS);
int group_key = 100;
int instance_key = 100;
{
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i,
&cancel_mgr, &cp, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i],
&cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(), error::OK);
done.DecrementCount();
cp->Unref();
});
});
}
done.Wait();
}
BlockingCounter start(NUM_DEVS - 1);
BlockingCounter done(NUM_DEVS - 1);
for (int i = 0; i < NUM_DEVS - 1; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i, &cancel_mgr,
&cp, &start, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key + 1,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i], &cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(),
absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.DecrementCount();
cp->Unref();
});
start.DecrementCount();
});
}
start.Wait();
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsAfterAbortion) {
CancellationManager cancel_mgr;
int group_key = 100;
int instance_key = 100;
{
std::vector<CollectiveParams*> cp(NUM_DEVS);
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i,
&cancel_mgr, &cp, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i],
&cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(), error::OK);
done.DecrementCount();
cp->Unref();
});
});
}
done.Wait();
}
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
auto complete_params = [this, &cancel_mgr](int group_key, int instance_key) {
string device = "/job:localhost/replica:0/task:0/device:CPU:0";
Notification done;
auto* cp = MakeCollectiveParams(group_key, instance_key,
true);
core::ScopedUnref unref(cp);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp, &cancel_mgr,
[&done](const Status& s) {
EXPECT_EQ(s.code(), absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.Notify();
});
done.WaitForNotification();
};
complete_params(group_key, instance_key);
complete_params(group_key, instance_key + 1);
complete_params(group_key + 1, instance_key + 1);
}
TEST_F(CollectiveParamResolverLocalTest, AbortNormalCompleteParamsAsync) {
CancellationManager cancel_mgr;
std::atomic<int64_t> num_ok{0};
for (int cnt = 0; cnt < 100; ++cnt) {
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
Env::Default()->SchedClosure(
[this, i, device, &num_ok, &cancel_mgr, &done] {
int key = 100;
while (true) {
Status status;
Notification n;
auto* cp =
MakeCollectiveParams( key, key,
i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
&cancel_mgr,
[&status, &n](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
cp->Unref();
if (!status.ok()) {
EXPECT_EQ(status.code(), absl::StatusCode::kAborted);
EXPECT_EQ(status.message(), "__aborted__");
done.DecrementCount();
return;
}
++num_ok;
++key;
}
});
}
int64_t delay_ms = random::New64() % 50000;
Env::Default()->SleepForMicroseconds(delay_ms);
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
ResetParamResolver(ConfigProto());
}
EXPECT_GT(num_ok.load(), 50);
}
} |
1,596 | cpp | tensorflow/tensorflow | placer_inspection_required_ops_utils | tensorflow/core/common_runtime/placer_inspection_required_ops_utils.cc | tensorflow/core/common_runtime/placer_inspection_required_ops_utils_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PLACER_INSPECTION_REQUIRED_OPS_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PLACER_INSPECTION_REQUIRED_OPS_UTILS_H_
#include <vector>
#include "absl/types/optional.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
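// Answers (and caches per node id) whether placing a node requires inspecting
// the called function body, i.e. whether it is a PartitionedCall or
// StatefulPartitionedCall whose function returns a DT_RESOURCE output.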
class PlacerInspectionRequiredOpChecker {
public:
PlacerInspectionRequiredOpChecker(const Graph* graph,
const FunctionLibraryDefinition* flib_def);
Status IsPlacerInspectionRequired(const Node& node, bool* is_deep);
private:
const Graph& graph_;
const FunctionLibraryDefinition& flib_def_;
std::vector<absl::optional<bool>> cache_;
};
Status GetFunctionDefAndAttrs(const FunctionLibraryDefinition& flib_def,
const Node& node,
core::RefCountPtr<FunctionRecord>* fdef,
NameAttrList* func);
class FunctionStack {
public:
explicit FunctionStack(const string& function_name);
FunctionStack Push(const Node* node_in_current_function,
const string& new_current_function) const;
bool HasFunction(const string& function_name) const;
const string& current_function_name() const { return current_function_name_; }
string FormatForError() const;
private:
struct Frame {
Frame(const string& function, const Node* node)
: function_name(function), node(node) {}
string function_name;
const Node* node;
};
string current_function_name_;
std::vector<Frame> frames_;
};
Status IsolatePlacerInspectionRequiredOps(
const FunctionLibraryDefinition& flib_def, Graph* graph);
}
#endif
#include "tensorflow/core/common_runtime/placer_inspection_required_ops_utils.h"
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/refcount.h"
namespace tensorflow {
namespace {
bool IsFunctionCall(const Node& node) {
const string& op_type = node.op_def().name();
return op_type == "PartitionedCall" || op_type == "StatefulPartitionedCall";
}
Status Set(const Node& node, bool value, bool* is_deep,
std::vector<absl::optional<bool>>* cache) {
*is_deep = value;
(*cache)[node.id()] = value;
return absl::OkStatus();
}
}
PlacerInspectionRequiredOpChecker::PlacerInspectionRequiredOpChecker(
const Graph* graph, const FunctionLibraryDefinition* flib_def)
: graph_(*graph), flib_def_(*flib_def) {
cache_.resize(graph_.num_node_ids());
}
Status PlacerInspectionRequiredOpChecker::IsPlacerInspectionRequired(
const Node& node, bool* is_deep) {
if (cache_[node.id()].has_value()) {
*is_deep = cache_[node.id()].value();
return absl::OkStatus();
}
if (!IsFunctionCall(node)) {
return Set(node, false, is_deep, &cache_);
}
core::RefCountPtr<FunctionRecord> fdef;
NameAttrList func;
TF_RETURN_IF_ERROR(GetFunctionDefAndAttrs(flib_def_, node, &fdef, &func));
DataTypeVector types;
TF_RETURN_IF_ERROR(OutputTypesForNode(AttrSlice(&func.attr()),
fdef->fdef().signature(), &types));
for (DataType type : types) {
if (type == DT_RESOURCE) {
return Set(node, true, is_deep, &cache_);
}
}
return Set(node, false, is_deep, &cache_);
}
Status GetFunctionDefAndAttrs(const FunctionLibraryDefinition& flib_def,
const Node& node,
core::RefCountPtr<FunctionRecord>* fdef,
NameAttrList* func) {
TF_RETURN_IF_ERROR(GetNodeAttr(node.def(), "f", func));
const string& function_name = func->name();
*fdef = flib_def.FindRecord(function_name);
if (*fdef == nullptr) {
return errors::InvalidArgument(
"Failed to find function \"", function_name,
"\" in function library: ", flib_def.ToProto().DebugString());
}
return absl::OkStatus();
}
FunctionStack::FunctionStack(const string& function_name)
: current_function_name_(function_name) {}
FunctionStack FunctionStack::Push(const Node* node_in_current_function,
const string& new_current_function) const {
FunctionStack new_stack(new_current_function);
new_stack.frames_ = frames_;
new_stack.frames_.emplace_back(current_function_name_,
node_in_current_function);
return new_stack;
}
bool FunctionStack::HasFunction(const string& function_name) const {
if (current_function_name_ == function_name) {
return true;
}
for (const Frame& frame : frames_) {
if (frame.function_name == function_name) {
return true;
}
}
return false;
}
string FunctionStack::FormatForError() const {
std::vector<string> msgs;
for (int i = 0; i < frames_.size(); ++i) {
if (frames_[i].function_name.empty()) {
msgs.push_back(absl::StrCat("Graph contains node ",
FormatNodeForError(*frames_[i].node)));
} else {
msgs.push_back(absl::StrCat(
"Function ", errors::FormatFunctionForError(frames_[i].function_name),
" contains node ", FormatNodeForError(*frames_[i].node)));
}
const string& fname = (i + 1 < frames_.size())
? frames_[i + 1].function_name
: current_function_name_;
msgs.push_back(absl::StrCat("Node ", FormatNodeForError(*frames_[i].node),
" calls function ",
errors::FormatFunctionForError(fname)));
}
return absl::StrJoin(msgs, "\n ");
}
namespace {
using OutputEdgeMap = std::vector<std::vector<const Edge*>>;
constexpr char kIdentityOp[] = "Identity";
string Uniquify(const string& candidate_name,
std::unordered_set<string>* node_names) {
if (node_names->find(candidate_name) == node_names->end()) {
node_names->insert(candidate_name);
return candidate_name;
}
for (int counter = 0;; ++counter) {
string candidate = absl::StrCat(candidate_name, "_", counter);
if (node_names->find(candidate) == node_names->end()) {
node_names->insert(candidate);
return candidate;
}
}
}
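// Inserts an Identity node between the producer of input `input_idx` and
// `node`, so the deep op is connected to its inputs only through Identities.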
Status AddInputIdentity(Node* node, int input_idx, Graph* graph,
std::unordered_set<string>* node_names) {
const Edge* edge;
TF_RETURN_IF_ERROR(node->input_edge(input_idx, &edge));
string identity_name = Uniquify(
absl::StrCat(edge->src()->name(), "_", node->name()), node_names);
NodeDefBuilder builder(identity_name, kIdentityOp);
builder.Attr("T", node->input_type(input_idx));
NodeDefBuilder::NodeOut input(edge->src()->name(), edge->src_output(),
node->input_type(input_idx));
builder.Input(input);
NodeDef identity_def;
TF_RETURN_IF_ERROR(builder.Finalize(&identity_def));
MergeDebugInfo(NodeDebugInfo(*node), &identity_def);
VLOG(6) << "Adding identity into " << edge->src()->name() << ":"
<< edge->src_output() << " -> " << edge->dst()->name() << ":"
<< input_idx << " \n"
<< identity_def.DebugString();
TF_ASSIGN_OR_RETURN(Node * identity_node, graph->AddNode(identity_def));
graph->AddEdge(edge->src(), edge->src_output(), identity_node, 0);
TF_RETURN_IF_ERROR(graph->UpdateEdge(identity_node, 0, node, input_idx));
VLOG(6) << "Successfully inserted identity. Modified node: \n"
<< node->DebugString();
return absl::OkStatus();
}
struct EdgePtrCompare {
bool operator()(const Edge* lhs, const Edge* rhs) const {
return lhs->id() < rhs->id();
}
};
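// Adds an Identity for every outgoing data edge of `node` and one for each
// otherwise-unused output, then reroutes consumers through the new Identities.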
Status AddOutputIdentities(Node* node, Graph* graph,
std::unordered_set<string>* node_names) {
auto add_identity = [&](int src_output, const string& identity_name,
Node** identity_node) {
NodeDefBuilder builder(identity_name, kIdentityOp);
builder.Attr("T", node->output_type(src_output));
NodeDefBuilder::NodeOut input(node->name(), src_output,
node->output_type(src_output));
builder.Input(input);
NodeDef identity_def;
TF_RETURN_IF_ERROR(builder.Finalize(&identity_def));
MergeDebugInfo(NodeDebugInfo(*node), &identity_def);
TF_ASSIGN_OR_RETURN(*identity_node, graph->AddNode(identity_def));
graph->AddEdge(node, src_output, *identity_node, 0);
return absl::OkStatus();
};
std::vector<bool> output_used(node->num_outputs(), false);
const EdgeSet& out_edges = node->out_edges();
std::vector<const Edge*> edge_vector(out_edges.begin(), out_edges.end());
std::sort(edge_vector.begin(), edge_vector.end(), EdgePtrCompare());
for (const Edge* edge : edge_vector) {
if (edge->IsControlEdge()) {
continue;
}
output_used[edge->src_output()] = true;
Node* dst = edge->dst();
int dst_input = edge->dst_input();
int src_output = edge->src_output();
string identity_name =
Uniquify(absl::StrCat(node->name(), "_", dst->name()), node_names);
Node* identity_node;
TF_RETURN_IF_ERROR(add_identity(src_output, identity_name, &identity_node));
VLOG(6) << "Adding identity into " << node->name() << ":" << src_output
<< " -> " << dst->name() << ":" << dst_input << " \n"
<< identity_node->DebugString();
TF_RETURN_IF_ERROR(graph->UpdateEdge(identity_node, 0, dst, dst_input));
}
for (int output_idx = 0; output_idx < node->num_outputs(); ++output_idx) {
if (output_used[output_idx]) {
continue;
}
string identity_name = Uniquify(node->name(), node_names);
Node* identity_node;
TF_RETURN_IF_ERROR(add_identity(output_idx, identity_name, &identity_node));
VLOG(6) << "Added identity into " << node->name() << ":" << output_idx
<< " -> <no consumer>: \n"
<< identity_node->DebugString();
}
return absl::OkStatus();
}
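// Isolates `node` by inserting Identity nodes on all of its inputs and outputs.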
Status IsolateNode(Node* node, Graph* graph) {
std::unordered_set<string> node_names(graph->num_nodes());
for (Node* n : graph->nodes()) {
node_names.insert(n->name());
}
for (int i = 0; i < node->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(AddInputIdentity(node, i, graph, &node_names));
}
TF_RETURN_IF_ERROR(AddOutputIdentities(node, graph, &node_names));
return absl::OkStatus();
}
}
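// Isolates every node for which the placer must inspect the called function
// body, so such nodes can be placed independently of their neighbors.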
Status IsolatePlacerInspectionRequiredOps(
const FunctionLibraryDefinition& flib_def, Graph* graph) {
PlacerInspectionRequiredOpChecker checker(graph, &flib_def);
for (Node* node : graph->op_nodes()) {
bool should_be_isolated = false;
TF_RETURN_IF_ERROR(
checker.IsPlacerInspectionRequired(*node, &should_be_isolated));
if (!should_be_isolated) {
continue;
}
TF_RETURN_IF_ERROR(IsolateNode(node, graph));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/placer_inspection_required_ops_utils.h"
#include <map>
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
void VerifyPlacerInspectionRequiredOps(const GraphDef& graph_def,
std::map<string, bool> deep_nodes) {
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), graph_def.library());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
PlacerInspectionRequiredOpChecker checker(&graph, &flib_def);
std::unordered_map<string, Node*> node_map = graph.BuildNodeNameIndex();
for (const auto& entry : deep_nodes) {
const Node* node = node_map[entry.first];
ASSERT_NE(node, nullptr) << "Failed to find node " << entry.first
<< " in the graph " << graph_def.DebugString();
const bool expected_is_deep = entry.second;
bool actual_is_deep;
TF_EXPECT_OK(checker.IsPlacerInspectionRequired(*node, &actual_is_deep));
EXPECT_EQ(expected_is_deep, actual_is_deep)
<< " Expected is_deep to be " << expected_is_deep << " for node "
<< entry.first;
}
}
TEST(PlacerInspectionRequiredOpCheckerTest, Basic) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph_def = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
VerifyPlacerInspectionRequiredOps(graph_def,
{{"x", false}, {"f", true}, {"y", false}});
}
TEST(PlacerInspectionRequiredOpCheckerTest, DirectCallsAreNotDeep) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph_def = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "ResourceIdentity", {"x"}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
VerifyPlacerInspectionRequiredOps(graph_def,
{{"x", false}, {"f", false}, {"y", false}});
}
TEST(PlacerInspectionRequiredOpCheckerTest,
FunctionsNotReturningResourcesAreNotDeep) {
FunctionDef func = test::function::ReadResourceVariable();
GraphDef graph_def = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("ReadResourceVariable", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_FLOAT}}),
},
{func});
VerifyPlacerInspectionRequiredOps(graph_def,
{{"x", false}, {"f", false}, {"y", false}});
}
} |
1,597 | cpp | tensorflow/tensorflow | optimize_function_graph_utils | tensorflow/core/common_runtime/optimize_function_graph_utils.cc | tensorflow/core/common_runtime/optimize_function_graph_utils_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_OPTIMIZE_FUNCTION_GRAPH_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_OPTIMIZE_FUNCTION_GRAPH_UTILS_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
static const char kGraphCachingEnvVariableName[] = "TF_GRAPH_CACHING";
constexpr absl::Duration kCachingThresholdDuration = absl::Seconds(3);
Status PinArgsAndRets(const std::vector<string>& input_devices,
const std::vector<string>& output_devices,
const DeviceSet& device_set,
const std::vector<Node*>& arg_nodes,
const std::vector<Node*>& ret_nodes,
const FunctionLibraryDefinition* lib_def,
Device* default_device);
absl::StatusOr<OptimizedFunctionGraphInfo> OptimizeFunctionGraph(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Device* default_device, Env* env,
OptimizedFunctionGraph::OptimizationSource optimization_source);
absl::StatusOr<OptimizedFunctionGraphInfo>
OptimizeFunctionGraphOrReadFromFileCache(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Device* default_device, Env* env,
absl::Duration caching_threshold_duration = kCachingThresholdDuration);
absl::StatusOr<
std::unique_ptr<std::unordered_map<string, std::unique_ptr<Graph>>>>
PreprocessAndPartitionGraph(
const std::string& function_name,
OptimizedFunctionGraphInfo& input_optimized_graph,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Env* env);
}
#endif
#include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include <algorithm>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/optimized_function_graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/host_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
Status ValidateNoListArguments(
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args, const char* arg_type,
const string& function_name) {
for (const OpDef::ArgDef& arg : args) {
if (!arg.number_attr().empty() || !arg.type_list_attr().empty()) {
return errors::InvalidArgument(
"Function ", function_name, " has an ", arg_type, " named \"",
arg.name(),
"\" that is a list of tensors."
" Multi-device functions support only single-tensor inputs "
" and outputs");
}
}
return absl::OkStatus();
}
Status ValidateMultiDeviceOptions(
const FunctionDef& fdef,
const FunctionLibraryRuntime::InstantiateOptions& options) {
const OpDef& signature = fdef.signature();
TF_RETURN_IF_ERROR(ValidateNoListArguments(signature.input_arg(), "input",
signature.name()));
TF_RETURN_IF_ERROR(ValidateNoListArguments(signature.output_arg(), "output",
signature.name()));
if (fdef.attr().count(FunctionLibraryDefinition::kIntsOnDeviceAttr) != 0 &&
fdef.attr().at(FunctionLibraryDefinition::kIntsOnDeviceAttr).b()) {
return errors::Unimplemented(
"Function '", signature.name(), "' has `",
FunctionLibraryDefinition::kIntsOnDeviceAttr,
"` attribute set. This attribute is not currently supported by "
"multi-device functions.");
}
if (options.input_devices.size() != signature.input_arg_size()) {
return errors::InvalidArgument(
"InstantiateOptions.input_devices must have the same length "
"as the number of arguments: input_devices length = ",
options.input_devices.size(),
" number of arguments = ", signature.input_arg_size());
}
if (!options.output_devices.empty() &&
options.output_devices.size() != signature.output_arg_size()) {
return errors::InvalidArgument(
"InstantiateOptions.output_devices must either be empty or have the "
"same length as the number of arguments: output_devices length = ",
options.output_devices.size(),
" number of arguments = ", signature.output_arg_size());
}
return absl::OkStatus();
}
Status SetArgShape(const std::unordered_map<int, DtypeAndPartialTensorShape>&
input_resource_dtypes_and_shapes,
const std::vector<Node*>& arg_nodes) {
for (Node* n : arg_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
if (dtype == DT_RESOURCE) {
auto dtype_and_shape_iter = input_resource_dtypes_and_shapes.find(index);
if (dtype_and_shape_iter != input_resource_dtypes_and_shapes.end()) {
AttrValue dtype_attr_value;
dtype_attr_value.mutable_list()->add_type(
dtype_and_shape_iter->second.dtype);
n->AddAttr("_handle_dtypes", dtype_attr_value);
TensorShapeProto shape_proto;
dtype_and_shape_iter->second.shape.AsProto(&shape_proto);
AttrValue shape_attr_value;
*shape_attr_value.mutable_list()->add_shape() = shape_proto;
n->AddAttr("_handle_shapes", shape_attr_value);
}
}
}
return absl::OkStatus();
}
const string* AssignedOrRequestedDeviceName(const Node& node) {
if (node.has_assigned_device_name()) {
return &node.assigned_device_name();
}
return &node.requested_device();
}
void GetColocationGroup(const Node* node, string* group) {
static const StringPiece kColocationAttrNameStringPiece(kColocationAttrName);
const AttrValue* attr_value =
node->attrs().Find(kColocationAttrNameStringPiece);
if (attr_value != nullptr && attr_value->has_list() &&
attr_value->list().s_size() > 0) {
*group = attr_value->list().s(0);
}
}
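// Serializes `optimized_function_graph_info` to an OptimizedFunctionGraph proto
// and writes it to `file_name` under `dir_name`, going through a unique
// temporary file followed by a rename.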
Status WriteToCache(const std::string& dir_name, const std::string& file_name,
OptimizedFunctionGraphInfo& optimized_function_graph_info,
Env* env) {
const absl::Time cache_writing_start_time = absl::Now();
OptimizedFunctionGraph optimized_function_graph_proto;
string optimized_function_graph_proto_str;
optimized_function_graph_proto =
OptimizedFunctionGraphInfo::ToProto(optimized_function_graph_info);
optimized_function_graph_proto.SerializeToString(
&optimized_function_graph_proto_str);
if (!env->FileExists(dir_name).ok()) {
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir_name));
}
{
bool has_atomic_move = false;
TF_RETURN_IF_ERROR(env->HasAtomicMove(dir_name, &has_atomic_move));
if (!has_atomic_move) {
LOG_EVERY_POW_2(WARNING)
<< "Filesystem for OptimizedFunctionGraphInfo persistent cache at "
<< dir_name
<< " does not support atomic moves. Therefore the "
"persistent cache is racy if you have multiple optimizations "
"occurring simultaneously!";
}
}
std::string temp_file_name = file_name;
if (!env->CreateUniqueFileName(&temp_file_name, ".pb.tmp")) {
return absl::UnavailableError(
absl::StrCat("Could not create a unique file inside ", dir_name));
}
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(
env, temp_file_name, optimized_function_graph_proto_str));
TF_RETURN_IF_ERROR(env->RenameFile(temp_file_name, file_name));
const absl::Duration cache_writing_duration =
absl::Now() - cache_writing_start_time;
VLOG(3) << "Finished writing Tensorflow optimized graph into cache; took "
<< absl::ToInt64Milliseconds(cache_writing_duration)
<< " msecs, file name: " << file_name;
return absl::OkStatus();
}
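// Reads an OptimizedFunctionGraph proto from `file_name` and converts it back
// into an OptimizedFunctionGraphInfo.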
absl::StatusOr<OptimizedFunctionGraphInfo> ReadFromCache(
const string& file_name, Env* env) {
absl::Time cache_reading_start_time = absl::Now();
OptimizedFunctionGraph optimized_function_graph_proto;
string optimized_function_graph_proto_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
env, file_name, &optimized_function_graph_proto_str));
optimized_function_graph_proto.ParseFromString(
optimized_function_graph_proto_str);
TF_ASSIGN_OR_RETURN(absl::StatusOr<OptimizedFunctionGraphInfo>
optimized_function_graph_info_restored,
OptimizedFunctionGraphInfo::FromProto(
std::move(optimized_function_graph_proto)));
const absl::Duration cache_reading_duration =
absl::Now() - cache_reading_start_time;
VLOG(3) << "Finished reading Tensorflow optimized graph from cache; took "
<< absl::ToInt64Milliseconds(cache_reading_duration) << " msecs";
return optimized_function_graph_info_restored;
}
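// Builds the cache file name from the job name, task id, the function name with
// its last "_"-separated token stripped (if any), and the function's node count.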
string GetFileCacheName(const string& dir_name, const string& function_name,
const FunctionDef* fdef) {
string plain_func_name = function_name;
if (absl::StrContains(function_name, "_")) {
std::vector<string> func_name_tokens = absl::StrSplit(function_name, '_');
func_name_tokens.pop_back();
plain_func_name = absl::StrJoin(func_name_tokens, "_");
}
return absl::StrCat(dir_name, "/", tsl::port::JobName(), "_",
tsl::port::TaskId(), "_", plain_func_name, "_",
fdef->node_def_size());
}
Status GetGraphAndArgRets(const string& function_name, AttrSlice attrs,
core::RefCountPtr<FunctionRecord>&& fdef,
const FunctionLibraryDefinition* lib_def,
std::unique_ptr<Graph>* graph,
std::vector<Node*>* arg_nodes,
std::vector<Node*>* ret_nodes,
std::vector<string>* ret_node_names,
DataTypeVector* ret_types,
std::vector<string>* control_ret_node_names) {
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(std::move(fdef), attrs, lib_def, &fbody));
if (!fbody) {
LOG(ERROR) << "Failed to get FunctionBody for \"" << function_name << "\"";
return errors::Internal("Failed to construct FunctionBody for ",
function_name);
}
*graph = std::unique_ptr<Graph>(fbody->graph);
arg_nodes->reserve(fbody->arg_nodes.size());
std::copy(fbody->arg_nodes.begin(), fbody->arg_nodes.end(),
std::back_inserter(*arg_nodes));
ret_nodes->reserve(fbody->ret_nodes.size());
std::copy(fbody->ret_nodes.begin(), fbody->ret_nodes.end(),
std::back_inserter(*ret_nodes));
fbody->graph = nullptr;
ret_node_names->reserve(fbody->ret_nodes.size());
for (const Node* node : fbody->ret_nodes) {
ret_node_names->push_back(node->name());
}
for (const auto& ret_type : fbody->ret_types) {
ret_types->push_back(ret_type);
}
control_ret_node_names->reserve(fbody->control_ret_nodes.size());
for (const Node* node : fbody->control_ret_nodes) {
control_ret_node_names->push_back(node->name());
}
return absl::OkStatus();
}
}
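// Pins _Arg nodes to the requested input devices and assigns devices to _Retval
// nodes, either from `output_devices` or by following the device or colocation
// group of the node producing each return value.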
Status PinArgsAndRets(const std::vector<string>& input_devices,
const std::vector<string>& output_devices,
const DeviceSet& device_set,
const std::vector<Node*>& arg_nodes,
const std::vector<Node*>& ret_nodes,
const FunctionLibraryDefinition* lib_def,
Device* default_device) {
for (Node* node : arg_nodes) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int64_t index = attr_value->i();
node->set_assigned_device_name(input_devices[index]);
}
for (Node* node : ret_nodes) {
if (output_devices.empty()) {
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "T", &dtype));
VLOG(3) << "Trying to determine device for node " << node->name()
<< "[T=" << DataTypeString(dtype) << "]";
for (const auto& it : node->in_edges()) {
if (it->IsControlEdge()) continue;
Node* src_node = it->src();
const string* src_device = AssignedOrRequestedDeviceName(*src_node);
string colocation_group = "";
GetColocationGroup(src_node, &colocation_group);
VLOG(3) << "Considering src: " << src_node->name()
<< " src_device: " << *src_device
<< " colo group: " << colocation_group;
while (src_device->empty() && colocation_group.empty() &&
src_node->IsIdentity()) {
Node* input_node;
TF_RETURN_IF_ERROR(src_node->input_node(0, &input_node));
src_node = input_node;
src_device = AssignedOrRequestedDeviceName(*src_node);
GetColocationGroup(src_node, &colocation_group);
VLOG(3) << "Considering src: " << src_node->name()
<< " src_device: " << *src_device
<< " colo group: " << colocation_group;
}
const bool can_use_src_node_device =
!(dtype == DT_RESOURCE && IsFunctionCall(*lib_def, *src_node));
if (!colocation_group.empty()) {
AttrValue::ListValue colo_attr;
colo_attr.add_s(colocation_group);
std::vector<string> colo_slice = {colocation_group};
node->AddAttr(kColocationAttrName, colo_slice);
} else if (!src_device->empty() && can_use_src_node_device) {
if (dtype == DT_VARIANT && !src_node->IsArg()) {
continue;
}
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(*src_device, &parsed)) {
return errors::InvalidArgument(
"Failed to parse explicit device specification ", *src_device);
}
std::vector<Device*> matching_devices;
device_set.FindMatchingDevices(parsed, &matching_devices);
if (matching_devices.empty()) {
if (default_device != nullptr) {
matching_devices.push_back(default_device);
} else {
return errors::InvalidArgument(
"Unable to find any devices for spec ", *src_device);
}
} else if (matching_devices.size() != 1) {
bool on_same_task = true;
for (int i = 1; i < matching_devices.size(); ++i) {
if (!DeviceNameUtils::IsSameAddressSpace(
matching_devices.at(0)->parsed_name(),
matching_devices.at(i)->parsed_name())) {
on_same_task = false;
break;
}
}
if (on_same_task) {
continue;
}
if (default_device != nullptr) {
int colocated_on_default_device = 0;
for (int i = 0; i < matching_devices.size(); ++i) {
if (DeviceNameUtils::IsSameAddressSpace(
default_device->parsed_name(),
matching_devices.at(i)->parsed_name())) {
colocated_on_default_device++;
}
}
if (colocated_on_default_device == 1) {
continue;
}
}
string devices = "[";
for (Device* device : matching_devices) {
devices.append(device->name());
devices.append(", ");
}
if (devices.size() > 2) {
devices.resize(devices.size() - 2);
}
devices.append("]");
return errors::InvalidArgument(
*src_device,
"When FunctionLibraryRuntime::Options.output_devices are "
"not specified for a multi-device function, the device "
"specification on the output node must match exactly one "
"device. Matched devices are ",
devices);
}
VLOG(3) << "Setting output device to " << matching_devices[0]->name()
<< " for node " << SummarizeNode(*node);
node->set_assigned_device_name(matching_devices[0]->name());
} else if (!src_device->empty() && !can_use_src_node_device) {
VLOG(3) << "Did not set device for a resource output node "
<< SummarizeNode(*node);
}
}
} else {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int64_t index = attr_value->i();
DCHECK_GT(output_devices.size(), index);
VLOG(3) << "Setting output device to " << output_devices[index]
<< " for return at index " << index;
node->set_assigned_device_name(output_devices[index]);
}
}
return absl::OkStatus();
}
absl::StatusOr<OptimizedFunctionGraphInfo> OptimizeFunctionGraph(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Device* default_device, Env* env,
OptimizedFunctionGraph::OptimizationSource optimization_source) {
const uint64_t graph_optimization_start_time_usecs = env->NowMicros();
const FunctionLibraryDefinition* lib_def =
options.lib_def == nullptr ? input_lib_def : options.lib_def;
core::RefCountPtr<FunctionRecord> fdef = lib_def->FindRecord(function_name);
if (fdef == nullptr) {
return errors::InvalidArgument("Failed to find function \"", function_name,
"\" in function library: ", lib_def);
}
TF_RETURN_IF_ERROR(ValidateMultiDeviceOptions(fdef->fdef(), options));
std::unique_ptr<Graph> graph;
std::vector<Node*> arg_nodes, ret_nodes;
std::vector<string> ret_node_names;
DataTypeVector ret_types;
std::vector<string> control_ret_node_names;
TF_RETURN_IF_ERROR(GetGraphAndArgRets(
function_name, attrs, fdef.GetNewRef(), lib_def, &graph, &arg_nodes,
&ret_nodes, &ret_node_names, &ret_types, &control_ret_node_names));
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
function_name, kDebugGroupOpStacktrace, "before_opt", graph.get());
GraphDef graph_def;
graph->ToGraphDef(&graph_def);
FunctionLibraryDefinition reachable_lib_def =
lib_def->ReachableDefinitions(graph_def);
*graph_def.mutable_library() = reachable_lib_def.ToProto();
if (options.graph_collector != nullptr) {
options.graph_collector->CollectRawGraph(graph_def);
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain, "initial",
graph.get(), &reachable_lib_def, false);
if (!options.xla_compile_device_type.empty()) {
for (Node* node : graph->op_nodes()) {
node->AddAttr("_xla_compile_device_type",
options.xla_compile_device_type);
if (default_device) {
node->set_assigned_device_name(default_device->name());
}
}
}
TF_RETURN_IF_ERROR(
SetArgShape(options.input_resource_dtypes_and_shapes, arg_nodes));
TF_RETURN_IF_ERROR(PinArgsAndRets(
options.input_devices, options.output_devices, dev_set, arg_nodes,
ret_nodes, lib_def,
options.config_proto.allow_soft_placement() ? default_device : nullptr));
graph->mutable_flib_def()->set_default_registry(&reachable_lib_def);
graph->mutable_flib_def()->Clear();
const bool should_run_optimization_passes = !options.is_component_function;
if (!should_run_optimization_passes) {
VLOG(1) << "Skipping function/graph optimization passes when instantiating "
"component function "
<< function_name;
}
std::unordered_map<string, string> node_name_to_control_ret;
bool control_rets_updated = false;
if (should_run_optimization_passes) {
FunctionOptimizationPass::FunctionOptions function_options{
options.xla_compile_device_type, options.allow_soft_placement};
TF_RETURN_IF_ERROR(FunctionOptimizationPassRegistry::Global().Run(
function_name, dev_set, options.config_proto, function_options, &graph,
&reachable_lib_def, &control_ret_node_names, &control_rets_updated));
}
if (control_rets_updated) {
for (const auto& control_ret : control_ret_node_names) {
node_name_to_control_ret.emplace(control_ret, control_ret);
}
} else {
for (const auto& control_ret : fdef->fdef().control_ret()) {
node_name_to_control_ret.emplace(control_ret.second, control_ret.first);
}
}
GraphOptimizationPassOptions optimization_options;
SessionOptions session_options;
session_options.env = env;
session_options.config = options.config_proto;
optimization_options.session_options = &session_options;
optimization_options.graph = &graph;
optimization_options.flib_def = &reachable_lib_def;
optimization_options.device_set = &dev_set;
optimization_options.is_function_graph = true;
optimization_options.composite_devices = &composite_devices;
optimization_options.default_function_device = default_device;
optimization_options.function_def = &fdef->fdef();
optimization_options.shape_inference_on_tfe_dialect_import =
options.shape_inference_on_tfe_dialect_import;
optimization_options.debug_filename_prefix = function_name;
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_pre_placement_passes", graph.get(),
&reachable_lib_def, false);
if (should_run_optimization_passes) {
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::PRE_PLACEMENT, optimization_options));
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_placer", graph.get(),
&reachable_lib_def, false);
Placer placer(graph.get(), function_name, optimization_options.flib_def,
&dev_set, default_device,
options.config_proto.allow_soft_placement(),
options.config_proto.log_device_placement());
TF_RETURN_IF_ERROR(placer.Run(optimization_options));
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_post_placement_passes", graph.get(),
&reachable_lib_def, false);
if (should_run_optimization_passes) {
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PLACEMENT, optimization_options));
}
if (options.optimize_graph_fn) {
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_graph_optimization", graph.get(),
&reachable_lib_def, false);
Status status = options.optimize_graph_fn(
std::move(ret_node_names), std::move(control_ret_node_names),
&reachable_lib_def, dev_set, cpu_device, &graph);
if (!status.ok()) {
LOG(WARNING) << "Ignoring multi-device function optimization failure: "
<< status;
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"after_graph_optimization", graph.get(),
&reachable_lib_def, false);
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_post_rewrite_for_exec_passes", | #include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
constexpr absl::string_view kDevicePrefix = "/job:a/replica:0/task:0/device:";
void CreateCpuDeviceList(absl::string_view name_prefix, int num_devices,
std::vector<std::unique_ptr<Device>>& devices) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", num_devices});
TF_ASSERT_OK(
DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0", &devices));
}
void TestOptimizeFunctionGraphWithFunctionNotFound(bool load_from_cache) {
FunctionLibraryRuntime::InstantiateOptions opts;
opts.is_multi_device_function = true;
auto lib_def =
std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global());
std::vector<std::unique_ptr<Device>> devices;
CreateCpuDeviceList(kDevicePrefix, 1, devices);
DeviceSet device_set;
for (const auto& device : devices) {
device_set.AddDevice(device.get());
}
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info;
if (load_from_cache) {
optimized_function_graph_info = OptimizeFunctionGraphOrReadFromFileCache(
"FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[0].get(),
Env::Default(), absl::ZeroDuration());
} else {
optimized_function_graph_info = OptimizeFunctionGraph(
"FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[0].get(),
Env::Default(), OptimizedFunctionGraph::AOT);
}
EXPECT_TRUE(absl::IsInvalidArgument(optimized_function_graph_info.status()))
<< "Actual status: " << optimized_function_graph_info.status();
EXPECT_TRUE(
absl::StrContains(optimized_function_graph_info.status().message(),
"Failed to find function"))
<< "Actual error message: "
<< optimized_function_graph_info.status().message();
}
TEST(OptimizeFunctionGraphTest,
OptimizeFunctionGraphReturnsErrorIfNoFunctionFound) {
TestOptimizeFunctionGraphWithFunctionNotFound(false);
}
TEST(OptimizeFunctionGraphTest, OptimizeFunctionGraphReturnsCorrectResult) {
FunctionLibraryRuntime::InstantiateOptions opts;
opts.is_multi_device_function = true;
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::FindDevice();
auto lib_def =
std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), proto);
std::vector<std::unique_ptr<Device>> devices;
CreateCpuDeviceList(kDevicePrefix, 3, devices);
DeviceSet device_set;
for (const auto& device : devices) {
device_set.AddDevice(device.get());
}
const absl::StatusOr<OptimizedFunctionGraphInfo> aot_result =
OptimizeFunctionGraph("FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(),
devices[1].get(), Env::Default(),
OptimizedFunctionGraph::AOT);
TF_EXPECT_OK(aot_result.status());
EXPECT_EQ(aot_result->name, "FindDevice");
EXPECT_EQ(aot_result->num_return_nodes, 1);
EXPECT_THAT(aot_result->ret_types, ElementsAre(DT_STRING));
EXPECT_GT(aot_result->optimization_duration_usecs, 0);
EXPECT_EQ(aot_result->optimization_source, OptimizedFunctionGraph::AOT);
}
TEST(OptimizeFunctionGraphTest, ReloadFromCacheReturnsErrorIfNoFunctionFound) {
TestOptimizeFunctionGraphWithFunctionNotFound(true);
}
TEST(OptimizeFunctionGraphTest, OptimizeFunctionGraphAndWriteToCache) {
Env* env = Env::Default();
const string temp_dir = "/tmp/testing_cache_direcroty";
EXPECT_TRUE(env->RecursivelyCreateDir(temp_dir).ok());
setenv(kGraphCachingEnvVariableName, temp_dir.c_str(), 1);
std::vector<string> empty_file_list;
  TF_ASSERT_OK(
      env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &empty_file_list));
  ASSERT_TRUE(empty_file_list.empty());
  FunctionLibraryRuntime::InstantiateOptions opts;
  opts.is_multi_device_function = true;
  FunctionDefLibrary proto;
  // FindDeviceWithUuid() is assumed to register a function named
  // "FindDevice_1234", matching the instantiations below.
  *(proto.add_function()) = test::function::FindDeviceWithUuid();
  auto lib_def =
      std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), proto);
  std::vector<std::unique_ptr<Device>> devices;
  CreateCpuDeviceList(kDevicePrefix, 3, devices);
  DeviceSet device_set;
  for (const auto& device : devices) {
    device_set.AddDevice(device.get());
  }
  absl::StatusOr<OptimizedFunctionGraphInfo> optimized_info =
      OptimizeFunctionGraphOrReadFromFileCache(
          "FindDevice_1234", {}, opts, device_set, lib_def.get(),
          {}, devices[0].get(), devices[1].get(),
          Env::Default(), absl::Hours(48));
TF_ASSERT_OK(optimized_info.status());
std::vector<string> file_list;
  TF_ASSERT_OK(
      env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &file_list));
  EXPECT_EQ(file_list.size(), 0);
  optimized_info = OptimizeFunctionGraphOrReadFromFileCache(
      "FindDevice_1234", {}, opts, device_set, lib_def.get(),
      {}, devices[0].get(), devices[1].get(),
      Env::Default(), absl::ZeroDuration());
TF_ASSERT_OK(optimized_info.status());
file_list.clear();
TF_ASSERT_OK(env->GetMatchingPaths(
absl::StrCat(temp_dir, "/_-1_FindDevice_1"), &file_list));
EXPECT_EQ(file_list.size(), 1);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheMissCount(
metrics::GraphOptimizationSource::kJit),
2);
optimized_info = OptimizeFunctionGraphOrReadFromFileCache(
"FindDevice_1234", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[1].get(),
Env::Default(), absl::ZeroDuration());
TF_ASSERT_OK(optimized_info.status());
file_list.clear();
TF_ASSERT_OK(env->GetMatchingPaths(
absl::StrCat(temp_dir, "/_-1_FindDevice_1"), &file_list));
EXPECT_EQ(file_list.size(), 1);
EXPECT_GT(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kJit),
1);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheMissCount(
metrics::GraphOptimizationSource::kJit),
2);
EXPECT_EQ(optimized_info->name, "FindDevice_1234");
EXPECT_EQ(optimized_info->num_return_nodes, 1);
EXPECT_THAT(optimized_info->ret_types, ElementsAre(DT_STRING));
int64_t undeleted_files;
int64_t undeleted_dirs;
TF_EXPECT_OK(
env->DeleteRecursively(temp_dir, &undeleted_files, &undeleted_dirs));
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
TF_ASSERT_OK(
env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &empty_file_list));
ASSERT_TRUE(empty_file_list.empty());
}
}
} |
1,598 | cpp | tensorflow/tensorflow | device_propagation | tensorflow/core/common_runtime/device_propagation.cc | tensorflow/core/common_runtime/device_propagation_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_PROPAGATION_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_PROPAGATION_H_
#include <functional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace device_propagation {
typedef std::function<bool(StringPiece)> DeviceFilter;
typedef std::function<bool(const Node&)> NodeFilter;
}
void PropagateDevices(const device_propagation::NodeFilter& node_filter,
const device_propagation::DeviceFilter& device_filter,
Graph* graph);
}
#endif
#include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
namespace {
const std::string& AssignedOrRequestedDevice(const Node& node) {
if (!node.assigned_device_name().empty()) {
return node.assigned_device_name();
}
return node.requested_device();
}
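// Assigns to `node` the device of its data inputs when `node` matches
// `node_filter`, has no device yet, and all relevant inputs agree on a single
// device accepted by `device_filter`. Returns true if the node was updated.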
bool UpdateDeviceFromInputs(
const device_propagation::NodeFilter& node_filter,
const device_propagation::DeviceFilter& device_filter, Node* node) {
if (!AssignedOrRequestedDevice(*node).empty() || !node_filter(*node)) {
return false;
}
string proposed_device = "";
Node* proposed_src = nullptr;
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
Node* src = e->src();
const string& src_device = AssignedOrRequestedDevice(*src);
if ((node->IsSwitch() && src->IsLoopCond()) ||
(node->IsMerge() && src->IsEnter())) {
continue;
}
if (!device_filter(src_device)) return false;
if (proposed_src == nullptr) {
proposed_device = src_device;
proposed_src = src;
} else if (proposed_device != src_device) {
return false;
}
}
if (proposed_src) {
node->set_assigned_device_name(proposed_src->assigned_device_name());
node->set_requested_device(proposed_src->requested_device());
return true;
} else {
return false;
}
}
}
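// Propagates devices from inputs to matching nodes, breadth first, repeating
// until no node changes.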
void PropagateDevices(const device_propagation::NodeFilter& node_filter,
const device_propagation::DeviceFilter& device_filter,
Graph* graph) {
bool nodes_changed = true;
while (nodes_changed) {
nodes_changed = false;
BreadthFirstTraversal(
*graph, {}, [&nodes_changed, &node_filter, &device_filter](Node* node) {
nodes_changed |=
UpdateDeviceFromInputs(node_filter, device_filter, node);
});
}
}
} | #include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status_test_util.h"
using ::testing::UnorderedElementsAreArray;
namespace tensorflow {
namespace {
const char kTpu0[] = "/job:localhost/replica:0/task:0/device:TPU:0";
const char kTpu1[] = "/job:localhost/replica:0/task:0/device:TPU:1";
const char kTpu2[] = "/job:localhost/replica:0/task:0/device:TPU:2";
const char kGpu0[] = "/job:localhost/replica:0/task:0/device:GPU:0";
bool IsTPUDevice(StringPiece device_name) {
return absl::StrContains(device_name, "device:TPU:");
}
device_propagation::NodeFilter TargetOps(
const absl::flat_hash_set<std::string>& ops) {
return [&ops](const Node& n) { return ops.contains(n.type_string()); };
}
absl::flat_hash_map<std::string, std::string> GetNodeNameDevices(
const Graph& graph) {
absl::flat_hash_map<std::string, std::string> node_name_devices;
for (const Node* node : graph.nodes()) {
if (node->IsSource() || node->IsSink()) {
continue;
}
const string& device = node->assigned_device_name().empty()
? node->requested_device()
: node->assigned_device_name();
node_name_devices[node->name()] = device;
}
return node_name_devices;
}
TEST(DevicePropagationTest, PropagateTPUDevices) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kTpu0);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
b.node()->set_assigned_device_name(kTpu1);
auto c = ops::Identity(scope.WithOpName("C"), a);
auto d =
ops::Merge(scope.WithOpName("D"), std::initializer_list<Input>{a, c});
auto e =
ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{b, c});
auto f = ops::Identity(scope.WithOpName("F"), a);
f.node()->set_assigned_device_name(kTpu2);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Identity", "Merge"}), IsTPUDevice, &graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kTpu0},
{"B", kTpu1},
{"C", kTpu0},
{"D", kTpu0},
{"E", ""},
{"F", kTpu2},
}));
}
TEST(DevicePropagationTest, DoNotPropagateToUnsupportedOps) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kTpu0);
auto b = ops::Identity(scope.WithOpName("B"), a);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Merge"}), IsTPUDevice, &graph);
EXPECT_THAT(GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kTpu0},
{"B", ""},
}));
}
TEST(DevicePropagationTest, DoNotPropagateUnmatchedDevices) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kGpu0);
auto b = ops::Identity(scope.WithOpName("B"), a);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Identity"}), IsTPUDevice, &graph);
EXPECT_THAT(GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kGpu0},
{"B", ""},
}));
}
TEST(DevicePropagationTest, SwitchOpShouldIgnoreLoopCondOp) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_BOOL);
auto b = ops::LoopCond(scope.WithOpName("B"), a);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_FLOAT);
c.node()->set_assigned_device_name(kTpu2);
auto d = ops::Switch(scope.WithOpName("D"), c, b);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Switch", "LoopCond"}), IsTPUDevice, &graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(std::vector<
std::pair<std::string, std::string>>{
{"A", ""},
{"B", ""},
{"C", kTpu2},
{"D", kTpu2},
}));
}
TEST(DevicePropagationTest, MergeOpShouldIgnoreEnterOp) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
b.node()->set_assigned_device_name(kTpu2);
auto c = ops::internal::Enter(scope.WithOpName("C"), a, "Enter");
auto d = ops::NextIteration(scope.WithOpName("D"), b);
auto e =
ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{c, d});
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Enter", "Merge", "NextIteration"}), IsTPUDevice,
&graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(std::vector<
std::pair<std::string, std::string>>{
{"A", ""},
{"B", kTpu2},
{"C", ""},
{"D", kTpu2},
{"E", kTpu2},
}));
}
}
} |
1,599 | cpp | tensorflow/tensorflow | lower_if_op | tensorflow/core/common_runtime/lower_if_op.cc | tensorflow/core/common_runtime/lower_if_op_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_LOWER_IF_OP_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_LOWER_IF_OP_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class Graph;
class Node;
Status RewriteIfNode(Node* n, Graph* g, bool keep_node_fetchable);
}
#endif
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
namespace {
using NodeOut = NodeBuilder::NodeOut;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
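// Builds the lowered form of an If node: a Switch on the predicate, pivot
// Identity nodes, one call node per branch, and Merge nodes for the outputs.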
class CondBuilder {
public:
enum Branch { kElseBranch = 0, kThenBranch = 1 };
CondBuilder(Node* if_op, const NameAttrList& then_fn,
const NameAttrList& else_fn, bool keep_node_fetchable,
Graph* graph);
Status CreatePivotNodes();
Status AddInputs();
Status AddOutputs();
Status BuildLoweredIfOutput();
private:
string NewName(const string& infix);
Status AddInput(Node* src, int src_output);
Status SetColocationAndFinalize(NodeBuilder node_builder, Graph* graph,
Node** created_node);
std::vector<NodeOut> outputs_;
Node* control_predecessor_;
Node* if_op_;
const AttrValue* coloc_attr_;
Node* lowered_if_output_;
OutputTensor pred_;
Node* pivot_f_;
Node* pivot_t_;
Node* then_call_node_;
Node* else_call_node_;
Node* branch_executed_node_;
Graph* graph_;
string name_;
bool keep_node_fetchable_;
NodeDebugInfo debug_info_;
NodeBuilder then_call_builder_;
NodeBuilder else_call_builder_;
};
CondBuilder::CondBuilder(Node* if_op, const NameAttrList& then_fn,
const NameAttrList& else_fn, bool keep_node_fetchable,
Graph* graph)
: if_op_(if_op),
coloc_attr_(if_op_->attrs().Find(kColocationAttrName)),
graph_(graph),
name_(if_op->name()),
keep_node_fetchable_(keep_node_fetchable),
debug_info_(*if_op_),
then_call_builder_(NewName("then"), then_fn.name(), graph->op_registry(),
&debug_info_),
else_call_builder_(NewName("else"), else_fn.name(), graph->op_registry(),
&debug_info_) {
TF_CHECK_OK(if_op_->input_tensor(0, &pred_));
then_call_builder_.Device(if_op_->requested_device());
then_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : then_fn.attr()) {
then_call_builder_.Attr(i.first, i.second);
}
else_call_builder_.Device(if_op_->requested_device());
else_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : else_fn.attr()) {
else_call_builder_.Attr(i.first, i.second);
}
}
Status CondBuilder::SetColocationAndFinalize(NodeBuilder node_builder,
Graph* graph,
Node** created_node) {
if (coloc_attr_ != nullptr) {
node_builder = node_builder.Attr(kColocationAttrName, *coloc_attr_);
}
return node_builder.Finalize(graph, created_node);
}
Status CondBuilder::CreatePivotNodes() {
Node* switch_pred;
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("switch_pred"), "Switch",
graph_->op_registry(), &debug_info_)
.Input(NodeOut(pred_))
.Input(NodeOut(pred_))
.Device(if_op_->requested_device()),
graph_, &switch_pred));
control_predecessor_ = switch_pred;
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("pivot_f"), "Identity",
graph_->op_registry(), &debug_info_)
.Input(switch_pred, kElseBranch)
.Device(if_op_->requested_device()),
graph_, &pivot_f_));
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("pivot_t"), "Identity",
graph_->op_registry(), &debug_info_)
.Input(switch_pred, kThenBranch)
.Device(if_op_->requested_device()),
graph_, &pivot_t_));
return absl::OkStatus();
}
string CondBuilder::NewName(const string& infix) {
return graph_->NewName(strings::StrCat(name_, "/", infix));
}
Status CondBuilder::AddInput(Node* src, int src_output) {
Node* input;
NodeDebugInfo debug_info(*src);
TF_RETURN_IF_ERROR(
NodeBuilder(NewName(src->name()), "Switch", graph_->op_registry(),
&debug_info)
.Input(src, src_output)
.Input(pred_)
.Device(src->requested_device())
.Attr(kColocationAttrName,
{absl::StrCat(kColocationGroupPrefix, src->name())})
.Finalize(graph_, &input));
then_call_builder_.Input(input, kThenBranch);
else_call_builder_.Input(input, kElseBranch);
return absl::OkStatus();
}
Status CondBuilder::AddInputs() {
std::vector<const Edge*> edges;
TF_RETURN_IF_ERROR(if_op_->input_edges(&edges));
for (int i = 1; i < edges.size(); ++i) {
const Edge* e = edges[i];
TF_RETURN_IF_ERROR(AddInput(e->src(), e->src_output()));
}
for (const Edge* e : if_op_->in_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(e->src(), control_predecessor_);
}
}
return absl::OkStatus();
}
Status CondBuilder::AddOutputs() {
TF_RETURN_IF_ERROR(then_call_builder_.Finalize(graph_, &then_call_node_));
graph_->AddControlEdge(pivot_t_, then_call_node_);
TF_RETURN_IF_ERROR(else_call_builder_.Finalize(graph_, &else_call_node_));
graph_->AddControlEdge(pivot_f_, else_call_node_);
std::vector<Node*> merges(then_call_node_->num_outputs());
outputs_.resize(merges.size());
for (int i = 0; i < then_call_node_->num_outputs(); ++i) {
TF_RETURN_IF_ERROR(SetColocationAndFinalize(
NodeBuilder(NewName("output"), "Merge", graph_->op_registry(),
&debug_info_)
.Input({NodeOut(then_call_node_, i), NodeOut(else_call_node_, i)})
.Device(if_op_->requested_device()),
graph_, &merges[i]));
outputs_[i] = NodeOut(merges[i], 0);
}
TF_RETURN_IF_ERROR(SetColocationAndFinalize(
NodeBuilder(NewName("branch_executed"), "Merge", graph_->op_registry(),
&debug_info_)
.Input({pivot_t_, pivot_f_})
.ControlInputs({then_call_node_, else_call_node_})
.Device(if_op_->requested_device()),
graph_, &branch_executed_node_));
TF_RETURN_IF_ERROR(BuildLoweredIfOutput());
for (const Edge* e : if_op_->out_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(branch_executed_node_, e->dst());
} else {
graph_->AddEdge(merges[e->src_output()], 0, e->dst(), e->dst_input());
}
}
return absl::OkStatus();
}
Status CondBuilder::BuildLoweredIfOutput() {
NodeBuilder builder = keep_node_fetchable_ && !outputs_.empty()
? NodeBuilder(name_, "IdentityN").Input(outputs_)
: NodeBuilder(name_, "NoOp");
return builder.Device(if_op_->requested_device())
.ControlInput(branch_executed_node_)
.Finalize(graph_, &lowered_if_output_);
}
}
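// Replaces If node `n` in graph `g` with the lowered Switch/Merge subgraph
// built by CondBuilder, then removes the original node.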
Status RewriteIfNode(Node* n, Graph* g, bool keep_node_fetchable) {
VLOG(2) << "Lower If node (keep_node_fetchable=" << keep_node_fetchable
<< "): " << SummarizeNode(*n);
const AttrValue* then_attr = n->attrs().Find("then_branch");
if (then_attr == nullptr) {
return errors::InvalidArgument("Then branch function missing");
}
const AttrValue* else_attr = n->attrs().Find("else_branch");
if (else_attr == nullptr) {
return errors::InvalidArgument("Else branch function missing");
}
CondBuilder cb(n, then_attr->func(), else_attr->func(), keep_node_fetchable,
g);
TF_RETURN_IF_ERROR(cb.CreatePivotNodes());
TF_RETURN_IF_ERROR(cb.AddInputs());
TF_RETURN_IF_ERROR(cb.AddOutputs());
g->RemoveNode(n);
return absl::OkStatus();
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncAttr(const string& name) {
AttrValue attr;
attr.mutable_func()->set_name(name);
return attr;
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerIfOpTest, Simple) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
*(f_lib_proto.add_function()) = test::function::XTimesFour();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", FuncAttr("XTimesTwo"))
.Attr("else_branch", FuncAttr("XTimesFour"))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
if (op->name() == "if") {
++node_called_if_count;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
ASSERT_NE(op->type_string(), "If");
if (op->name() == "if") {
++node_called_if_count;
}
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 2);
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
TEST(LowerIfOpTest, BranchFunctionsWithoutOutputs) {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
const auto assign_add = [](const string& fn_name, int v) {
const Tensor tensor = test::AsScalar<int32>(v);
return FDH::Create(
fn_name, {"v: resource"}, {}, {},
{
{{"c"}, "Const", {}, {{"value", tensor}, {"dtype", DT_INT32}}},
{{"upd"},
"AssignAddVariableOp",
{"v", "c:output"},
{{"dtype", DT_INT32}}},
},
{},
{{"side_effects", "upd"}});
};
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = assign_add("AddOne", 1);
*(f_lib_proto.add_function()) = assign_add("AddTwo", 2);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto initial_val = ops::Placeholder(root.WithOpName("initial_val"), DT_INT32);
auto var = ops::VarHandleOp(root.WithOpName("var"), DT_INT32, {});
auto init = ops::AssignVariableOp(root.WithOpName("init"), var, initial_val);
Node* if_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(var.node())});
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.ControlInput(init.operation.node())
.Attr("then_branch", FuncAttr("AddOne"))
.Attr("else_branch", FuncAttr("AddTwo"))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", DataTypeSlice{})
.Finalize(root.graph(), &if_node));
auto read = ops::ReadVariableOp(
root.WithOpName("read").WithControlDependencies(Output(if_node)), var,
DT_INT32);
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) ++switch_count;
if (op->IsMerge()) ++merge_count;
if (op->name() == "if") ++node_called_if_count;
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 1);
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 11);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
TEST(LowerIfOpTest, DoNotInlineLoweredFunction) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef x_times_two = test::function::XTimesTwo();
FunctionDef x_times_four = test::function::XTimesFour();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
(*x_times_four.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = x_times_two;
*(f_lib_proto.add_function()) = x_times_four;
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue tb;
tb.mutable_func()->set_name("XTimesTwo");
AttrValue eb;
eb.mutable_func()->set_name("XTimesFour");
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", tb)
.Attr("else_branch", eb)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int x_times_two_count = 0;
int x_times_four_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == x_times_two.signature().name()) {
x_times_two_count++;
}
if (op->type_string() == x_times_four.signature().name()) {
x_times_four_count++;
}
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(x_times_two_count, 1);
ASSERT_EQ(x_times_four_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
}
} |